diff --git a/.circleci/config.yml b/.circleci/config.yml
index 4f8dc3d2d5ad4..dad9a5e0d1a38 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -2,139 +2,192 @@
defaults:
defaults: &defaults
working_directory: '/go/src/github.com/influxdata/telegraf'
- go-1_9: &go-1_9
+ environment:
+ GOFLAGS: -p=8
+ go-1_13: &go-1_13
docker:
- - image: 'quay.io/influxdb/telegraf-ci:1.9.7'
- go-1_10: &go-1_10
+ - image: 'quay.io/influxdb/telegraf-ci:1.13.13'
+ go-1_14: &go-1_14
docker:
- - image: 'quay.io/influxdb/telegraf-ci:1.10.8'
- go-1_11: &go-1_11
- docker:
- - image: 'quay.io/influxdb/telegraf-ci:1.11.5'
+ - image: 'quay.io/influxdb/telegraf-ci:1.14.5'
+ mac: &mac
+ macos:
+ xcode: 11.3.1
+ working_directory: '~/go/src/github.com/influxdata/telegraf'
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: 1
+ GOFLAGS: -p=8
version: 2
jobs:
deps:
- <<: [ *defaults, *go-1_11 ]
+ <<: [ *defaults, *go-1_14 ]
steps:
- checkout
- restore_cache:
- key: vendor-{{ checksum "Gopkg.lock" }}
+ key: go-mod-v1-{{ checksum "go.sum" }}
- run: 'make deps'
- - run: 'dep check'
+ - run: 'make tidy'
- save_cache:
- name: 'vendored deps'
- key: vendor-{{ checksum "Gopkg.lock" }}
+ name: 'go module cache'
+ key: go-mod-v1-{{ checksum "go.sum" }}
paths:
- - './vendor'
+ - '/go/pkg/mod'
- persist_to_workspace:
- root: '/go/src'
+ root: '/go'
paths:
- '*'
+ macdeps:
+ <<: [ *mac ]
+ steps:
+ - checkout
+ - restore_cache:
+ key: mac-go-mod-v1-{{ checksum "go.sum" }}
+ - run: 'brew install go' # latest
+ - run: 'make deps'
+ - run: 'make tidy'
+ - save_cache:
+ name: 'go module cache'
+ key: mac-go-mod-v1-{{ checksum "go.sum" }}
+ paths:
+ - '~/go/pkg/mod'
+ - '/usr/local/Cellar/go'
+ - '/usr/local/bin/go'
+ - '/usr/local/bin/gofmt'
+ - persist_to_workspace:
+ root: '/'
+ paths:
+ - 'usr/local/bin/go'
+ - 'usr/local/Cellar/go'
+ - 'usr/local/bin/gofmt'
+ - 'Users/distiller/go'
- test-go-1.9:
- <<: [ *defaults, *go-1_9 ]
+ test-go-1.13:
+ <<: [ *defaults, *go-1_13 ]
steps:
- attach_workspace:
- at: '/go/src'
- # disabled due to gofmt differences (1.10 vs 1.11).
- #- run: 'make check'
+ at: '/go'
+ - run: 'make'
- run: 'make test'
- test-go-1.10:
- <<: [ *defaults, *go-1_10 ]
+ test-go-1.13-386:
+ <<: [ *defaults, *go-1_13 ]
steps:
- attach_workspace:
- at: '/go/src'
- # disabled due to gofmt differences (1.10 vs 1.11).
- #- run: 'make check'
- - run: 'make test'
- test-go-1.11:
- <<: [ *defaults, *go-1_11 ]
+ at: '/go'
+ - run: 'GOARCH=386 make'
+ - run: 'GOARCH=386 make test'
+ test-go-1.14:
+ <<: [ *defaults, *go-1_14 ]
steps:
- attach_workspace:
- at: '/go/src'
+ at: '/go'
+ - run: 'make'
- run: 'make check'
+ - run: 'make check-deps'
- run: 'make test'
- test-go-1.11-386:
- <<: [ *defaults, *go-1_11 ]
+ test-go-1.14-386:
+ <<: [ *defaults, *go-1_14 ]
steps:
- attach_workspace:
- at: '/go/src'
+ at: '/go'
+ - run: 'GOARCH=386 make'
- run: 'GOARCH=386 make check'
- run: 'GOARCH=386 make test'
+ test-go-darwin:
+ <<: [ *mac ]
+ steps:
+ - attach_workspace:
+ at: '/'
+ - run: 'make'
+ - run: 'make check'
+ - run: 'make test'
package:
- <<: [ *defaults, *go-1_11 ]
+ <<: [ *defaults, *go-1_14 ]
steps:
- attach_workspace:
- at: '/go/src'
+ at: '/go'
- run: 'make package'
- store_artifacts:
- path: './build'
- destination: 'build'
+ path: './build/dist'
+ destination: 'build/dist'
+
release:
- <<: [ *defaults, *go-1_11 ]
+ <<: [ *defaults, *go-1_14 ]
steps:
- attach_workspace:
- at: '/go/src'
- - run: 'make package-release'
+ at: '/go'
+ - run: 'make package'
- store_artifacts:
- path: './build'
- destination: 'build'
+ path: './build/dist'
+ destination: 'build/dist'
nightly:
- <<: [ *defaults, *go-1_11 ]
+ <<: [ *defaults, *go-1_14 ]
steps:
- attach_workspace:
- at: '/go/src'
- - run: 'make package-nightly'
+ at: '/go'
+ - run: 'NIGHTLY=1 make package'
+ - run: 'make upload-nightly'
- store_artifacts:
- path: './build'
- destination: 'build'
+ path: './build/dist'
+ destination: 'build/dist'
workflows:
version: 2
check:
jobs:
+ - 'macdeps':
+ filters:
+ tags:
+ only: /.*/
- 'deps':
filters:
tags:
only: /.*/
- - 'test-go-1.9':
+ - 'test-go-1.13':
requires:
- 'deps'
filters:
tags:
only: /.*/
- - 'test-go-1.10':
+ - 'test-go-1.13-386':
requires:
- 'deps'
filters:
tags:
only: /.*/
- - 'test-go-1.11':
+ - 'test-go-1.14':
requires:
- 'deps'
filters:
tags:
only: /.*/
- - 'test-go-1.11-386':
+ - 'test-go-1.14-386':
requires:
- 'deps'
filters:
tags:
only: /.*/
+ - 'test-go-darwin':
+ requires:
+ - 'macdeps'
+ filters:
+ tags: # only runs on tags if you specify this filter
+ only: /.*/
- 'package':
requires:
- - 'test-go-1.9'
- - 'test-go-1.10'
- - 'test-go-1.11'
- - 'test-go-1.11-386'
+ - 'test-go-darwin'
+ - 'test-go-1.13'
+ - 'test-go-1.13-386'
+ - 'test-go-1.14'
+ - 'test-go-1.14-386'
- 'release':
requires:
- - 'test-go-1.9'
- - 'test-go-1.10'
- - 'test-go-1.11'
- - 'test-go-1.11-386'
+ - 'test-go-darwin'
+ - 'test-go-1.13'
+ - 'test-go-1.13-386'
+ - 'test-go-1.14'
+ - 'test-go-1.14-386'
filters:
tags:
only: /.*/
@@ -143,24 +196,29 @@ workflows:
nightly:
jobs:
- 'deps'
- - 'test-go-1.9':
+ - 'macdeps'
+ - 'test-go-1.13':
requires:
- 'deps'
- - 'test-go-1.10':
+ - 'test-go-1.13-386':
requires:
- 'deps'
- - 'test-go-1.11':
+ - 'test-go-1.14':
requires:
- 'deps'
- - 'test-go-1.11-386':
+ - 'test-go-1.14-386':
requires:
- 'deps'
+ - 'test-go-darwin':
+ requires:
+ - 'macdeps'
- 'nightly':
requires:
- - 'test-go-1.9'
- - 'test-go-1.10'
- - 'test-go-1.11'
- - 'test-go-1.11-386'
+ - 'test-go-darwin'
+ - 'test-go-1.13'
+ - 'test-go-1.13-386'
+ - 'test-go-1.14'
+ - 'test-go-1.14-386'
triggers:
- schedule:
cron: "0 7 * * *"
diff --git a/.gitattributes b/.gitattributes
index 276cc770936fd..21bc439bf797e 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,4 +1,5 @@
CHANGELOG.md merge=union
README.md merge=union
+go.sum merge=union
plugins/inputs/all/all.go merge=union
plugins/outputs/all/all.go merge=union
diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md
index 49cfdefe3d0ec..28c6237ac75d1 100644
--- a/.github/ISSUE_TEMPLATE/Bug_report.md
+++ b/.github/ISSUE_TEMPLATE/Bug_report.md
@@ -1,28 +1,46 @@
---
name: Bug report
+labels: bug
about: Create a report to help us improve
---
+
### Relevant telegraf.conf:
-
+
```toml
```
### System info:
-
+
+
+### Docker
+
+
### Steps to reproduce:
+
+
1. ...
2. ...
### Expected behavior:
+
+
### Actual behavior:
+
+
### Additional info:
-
+
diff --git a/.github/ISSUE_TEMPLATE/Feature_request.md b/.github/ISSUE_TEMPLATE/Feature_request.md
index 20aba04bec860..00eb152e1e4a9 100644
--- a/.github/ISSUE_TEMPLATE/Feature_request.md
+++ b/.github/ISSUE_TEMPLATE/Feature_request.md
@@ -1,5 +1,6 @@
---
name: Feature request
+labels: feature request
about: Suggest an idea for this project
---
diff --git a/.github/ISSUE_TEMPLATE/Support.md b/.github/ISSUE_TEMPLATE/Support.md
new file mode 100644
index 0000000000000..d150dbfdcd7ba
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/Support.md
@@ -0,0 +1,22 @@
+---
+name: Support request
+labels: support
+about: Open a support request
+
+---
+
+
diff --git a/.gitignore b/.gitignore
index 4176a04131cfc..df2b3d06643c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,7 @@
+/.idea
/build
/telegraf
/telegraf.exe
/telegraf.gz
/vendor
+.DS_Store
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 70edb6e836731..ec9540d30d56a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,14 +1,711 @@
-## v1.11 [unreleased]
+## v1.16.0 [unreleased]
+
+#### Release Notes
+
+ - Many documentation updates
+ - New [code examples](https://github.com/influxdata/telegraf/tree/master/plugins/processors/starlark/testdata) for the [Starlark processor](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md)
+ - [#7837](https://github.com/influxdata/telegraf/pull/7837) `build` update Go versions: 1.14.5, 1.13.13
+ - [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck
+
+#### Features
+
+ - [#7814](https://github.com/influxdata/telegraf/pull/7814) `agent` Send metrics in FIFO order
+ - [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input
+ - [#7870](https://github.com/influxdata/telegraf/pull/7870) `inputs.mongodb` Added new metric "pages written from cache"
+ - [#7808](https://github.com/influxdata/telegraf/pull/7808) `inputs.sqlserver` added new counter - Lock Timeouts (timeout > 0)/sec
+ - [#7904](https://github.com/influxdata/telegraf/pull/7904) `inputs.clickhouse` add additional metrics to clickhouse input plugin
+ - [#7986](https://github.com/influxdata/telegraf/pull/7986) `inputs.http_listener_v2` make http header tags case insensitive
+
+#### Bug Fixes
+
+ - [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed
+ - [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags
+ - [#7816](https://github.com/influxdata/telegraf/pull/7816) `shim` fix bug with loading plugins in shim with no config
+ - [#7867](https://github.com/influxdata/telegraf/pull/7867) `agent` fix issue with execd restart_delay being ignored
+ - [#7908](https://github.com/influxdata/telegraf/pull/7908) `outputs.opentsdb` now skips NaN and Inf JSON values
+
+#### New Input Plugins
+
+- [proxmox](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/proxmox/README.md) - Contributed by @effitient
+- [opcua](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/opcua/README.md) - Contributed by @influxdata
+
+#### New Output Plugins
+
+- [dynatrace](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/dynatrace/README.md) - Contributed by @thschue
+- [sumologic](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/dynatrace/README.md) - Contributed by @pmalek-sumo
+
+
+
+#### New External Plugins
+
+ See [EXTERNAL_PLUGINS.md](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md) for a full list of external plugins
+
+ - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS.
+ - [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos
+
+## v1.15.2 [2020-07-31]
+
+#### Bug Fixes
+
+- [#7905](https://github.com/influxdata/telegraf/issues/7905): Fix RPM /var/log/telegraf permissions
+- [#7880](https://github.com/influxdata/telegraf/issues/7880): Fix tail following on EOF
+
+## v1.15.1 [2020-07-22]
+
+#### Bug Fixes
+
+- [#7877](https://github.com/influxdata/telegraf/pull/7877): Fix architecture in non-amd64 deb and rpm packages.
+
+## v1.15.0 [2020-07-22]
+
+#### Release Notes
+
+- The `logparser` input is deprecated, use the `tail` input with `data_format =
+ "grok"` as a replacement.
+
+- The `cisco_telemetry_gnmi` input has been renamed to `gnmi` to better reflect
+ its general support for gNMI devices.
+
+- Several fields used primarily for debugging have been removed from the
+ `splunkmetric` serializer, if you are making use of these fields they can be
+ added back with the `tag` option.
+
+- Telegraf's `--test` mode now runs processors and aggregators before printing
+ metrics.
+
+- Official packages now built with Go 1.14.5.
+
+- When updating the Debian package you will no longer be prompted to merge the
+ telegraf.conf file, instead the new version will be installed to
+ `/etc/telegraf/telegraf.conf.sample`. The tar and zip packages now include
+ the version in the top level directory.
+
+#### New Inputs
+
+- [nginx_sts](/plugins/inputs/nginx_sts/README.md) - Contributed by @zdmytriv
+- [redfish](/plugins/inputs/redfish/README.md) - Contributed by @sarvanikonda
+
+#### New Processors
+
+- [defaults](/plugins/processors/defaults/README.md) - Contributed by @jregistr
+- [execd](/plugins/processors/execd/README.md) - Contributed by @influxdata
+- [filepath](/plugins/processors/filepath/README.md) - Contributed by @kir4h
+- [ifname](/plugins/processors/ifname/README.md) - Contributed by @influxdata
+- [port_name](/plugins/processors/port_name/README.md) - Contributed by @influxdata
+- [reverse_dns](/plugins/processors/reverse_dns/README.md) - Contributed by @influxdata
+- [starlark](/plugins/processors/starlark/README.md) - Contributed by @influxdata
+
+#### New Outputs
+
+- [newrelic](/plugins/outputs/newrelic/README.md) - Contributed by @hsinghkalsi
+- [execd](/plugins/outputs/execd/README.md) - Contributed by @influxdata
+
+#### Features
+
+- [#7634](https://github.com/influxdata/telegraf/pull/7634): Add support for streaming processors.
+- [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin.
+- [#7193](https://github.com/influxdata/telegraf/pull/7193): Add additional concurrent transaction information.
+- [#7223](https://github.com/influxdata/telegraf/pull/7223): Add ability to specify HTTP Headers in http_listener_v2 which will added as tags.
+- [#7140](https://github.com/influxdata/telegraf/pull/7140): Apply ping deadline to dns lookup.
+- [#7225](https://github.com/influxdata/telegraf/pull/7225): Add support for 64-bit integer types to modbus input.
+- [#7231](https://github.com/influxdata/telegraf/pull/7231): Add possibility to specify measurement per register.
+- [#7136](https://github.com/influxdata/telegraf/pull/7136): Support multiple templates for graphite serializers.
+- [#7250](https://github.com/influxdata/telegraf/pull/7250): Deploy telegraf configuration as a "non config" file.
+- [#7214](https://github.com/influxdata/telegraf/pull/7214): Add VolumeSpace query for sqlserver input with metric_version 2.
+- [#7304](https://github.com/influxdata/telegraf/pull/7304): Add reading bearer token from a file to http input.
+- [#7366](https://github.com/influxdata/telegraf/pull/7366): add support for SIGUSR1 to trigger flush.
+- [#7271](https://github.com/influxdata/telegraf/pull/7271): Add retry when slave is busy to modbus input.
+- [#7356](https://github.com/influxdata/telegraf/pull/7356): Add option to save retention policy as tag in influxdb_listener.
+- [#6915](https://github.com/influxdata/telegraf/pull/6915): Add support for MDS and RGW sockets to ceph input.
+- [#7391](https://github.com/influxdata/telegraf/pull/7391): Extract target as a tag for each rule in iptables input.
+- [#7434](https://github.com/influxdata/telegraf/pull/7434): Use docker log timestamp as metric time.
+- [#7359](https://github.com/influxdata/telegraf/pull/7359): Add cpu query to sqlserver input.
+- [#7464](https://github.com/influxdata/telegraf/pull/7464): Add field creation to date processor and integer unix time support.
+- [#7483](https://github.com/influxdata/telegraf/pull/7483): Add integer mapping support to enum processor.
+- [#7321](https://github.com/influxdata/telegraf/pull/7321): Add additional fields to mongodb input.
+- [#7491](https://github.com/influxdata/telegraf/pull/7491): Add authentication support to the http_response input plugin.
+- [#7503](https://github.com/influxdata/telegraf/pull/7503): Add truncate_tags setting to wavefront output.
+- [#7545](https://github.com/influxdata/telegraf/pull/7545): Add configurable separator graphite serializer and output.
+- [#7489](https://github.com/influxdata/telegraf/pull/7489): Add cluster state integer to mongodb input.
+- [#7515](https://github.com/influxdata/telegraf/pull/7515): Add option to disable mongodb cluster status.
+- [#7319](https://github.com/influxdata/telegraf/pull/7319): Add support for battery level monitoring to the fibaro input.
+- [#7405](https://github.com/influxdata/telegraf/pull/7405): Allow collection of HTTP Headers in http_response input.
+- [#7540](https://github.com/influxdata/telegraf/pull/7540): Add processor to look up service name by port.
+- [#7474](https://github.com/influxdata/telegraf/pull/7474): Add new once mode that write to outputs and exits.
+- [#7474](https://github.com/influxdata/telegraf/pull/7474): Run processors and aggregators during test mode.
+- [#7294](https://github.com/influxdata/telegraf/pull/7294): Add SNMPv3 trap support to snmp_trap input.
+- [#7646](https://github.com/influxdata/telegraf/pull/7646): Add video codec stats to nvidia-smi.
+- [#7651](https://github.com/influxdata/telegraf/pull/7651): Fix source field for icinga2 plugin and add tag for server hostname.
+- [#7619](https://github.com/influxdata/telegraf/pull/7619): Add timezone configuration to csv input data format.
+- [#7596](https://github.com/influxdata/telegraf/pull/7596): Add ability to collect response body as field with http_response.
+- [#7267](https://github.com/influxdata/telegraf/pull/7267): Add ability to add selectors as tags in kube_inventory.
+- [#7712](https://github.com/influxdata/telegraf/pull/7712): Add counter type to sqlserver perfmon collector.
+- [#7575](https://github.com/influxdata/telegraf/pull/7575): Add missing nvme attributes to smart plugin.
+- [#7726](https://github.com/influxdata/telegraf/pull/7726): Add laundry to mem plugin on FreeBSD.
+- [#7762](https://github.com/influxdata/telegraf/pull/7762): Allow per input overriding of collection_jitter and precision.
+- [#7686](https://github.com/influxdata/telegraf/pull/7686): Improve performance of procstat: Up to 40/120x better performance.
+- [#7677](https://github.com/influxdata/telegraf/pull/7677): Expand execd shim support for processor and outputs.
+- [#7154](https://github.com/influxdata/telegraf/pull/7154): Add v3 metadata support to ecs input.
+- [#7792](https://github.com/influxdata/telegraf/pull/7792): Support utf-16 in file and tail inputs.
+
+#### Bug Fixes
+
+- [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled.
+- [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue.
+- [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from splunkmetric serializer.
+- [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets.
+- [#7390](https://github.com/influxdata/telegraf/issues/7390): Fix interval drift when round_interval is set in agent.
+- [#7524](https://github.com/influxdata/telegraf/pull/7524): Fix typo in total_elapsed_time_ms field of sqlserver input.
+- [#7203](https://github.com/influxdata/telegraf/issues/7203): Exclude csv_timestamp_column and csv_measurement_column from fields.
+- [#7018](https://github.com/influxdata/telegraf/issues/7018): Fix incorrect uptime when clock is adjusted.
+- [#6807](https://github.com/influxdata/telegraf/issues/6807): Fix memory leak when using procstat on Windows.
+- [#7495](https://github.com/influxdata/telegraf/issues/7495): Improve sqlserver input compatibility with older server versions.
+- [#7558](https://github.com/influxdata/telegraf/issues/7558): Remove trailing backslash from tag keys/values in influx serializer.
+- [#7715](https://github.com/influxdata/telegraf/issues/7715): Fix incorrect Azure SQL DB server properties.
+- [#7431](https://github.com/influxdata/telegraf/issues/7431): Fix json unmarshal error in the kibana input.
+- [#5633](https://github.com/influxdata/telegraf/issues/5633): Send metrics in FIFO order.
+
+## v1.14.5 [2020-06-30]
+
+#### Bug Fixes
+
+- [#7686](https://github.com/influxdata/telegraf/pull/7686): Improve the performance of the procstat input.
+- [#7658](https://github.com/influxdata/telegraf/pull/7658): Fix ping exit code handling on non-Linux.
+- [#7718](https://github.com/influxdata/telegraf/pull/7718): Skip overs errors in the output of the sensors command.
+- [#7748](https://github.com/influxdata/telegraf/issues/7748): Prevent startup when tags have incorrect type in configuration file.
+- [#7699](https://github.com/influxdata/telegraf/issues/7699): Fix panic with GJSON multiselect query in json parser.
+- [#7754](https://github.com/influxdata/telegraf/issues/7754): Allow any key usage type on x509 certificate.
+- [#7705](https://github.com/influxdata/telegraf/issues/7705): Allow histograms and summary types without buckets or quantiles in prometheus_client output.
+
+## v1.14.4 [2020-06-09]
+
+#### Bug Fixes
+
+- [#7325](https://github.com/influxdata/telegraf/issues/7325): Fix "cannot insert the value NULL error" with PerformanceCounters query.
+- [#7579](https://github.com/influxdata/telegraf/pull/7579): Fix numeric to bool conversion in converter processor.
+- [#7551](https://github.com/influxdata/telegraf/issues/7551): Fix typo in name of gc_cpu_fraction field of the influxdb input.
+- [#7617](https://github.com/influxdata/telegraf/issues/7617): Fix issue with influx stream parser blocking when data is in buffer.
+
+## v1.14.3 [2020-05-19]
+
+#### Bug Fixes
+
+- [#7412](https://github.com/influxdata/telegraf/pull/7412): Use same timestamp for all objects in arrays in the json parser.
+- [#7343](https://github.com/influxdata/telegraf/issues/7343): Handle multiple metrics with the same timestamp in dedup processor.
+- [#5905](https://github.com/influxdata/telegraf/issues/5905): Fix reconnection of timed out HTTP2 connections influxdb outputs.
+- [#7468](https://github.com/influxdata/telegraf/issues/7468): Fix negative value parsing in impi_sensor input.
+
+## v1.14.2 [2020-04-28]
+
+#### Bug Fixes
+
+- [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input.
+- [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call.
+- [#7318](https://github.com/influxdata/telegraf/issues/7318): Fix dimension limit on azure_monitor output.
+- [#7407](https://github.com/influxdata/telegraf/pull/7407): Fix 64-bit integer to string conversion in snmp input.
+- [#7327](https://github.com/influxdata/telegraf/issues/7327): Fix shard indices reporting in elasticsearch input.
+- [#7388](https://github.com/influxdata/telegraf/issues/7388): Ignore fields with NaN or Inf floats in the JSON serializer.
+- [#7402](https://github.com/influxdata/telegraf/issues/7402): Fix typo in name of gc_cpu_fraction field of the kapacitor input.
+- [#7235](https://github.com/influxdata/telegraf/issues/7235): Don't retry `create database` when using database_tag if forbidden by the server in influxdb output.
+- [#7406](https://github.com/influxdata/telegraf/issues/7406): Allow CR and FF inside of string fields in influx parser.
+
+## v1.14.1 [2020-04-14]
+
+#### Bug Fixes
+
+- [#7236](https://github.com/influxdata/telegraf/issues/7236): Fix PerformanceCounter query performance degradation in sqlserver input.
+- [#7257](https://github.com/influxdata/telegraf/issues/7257): Fix error when using the Name field in template processor.
+- [#7289](https://github.com/influxdata/telegraf/pull/7289): Fix export timestamp not working for prometheus on v2.
+- [#7310](https://github.com/influxdata/telegraf/issues/7310): Fix exclude database and retention policy tags is shared.
+- [#7262](https://github.com/influxdata/telegraf/issues/7262): Fix status path when using globs in phpfpm.
+
+## v1.14 [2020-03-26]
+
+#### Release Notes
+
+- In the `sqlserver` input, the `sqlserver_azurestats` measurement has been
+ renamed to `sqlserver_azure_db_resource_stats` due to an issue where numeric
+ metrics were previously being reported incorrectly as strings.
+
+- The `date` processor now uses the UTC timezone when creating its tag. In
+ previous versions the local time was used.
+
+#### New Inputs
+
+- [clickhouse](/plugins/inputs/clickhouse/README.md) - Contributed by @kshvakov
+- [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen
+- [eventhub_consumer](/plugins/inputs/eventhub_consumer/README.md) - Contributed by @R290
+- [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell
+- [lanz](/plugins/inputs/lanz/README.md): Contributed by @timhughes
+- [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais
+- [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri
+- [sflow](/plugins/inputs/sflow/README.md) - Contributed by @influxdata
+- [wireguard](/plugins/inputs/wireguard/README.md) - Contributed by @LINKIWI
+
+#### New Processors
+
+- [dedup](/plugins/processors/dedup/README.md) - Contributed by @igomura
+- [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern
+- [s2geo](/plugins/processors/s2geo/README.md) - Contributed by @alespour
+
+#### New Outputs
+
+- [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert
+
+#### Features
+
+- [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger.
+- [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input.
+- [#6764](https://github.com/influxdata/telegraf/pull/6764): Add ability to collect pod labels to kubernetes input.
+- [#6770](https://github.com/influxdata/telegraf/pull/6770): Expose unbound-control config file option.
+- [#6508](https://github.com/influxdata/telegraf/pull/6508): Add support for new nginx plus api endpoints.
+- [#6342](https://github.com/influxdata/telegraf/pull/6342): Add kafka SASL version control to support Azure Event Hub.
+- [#6869](https://github.com/influxdata/telegraf/pull/6869): Add RBPEX IO statistics to DatabaseIO query in sqlserver input.
+- [#6869](https://github.com/influxdata/telegraf/pull/6869): Add space on disk for each file to DatabaseIO query in the sqlserver input.
+- [#6869](https://github.com/influxdata/telegraf/pull/6869): Calculate DB Name instead of GUID in physical_db_name in the sqlserver input.
+- [#6733](https://github.com/influxdata/telegraf/pull/6733): Add latency stats to mongo input.
+- [#6844](https://github.com/influxdata/telegraf/pull/6844): Add source and port tags to jenkins_job metrics.
+- [#6886](https://github.com/influxdata/telegraf/pull/6886): Add date offset and timezone options to date processor.
+- [#6859](https://github.com/influxdata/telegraf/pull/6859): Exclude resources by inventory path in vsphere input.
+- [#6700](https://github.com/influxdata/telegraf/pull/6700): Allow a user defined field to be used as the graylog short_message.
+- [#6917](https://github.com/influxdata/telegraf/pull/6917): Add server_name override for x509_cert plugin.
+- [#6921](https://github.com/influxdata/telegraf/pull/6921): Add udp internal metrics for the statsd input.
+- [#6914](https://github.com/influxdata/telegraf/pull/6914): Add replica set tag to mongodb input.
+- [#6935](https://github.com/influxdata/telegraf/pull/6935): Add counters for merged reads and writes to diskio input.
+- [#6982](https://github.com/influxdata/telegraf/pull/6982): Add support for titlecase transformation to strings processor.
+- [#6993](https://github.com/influxdata/telegraf/pull/6993): Add support for MDB database information to openldap input.
+- [#6957](https://github.com/influxdata/telegraf/pull/6957): Add new fields for Jenkins total and busy executors.
+- [#7035](https://github.com/influxdata/telegraf/pull/7035): Fix dash to underscore replacement when handling embedded tags in Cisco MDT.
+- [#7039](https://github.com/influxdata/telegraf/pull/7039): Add process created_at time to procstat input.
+- [#7022](https://github.com/influxdata/telegraf/pull/7022): Add support for credentials file to nats_consumer and nats output.
+- [#7065](https://github.com/influxdata/telegraf/pull/7065): Add additional tags and fields to apcupsd.
+- [#7084](https://github.com/influxdata/telegraf/pull/7084): Add RabbitMQ slave_nodes and synchronized_slave_nodes metrics.
+- [#7089](https://github.com/influxdata/telegraf/pull/7089): Allow globs in FPM unix socket paths.
+- [#7071](https://github.com/influxdata/telegraf/pull/7071): Add non-cumulative histogram to histogram aggregator.
+- [#6969](https://github.com/influxdata/telegraf/pull/6969): Add label and field selectors to prometheus input k8s discovery.
+- [#7049](https://github.com/influxdata/telegraf/pull/7049): Add support for converting tag or field to measurement in converter processor.
+- [#7103](https://github.com/influxdata/telegraf/pull/7103): Add volume_mount_point to DatabaseIO query in sqlserver input.
+- [#7142](https://github.com/influxdata/telegraf/pull/7142): Add topic tag options to kafka output.
+- [#7141](https://github.com/influxdata/telegraf/pull/7141): Add support for setting InfluxDB retention policy using tag.
+- [#7163](https://github.com/influxdata/telegraf/pull/7163): Add Database IO Tempdb per Azure DB to sqlserver input.
+- [#7150](https://github.com/influxdata/telegraf/pull/7150): Add option for explicitly including queries in sqlserver input.
+- [#7173](https://github.com/influxdata/telegraf/pull/7173): Add support for GNMI DecimalVal type to cisco_telemetry_gnmi.
+
+#### Bug Fixes
+
+- [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input.
+- [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input.
+- [#7005](https://github.com/influxdata/telegraf/pull/7005): Search for chronyc only when chrony input plugin is enabled.
+- [#2280](https://github.com/influxdata/telegraf/issues/2280): Fix request to InfluxDB Listener failing with EOF.
+- [#6124](https://github.com/influxdata/telegraf/issues/6124): Fix InfluxDB listener to continue parsing after error.
+- [#7133](https://github.com/influxdata/telegraf/issues/7133): Fix log rotation to use actual file size instead of bytes written.
+- [#7103](https://github.com/influxdata/telegraf/pull/7103): Fix several issues with DatabaseIO query in sqlserver input.
+- [#7119](https://github.com/influxdata/telegraf/pull/7119): Fix internal metrics for output split into multiple lines.
+- [#7021](https://github.com/influxdata/telegraf/pull/7021): Fix schedulers query compatibility with pre SQL-2016.
+- [#7182](https://github.com/influxdata/telegraf/pull/7182): Set headers on influxdb_listener ping URL.
+- [#7165](https://github.com/influxdata/telegraf/issues/7165): Fix url encoding of job names in jenkins input plugin.
+
+## v1.13.4 [2020-02-25]
+
+#### Release Notes
+
+- Official packages now built with Go 1.13.8.
+
+#### Bug Fixes
+
+- [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input.
+- [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions.
+- [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 8192 stats in the ethtool input.
+- [#7060](https://github.com/influxdata/telegraf/issues/7060): Fix perf counters collection on named instances in sqlserver input.
+- [#6926](https://github.com/influxdata/telegraf/issues/6926): Use add time for prometheus expiration calculation.
+- [#7057](https://github.com/influxdata/telegraf/issues/7057): Fix inconsistency with input error counting in internal input.
+- [#7063](https://github.com/influxdata/telegraf/pull/7063): Use the same timestamp per call if no time is provided in prometheus input.
+
+## v1.13.3 [2020-02-04]
+
+#### Bug Fixes
+
+- [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4.
+- [#6960](https://github.com/influxdata/telegraf/issues/6960): Fix duplicate TrackingIDs can be returned in queue consumer plugins.
+- [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 4096 stats in the ethtool input.
+- [#6973](https://github.com/influxdata/telegraf/issues/6973): Expire metrics on query in addition to on add.
+
+## v1.13.2 [2020-01-21]
+
+#### Bug Fixes
+
+- [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows.
+- [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input.
+- [#6883](https://github.com/influxdata/telegraf/issues/6883): Add custom attributes for all resource types in vsphere input.
+- [#6899](https://github.com/influxdata/telegraf/pull/6899): Fix URL agent address form with udp in snmp input.
+- [#6619](https://github.com/influxdata/telegraf/issues/6619): Change logic to allow recording of device fields when attributes is false.
+- [#6903](https://github.com/influxdata/telegraf/issues/6903): Do not add invalid timestamps to kafka messages.
+- [#6906](https://github.com/influxdata/telegraf/issues/6906): Fix json_strict option and set default of true.
+
+## v1.13.1 [2020-01-08]
+
+#### Bug Fixes
+
+- [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover.
+- [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps.
+- [#6823](https://github.com/influxdata/telegraf/pull/6823): Fix missing config fields in prometheus serializer.
+- [#6694](https://github.com/influxdata/telegraf/issues/6694): Fix panic on connection loss with undelivered messages in mqtt_consumer.
+- [#6679](https://github.com/influxdata/telegraf/issues/6679): Encode query hash fields as hex strings in sqlserver input.
+- [#6345](https://github.com/influxdata/telegraf/issues/6345): Invalidate diskio cache if the metadata mtime has changed.
+- [#6800](https://github.com/influxdata/telegraf/issues/6800): Show platform not supported warning only on plugin creation.
+- [#6814](https://github.com/influxdata/telegraf/issues/6814): Fix rabbitmq cannot complete gather after request error.
+- [#6846](https://github.com/influxdata/telegraf/issues/6846): Fix /sbin/init --version executed on Telegraf startup.
+- [#6847](https://github.com/influxdata/telegraf/issues/6847): Use last path element as field key if path fully specified in cisco_telemetry_gnmi input.
+
+## v1.13 [2019-12-12]
+
+#### Release Notes
+
+- Official packages built with Go 1.13.5. This affects the minimum supported
+ version on several platforms, most notably requiring Windows 7 (2008 R2) or
+ later. For details, check the release notes for Go
+ [ports](https://golang.org/doc/go1.13#ports).
+- The `prometheus` input and `prometheus_client` output have a new mapping to
+ and from Telegraf metrics, which can be enabled by setting `metric_version = 2`.
+ The original mapping is deprecated. When both plugins have the same setting,
+ passthrough metrics will be unchanged. Refer to the `prometheus` input for
+ details about the mapping.
+
+#### New Inputs
+
+- [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn
+- [ethtool](/plugins/inputs/ethtool/README.md) - Contributed by @philippreston
+- [snmp_trap](/plugins/inputs/snmp_trap/README.md) - Contributed by @influxdata
+- [suricata](/plugins/inputs/suricata/README.md) - Contributed by @satta
+- [synproxy](/plugins/inputs/synproxy/README.md) - Contributed by @rfrenayworldstream
+- [systemd_units](/plugins/inputs/systemd_units/README.md) - Contributed by @benschweizer
+
+#### New Processors
+
+- [clone](/plugins/processors/clone/README.md) - Contributed by @adrianlzt
+
+#### New Aggregators
+
+- [merge](/plugins/aggregators/merge/README.md) - Contributed by @influxdata
+
+#### Features
+
+- [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input.
+- [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input.
+- [#5921](https://github.com/influxdata/telegraf/pull/5921): Add replication metrics to the redis input.
+- [#6177](https://github.com/influxdata/telegraf/pull/6177): Support NX-OS telemetry extensions in cisco_telemetry_mdt.
+- [#6415](https://github.com/influxdata/telegraf/pull/6415): Allow graphite parser to create Inf and NaN values.
+- [#6434](https://github.com/influxdata/telegraf/pull/6434): Use prefix base detection for ints in grok parser.
+- [#6465](https://github.com/influxdata/telegraf/pull/6465): Add more performance counter metrics to sqlserver input.
+- [#6476](https://github.com/influxdata/telegraf/pull/6476): Add millisecond unix time support to grok parser.
+- [#6473](https://github.com/influxdata/telegraf/pull/6473): Add container id as optional source tag to docker and docker_log input.
+- [#6504](https://github.com/influxdata/telegraf/pull/6504): Add lang parameter to OpenWeathermap input plugin.
+- [#6540](https://github.com/influxdata/telegraf/pull/6540): Log file open errors at debug level in tail input.
+- [#6553](https://github.com/influxdata/telegraf/pull/6553): Add timeout option to cloudwatch input.
+- [#6549](https://github.com/influxdata/telegraf/pull/6549): Support custom success codes in http input.
+- [#6530](https://github.com/influxdata/telegraf/pull/6530): Improve ipvs input error strings and logging.
+- [#6532](https://github.com/influxdata/telegraf/pull/6532): Add strict mode to JSON parser that can be disable to ignore invalid items.
+- [#6543](https://github.com/influxdata/telegraf/pull/6543): Add support for Kubernetes 1.16 and remove deprecated API usage.
+- [#6283](https://github.com/influxdata/telegraf/pull/6283): Add gathering of RabbitMQ federation link metrics.
+- [#6356](https://github.com/influxdata/telegraf/pull/6356): Add bearer token defaults for Kubernetes plugins.
+- [#5870](https://github.com/influxdata/telegraf/pull/5870): Add support for SNMP over TCP.
+- [#6603](https://github.com/influxdata/telegraf/pull/6603): Add support for per output flush jitter.
+- [#6650](https://github.com/influxdata/telegraf/pull/6650): Add a nameable file tag to file input plugin.
+- [#6640](https://github.com/influxdata/telegraf/pull/6640): Add Splunk MultiMetric support.
+- [#6680](https://github.com/influxdata/telegraf/pull/6668): Add support for sending HTTP Basic Auth in influxdb input
+- [#5767](https://github.com/influxdata/telegraf/pull/5767): Add ability to configure the url tag in the prometheus input.
+- [#5767](https://github.com/influxdata/telegraf/pull/5767): Add prometheus metric_version=2 mapping to internal metrics/line protocol.
+- [#6703](https://github.com/influxdata/telegraf/pull/6703): Add prometheus metric_version=2 support to prometheus_client output.
+- [#6660](https://github.com/influxdata/telegraf/pull/6660): Add content_encoding compression support to socket_listener.
+- [#6689](https://github.com/influxdata/telegraf/pull/6689): Add high resolution metrics support to CloudWatch output.
+- [#6716](https://github.com/influxdata/telegraf/pull/6716): Add SReclaimable and SUnreclaim to mem input.
+- [#6695](https://github.com/influxdata/telegraf/pull/6695): Allow multiple certificates per file in x509_cert input.
+- [#6686](https://github.com/influxdata/telegraf/pull/6686): Add additional tags to the x509 input.
+- [#6703](https://github.com/influxdata/telegraf/pull/6703): Add batch data format support to file output.
+- [#6688](https://github.com/influxdata/telegraf/pull/6688): Support partition assignment strategy configuration in kafka_consumer.
+- [#6731](https://github.com/influxdata/telegraf/pull/6731): Add node type tag to mongodb input.
+- [#6669](https://github.com/influxdata/telegraf/pull/6669): Add uptime_ns field to mongodb input.
+- [#6735](https://github.com/influxdata/telegraf/pull/6735): Support resolution of symlinks in filecount input.
+- [#6746](https://github.com/influxdata/telegraf/pull/6746): Set message timestamp to the metric time in kafka output.
+- [#6740](https://github.com/influxdata/telegraf/pull/6740): Add base64decode operation to string processor.
+- [#6790](https://github.com/influxdata/telegraf/pull/6790): Add option to control collecting global variables to mysql input.
+
+#### Bug Fixes
+
+- [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config.
+- [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input.
+- [#6573](https://github.com/influxdata/telegraf/issues/6573): Fix not a valid field error in Windows with nvidia input.
+- [#6614](https://github.com/influxdata/telegraf/issues/6614): Fix influxdb output serialization on connection closed.
+- [#6690](https://github.com/influxdata/telegraf/issues/6690): Fix ping skips remaining hosts after dns lookup error.
+- [#6684](https://github.com/influxdata/telegraf/issues/6684): Log mongodb oplog auth errors at debug level.
+- [#6705](https://github.com/influxdata/telegraf/issues/6705): Remove trailing underscore trimming from json flattener.
+- [#6421](https://github.com/influxdata/telegraf/issues/6421): Revert change causing cpu usage to be capped at 100 percent.
+- [#6523](https://github.com/influxdata/telegraf/issues/6523): Accept any media type in the prometheus input.
+- [#6769](https://github.com/influxdata/telegraf/issues/6769): Fix unix socket dial arguments in uwsgi input.
+- [#6757](https://github.com/influxdata/telegraf/issues/6757): Replace colon chars in prometheus output labels with metric_version=1.
+- [#6773](https://github.com/influxdata/telegraf/issues/6773): Set TrimLeadingSpace when TrimSpace is on in csv parser.
+
+## v1.12.6 [2019-11-19]
+
+#### Bug Fixes
+
+- [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level.
+- [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input.
+- [#6642](https://github.com/influxdata/telegraf/issues/6642): Fix interface option with method = native in ping input.
+- [#6680](https://github.com/influxdata/telegraf/pull/6680): Fix panic in mongodb input if shard connection pool stats are unreadable.
+
+## v1.12.5 [2019-11-12]
+
+#### Bug Fixes
+
+- [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin.
+- [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag.
+- [#6337](https://github.com/influxdata/telegraf/issues/6337): Change no metric error message to debug level in cloudwatch input.
+- [#6602](https://github.com/influxdata/telegraf/issues/6602): Add missing ServerProperties query to sqlserver input docs.
+- [#6643](https://github.com/influxdata/telegraf/pull/6643): Fix mongodb connections_total_created field loading.
+- [#6627](https://github.com/influxdata/telegraf/issues/6578): Fix metric creation when node is offline in jenkins input.
+- [#6649](https://github.com/influxdata/telegraf/issues/6615): Fix docker uptime_ns calculation when container has been restarted.
+- [#6647](https://github.com/influxdata/telegraf/issues/6646): Fix mysql field type conflict in conversion of gtid_mode to an integer.
+- [#5529](https://github.com/influxdata/telegraf/issues/5529): Fix mysql field type conflict with ssl_verify_depth and ssl_ctx_verify_depth.
+
+## v1.12.4 [2019-10-23]
+
+#### Release Notes
+
+- Official packages built with Go 1.12.12.
+
+#### Bug Fixes
+
+- [#6521](https://github.com/influxdata/telegraf/issues/6521): Fix metric generation with ping input native method.
+- [#6541](https://github.com/influxdata/telegraf/issues/6541): Exclude alias tag if unset from plugin internal stats.
+- [#6564](https://github.com/influxdata/telegraf/issues/6564): Fix socket_mode option in powerdns_recursor input.
+
+## v1.12.3 [2019-10-07]
+
+#### Bug Fixes
+
+- [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output.
+- [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10.
+- [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial number match in smart input.
+- [#6469](https://github.com/influxdata/telegraf/pull/6469): Add auth header only when env var is set.
+- [#6468](https://github.com/influxdata/telegraf/pull/6468): Fix running multiple mysql and sqlserver plugin instances.
+- [#6471](https://github.com/influxdata/telegraf/issues/6471): Fix database routing on retry with exclude_database_tag.
+- [#6488](https://github.com/influxdata/telegraf/issues/6488): Fix logging panic in exec input with nagios data format.
+
+## v1.12.2 [2019-09-24]
+
+#### Bug Fixes
+
+- [#6386](https://github.com/influxdata/telegraf/issues/6386): Fix detection of layout timestamps in csv and json parser.
+- [#6394](https://github.com/influxdata/telegraf/issues/6394): Fix parsing of BATTDATE in apcupsd input.
+- [#6398](https://github.com/influxdata/telegraf/issues/6398): Keep boolean values listed in json_string_fields.
+- [#6393](https://github.com/influxdata/telegraf/issues/6393): Disable Go plugin support in official builds.
+- [#6391](https://github.com/influxdata/telegraf/issues/6391): Fix path handling issues in cisco_telemetry_gnmi.
+
+## v1.12.1 [2019-09-10]
+
+#### Bug Fixes
+
+- [#6344](https://github.com/influxdata/telegraf/issues/6344): Fix depends on GLIBC_2.14 symbol version.
+- [#6329](https://github.com/influxdata/telegraf/issues/6329): Fix filecount for paths with trailing slash.
+- [#6331](https://github.com/influxdata/telegraf/issues/6331): Convert check state to an integer in icinga2 input.
+- [#6354](https://github.com/influxdata/telegraf/issues/6354): Fix could not mark message delivered error in kafka_consumer.
+- [#6362](https://github.com/influxdata/telegraf/issues/6362): Skip collection stats when disabled in mongodb input.
+- [#6366](https://github.com/influxdata/telegraf/issues/6366): Fix error reading closed response body on redirect in http_response.
+- [#6373](https://github.com/influxdata/telegraf/issues/6373): Fix apcupsd documentation to reflect plugin.
+- [#6375](https://github.com/influxdata/telegraf/issues/6375): Display retry log message only when retry after is received.
+
+## v1.12 [2019-09-03]
+
+#### Release Notes
+
+- The cluster health related fields in the elasticsearch input have been split
+ out from the `elasticsearch_indices` measurement into the new
+ `elasticsearch_cluster_health_indices` measurement as they were originally
+ combined by error.
+
+#### New Inputs
+
+- [apcupsd](/plugins/inputs/apcupsd/README.md) - Contributed by @jonaz
+- [docker_log](/plugins/inputs/docker_log/README.md) - Contributed by @prashanthjbabu
+- [fireboard](/plugins/inputs/fireboard/README.md) - Contributed by @ronnocol
+- [logstash](/plugins/inputs/logstash/README.md) - Contributed by @lkmcs @dmitryilyin @arkady-emelyanov
+- [marklogic](/plugins/inputs/marklogic/README.md) - Contributed by @influxdata
+- [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer
+- [uwsgi](/plugins/inputs/uwsgi/README.md) - Contributed by @blaggacao
+
+#### New Parsers
+
+- [form_urlencoded](/plugins/parsers/form_urlencoded/README.md) - Contributed by @byonchev
+
+#### New Processors
+
+- [date](/plugins/processors/date/README.md) - Contributed by @influxdata
+- [pivot](/plugins/processors/pivot/README.md) - Contributed by @influxdata
+- [tag_limit](/plugins/processors/tag_limit/README.md) - Contributed by @memory
+- [unpivot](/plugins/processors/unpivot/README.md) - Contributed by @influxdata
+
+#### New Outputs
+
+- [exec](/plugins/outputs/exec/README.md) - Contributed by @Jaeyo
+
+#### Features
+
+- [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer.
+- [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values.
+- [#5997](https://github.com/influxdata/telegraf/pull/5997): Add starttime field to phpfpm input.
+- [#5998](https://github.com/influxdata/telegraf/pull/5998): Add cluster name tag to elasticsearch indices.
+- [#6006](https://github.com/influxdata/telegraf/pull/6006): Add support for interface field in http_response input plugin.
+- [#5996](https://github.com/influxdata/telegraf/pull/5996): Add container uptime_ns in docker input plugin.
+- [#6016](https://github.com/influxdata/telegraf/pull/6016): Add better user-facing errors for API timeouts in docker input.
+- [#6027](https://github.com/influxdata/telegraf/pull/6027): Add TLS mutual auth support to jti_openconfig_telemetry input.
+- [#6053](https://github.com/influxdata/telegraf/pull/6053): Add support for ES 7.x to elasticsearch output.
+- [#6062](https://github.com/influxdata/telegraf/pull/6062): Add basic auth to prometheus input plugin.
+- [#6064](https://github.com/influxdata/telegraf/pull/6064): Add node roles tag to elasticsearch input.
+- [#5572](https://github.com/influxdata/telegraf/pull/5572): Support floats in statsd percentiles.
+- [#6050](https://github.com/influxdata/telegraf/pull/6050): Add native Go ping method to ping input plugin.
+- [#6074](https://github.com/influxdata/telegraf/pull/6074): Resume from last known offset in tail inputwhen reloading Telegraf.
+- [#6111](https://github.com/influxdata/telegraf/pull/6111): Add improved support for Azure SQL Database to sqlserver input.
+- [#6079](https://github.com/influxdata/telegraf/pull/6079): Add extra attributes for NVMe devices to smart input.
+- [#6084](https://github.com/influxdata/telegraf/pull/6084): Add docker_devicemapper measurement to docker input plugin.
+- [#6122](https://github.com/influxdata/telegraf/pull/6122): Add basic auth support to elasticsearch input.
+- [#6102](https://github.com/influxdata/telegraf/pull/6102): Support string field glob matching in json parser.
+- [#6101](https://github.com/influxdata/telegraf/pull/6101): Update gjson to allow multipath syntax in json parser.
+- [#6144](https://github.com/influxdata/telegraf/pull/6144): Add support for collecting SQL Requests to identify waits and blocking to sqlserver input.
+- [#6105](https://github.com/influxdata/telegraf/pull/6105): Collect k8s endpoints, ingress, and services in kube_inventory plugin.
+- [#6129](https://github.com/influxdata/telegraf/pull/6129): Add support for field/tag keys to strings processor.
+- [#6143](https://github.com/influxdata/telegraf/pull/6143): Add certificate verification status to x509_cert input.
+- [#6163](https://github.com/influxdata/telegraf/pull/6163): Support percentage value parsing in redis input.
+- [#6024](https://github.com/influxdata/telegraf/pull/6024): Load external Go plugins from --plugin-directory.
+- [#6184](https://github.com/influxdata/telegraf/pull/6184): Add ability to exclude db/bucket tag from influxdb outputs.
+- [#6137](https://github.com/influxdata/telegraf/pull/6137): Gather per collections stats in mongodb input plugin.
+- [#6195](https://github.com/influxdata/telegraf/pull/6195): Add TLS & credentials configuration for nats_consumer input plugin.
+- [#6194](https://github.com/influxdata/telegraf/pull/6194): Add support for enterprise repos to github plugin.
+- [#6060](https://github.com/influxdata/telegraf/pull/6060): Add Indices stats to elasticsearch input.
+- [#6189](https://github.com/influxdata/telegraf/pull/6189): Add left function to string processor.
+- [#6049](https://github.com/influxdata/telegraf/pull/6049): Add grace period for metrics late for aggregation.
+- [#4435](https://github.com/influxdata/telegraf/pull/4435): Add diff and non_negative_diff to basicstats aggregator.
+- [#6201](https://github.com/influxdata/telegraf/pull/6201): Add device tags to smart_attributes.
+- [#5719](https://github.com/influxdata/telegraf/pull/5719): Collect framework_offers and allocator metrics in mesos input.
+- [#6216](https://github.com/influxdata/telegraf/pull/6216): Add telegraf and go version to the internal input plugin.
+- [#6214](https://github.com/influxdata/telegraf/pull/6214): Update the number of logical CPUs dynamically in system plugin.
+- [#6259](https://github.com/influxdata/telegraf/pull/6259): Add darwin (macOS) builds to the release.
+- [#6241](https://github.com/influxdata/telegraf/pull/6241): Add configurable timeout setting to smart input.
+- [#6249](https://github.com/influxdata/telegraf/pull/6249): Add memory_usage field to procstat input plugin.
+- [#5971](https://github.com/influxdata/telegraf/pull/5971): Add support for custom attributes to vsphere input.
+- [#5926](https://github.com/influxdata/telegraf/pull/5926): Add cmdstat metrics to redis input.
+- [#6261](https://github.com/influxdata/telegraf/pull/6261): Add content_length metric to http_response input plugin.
+- [#6257](https://github.com/influxdata/telegraf/pull/6257): Add database_tag option to influxdb_listener to add database from query string.
+- [#6246](https://github.com/influxdata/telegraf/pull/6246): Add capability to limit TLS versions and cipher suites.
+- [#6266](https://github.com/influxdata/telegraf/pull/6266): Add topic_tag option to mqtt_consumer.
+- [#6207](https://github.com/influxdata/telegraf/pull/6207): Add ability to label inputs for logging.
+- [#6300](https://github.com/influxdata/telegraf/pull/6300): Add TLS support to nginx_plus, nginx_plus_api and nginx_vts.
+
+#### Bug Fixes
+
+- [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input.
+- [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input.
+- [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input.
+- [#6100](https://github.com/influxdata/telegraf/issues/6100): Fix SSPI authentication not working in sqlserver input.
+- [#6142](https://github.com/influxdata/telegraf/issues/6142): Fix memory error panic in mqtt input.
+- [#6136](https://github.com/influxdata/telegraf/issues/6136): Support Kafka 2.3.0 consumer groups.
+- [#6232](https://github.com/influxdata/telegraf/issues/6232): Fix persistent session in mqtt_consumer.
+- [#6235](https://github.com/influxdata/telegraf/issues/6235): Fix finder inconsistencies in vsphere input.
+- [#6138](https://github.com/influxdata/telegraf/issues/6138): Fix parsing multiple metrics on the first line of tailed file.
+- [#2526](https://github.com/influxdata/telegraf/issues/2526): Send TERM to exec processes before sending KILL signal.
+- [#5326](https://github.com/influxdata/telegraf/issues/5326): Query oplog only when connected to a replica set.
+- [#6317](https://github.com/influxdata/telegraf/pull/6317): Use environment variables to locate Program Files on Windows.
+
+## v1.11.5 [2019-08-27]
+
+#### Bug Fixes
+
+- [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues.
+- [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error.
+- [#6309](https://github.com/influxdata/telegraf/issues/6309): Fix with multiple instances only last configuration is used in smart input.
+- [#6303](https://github.com/influxdata/telegraf/pull/6303): Build official packages with Go 1.12.9.
+- [#6234](https://github.com/influxdata/telegraf/issues/6234): Split out -w argument in iptables input.
+- [#6270](https://github.com/influxdata/telegraf/issues/6270): Add support for parked process state on Linux.
+- [#6287](https://github.com/influxdata/telegraf/issues/6287): Remove leading slash from rcon command.
+- [#6313](https://github.com/influxdata/telegraf/pull/6313): Allow jobs with dashes in the name in lustre2 input.
+
+## v1.11.4 [2019-08-06]
+
+#### Bug Fixes
+
+- [#6200](https://github.com/influxdata/telegraf/pull/6200): Correct typo in kubernetes logsfs_available_bytes field.
+- [#6191](https://github.com/influxdata/telegraf/issues/6191): Skip floats that are NaN or Inf in Datadog output.
+- [#6209](https://github.com/influxdata/telegraf/issues/6209): Fix reload panic in socket_listener input plugin.
+
+## v1.11.3 [2019-07-23]
+
+#### Bug Fixes
+
+- [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input.
+- [#6073](https://github.com/influxdata/telegraf/issues/6073): Handle unknown error in nvidia-smi output.
+- [#6121](https://github.com/influxdata/telegraf/pull/6121): Fix panic in statd input when processing datadog events.
+- [#6125](https://github.com/influxdata/telegraf/issues/6125): Treat empty array as successful parse in json parser.
+- [#6094](https://github.com/influxdata/telegraf/issues/6094): Add missing rcode and zonestat to bind input.
+- [#6114](https://github.com/influxdata/telegraf/issues/6114): Fix lustre2 input plugin config parse regression.
+- [#5894](https://github.com/influxdata/telegraf/issues/5894): Fix template pattern partial wildcard matching.
+- [#6151](https://github.com/influxdata/telegraf/issues/6151): Fix panic in github input.
+
+## v1.11.2 [2019-07-09]
+
+#### Bug Fixes
+
+- [#6056](https://github.com/influxdata/telegraf/pull/6056): Fix source address ping flag on BSD.
+- [#6059](https://github.com/influxdata/telegraf/issues/6059): Fix value out of range error on 32-bit systems in bind input.
+- [#3573](https://github.com/influxdata/telegraf/issues/3573): Fix tail and logparser stop working after reload.
+- [#6077](https://github.com/influxdata/telegraf/pull/6077): Fix filecount path separator handling in Windows.
+- [#6075](https://github.com/influxdata/telegraf/issues/6075): Fix panic with empty datadog tag string.
+- [#6069](https://github.com/influxdata/telegraf/issues/6069): Apply topic filter to partition metrics in burrow input.
+
+## v1.11.1 [2019-06-25]
+
+#### Bug Fixes
+
+- [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input.
+- [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry.
+- [#5972](https://github.com/influxdata/telegraf/issues/5972): Don't consider pid of 0 when using systemd lookup in procstat.
+- [#5807](https://github.com/influxdata/telegraf/issues/5807): Skip 404 error reporting in nginx_plus_api input.
+- [#5999](https://github.com/influxdata/telegraf/issues/5999): Fix panic if pool_mode column does not exist.
+- [#6019](https://github.com/influxdata/telegraf/issues/6019): Add missing container_id field to docker_container_status metrics.
+- [#5742](https://github.com/influxdata/telegraf/issues/5742): Ignore error when utmp is missing in system input.
+- [#6032](https://github.com/influxdata/telegraf/issues/6032): Add device, serial_no, and wwn tags to synthetic attributes.
+- [#6012](https://github.com/influxdata/telegraf/issues/6012): Fix parsing of remote tcp address in statsd input.
+
+## v1.11 [2019-06-11]
#### Release Notes
- The `uptime_format` field in the system input has been deprecated, use the
`uptime` field instead.
+- The `cloudwatch` input has been updated to use a more efficient API, it now
+ requires `GetMetricData` permissions instead of `GetMetricStatistics`. The
+ `units` tag is not available from this API and is no longer collected.
#### New Inputs
- [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek
+- [cisco_telemetry_gnmi](/plugins/inputs/cisco_telemetry_gnmi/README.md) - Contributed by @sbyx
+- [cisco_telemetry_mdt](/plugins/inputs/cisco_telemetry_mdt/README.md) - Contributed by @sbyx
+- [ecs](/plugins/inputs/ecs/README.md) - Contributed by @rbtr
- [github](/plugins/inputs/github/README.md) - Contributed by @influxdata
+- [openweathermap](/plugins/inputs/openweathermap/README.md) - Contributed by @regel
+- [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje
+
+#### New Aggregators
+
+- [final](/plugins/aggregators/final/README.md) - Contributed by @oplehto
+
+#### New Outputs
+
+- [syslog](/plugins/outputs/syslog/README.md) - Contributed by @javicrespo
+- [health](/plugins/outputs/health/README.md) - Contributed by @influxdata
#### New Serializers
@@ -25,16 +722,71 @@
- [#5697](https://github.com/influxdata/telegraf/pull/5697): Add namespace restriction to prometheus input plugin.
- [#5681](https://github.com/influxdata/telegraf/pull/5681): Add cmdline tag to procstat input.
- [#5704](https://github.com/influxdata/telegraf/pull/5704): Support verbose query param in ping endpoint of influxdb_listener.
-
-#### Bugfixes
-
+- [#5713](https://github.com/influxdata/telegraf/pull/5713): Enhance HTTP connection options for phpfpm input plugin.
+- [#5544](https://github.com/influxdata/telegraf/pull/5544): Use more efficient GetMetricData API to collect cloudwatch metrics.
+- [#5544](https://github.com/influxdata/telegraf/pull/5544): Allow selection of collected statistic types in cloudwatch input.
+- [#5757](https://github.com/influxdata/telegraf/pull/5757): Speed up interface stat collection in net input.
+- [#5769](https://github.com/influxdata/telegraf/pull/5769): Add pagefault data to procstat input plugin.
+- [#5760](https://github.com/influxdata/telegraf/pull/5760): Add option to set permissions for unix domain sockets to socket_listener.
+- [#5585](https://github.com/influxdata/telegraf/pull/5585): Add cli support for outputting sections of the config.
+- [#5770](https://github.com/influxdata/telegraf/pull/5770): Add service-display-name option for use with Windows service.
+- [#5778](https://github.com/influxdata/telegraf/pull/5778): Add support for log rotation.
+- [#5765](https://github.com/influxdata/telegraf/pull/5765): Support more drive types in smart input.
+- [#5829](https://github.com/influxdata/telegraf/pull/5829): Add support for HTTP basic auth to solr input.
+- [#5791](https://github.com/influxdata/telegraf/pull/5791): Add support for datadog events to statsd input.
+- [#5817](https://github.com/influxdata/telegraf/pull/5817): Allow devices option to match against devlinks.
+- [#5855](https://github.com/influxdata/telegraf/pull/5855): Support tags in enum processor.
+- [#5830](https://github.com/influxdata/telegraf/pull/5830): Add support for gzip compression to amqp plugins.
+- [#5831](https://github.com/influxdata/telegraf/pull/5831): Support passive queue declaration in amqp_consumer.
+- [#5901](https://github.com/influxdata/telegraf/pull/5901): Set user agent in stackdriver output.
+- [#5885](https://github.com/influxdata/telegraf/pull/5885): Extend metrics collected from Nvidia GPUs.
+- [#5547](https://github.com/influxdata/telegraf/pull/5547): Add file rotation support to the file output.
+- [#5955](https://github.com/influxdata/telegraf/pull/5955): Add source tag to hddtemp plugin.
+
+#### Bug Fixes
+
+- [#5692](https://github.com/influxdata/telegraf/pull/5692): Temperature input plugin stops working when WiFi is turned off.
- [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager.
-
-## v1.10.3 [unreleased]
-
-#### Bugfixes
+- [#5730](https://github.com/influxdata/telegraf/pull/5730): Don't start telegraf when stale pidfile found.
+- [#5477](https://github.com/influxdata/telegraf/pull/5477): Support Minecraft server 1.13 and newer in minecraft input.
+- [#4098](https://github.com/influxdata/telegraf/issues/4098): Fix inline table support in configuration file.
+- [#1598](https://github.com/influxdata/telegraf/issues/1598): Fix multi-line basic strings support in configuration file.
+- [#5746](https://github.com/influxdata/telegraf/issues/5746): Verify a process passed by pid_file exists in procstat input.
+- [#5455](https://github.com/influxdata/telegraf/issues/5455): Fix unsupported pkt type error in pgbouncer.
+- [#5771](https://github.com/influxdata/telegraf/pull/5771): Fix only one job per storage target reported in lustre2 input.
+- [#5796](https://github.com/influxdata/telegraf/issues/5796): Set default timeout of 5s in fibaro input.
+- [#5835](https://github.com/influxdata/telegraf/issues/5835): Fix docker input does not parse image name correctly.
+- [#5661](https://github.com/influxdata/telegraf/issues/5661): Fix direct exchange routing key in amqp output.
+- [#5819](https://github.com/influxdata/telegraf/issues/5819): Fix scale set resource id with azure_monitor output.
+- [#5883](https://github.com/influxdata/telegraf/issues/5883): Skip invalid power times in apex_neptune input.
+- [#3485](https://github.com/influxdata/telegraf/issues/3485): Fix sqlserver connection closing on error.
+- [#5917](https://github.com/influxdata/telegraf/issues/5917): Fix toml option name in nginx_upstream_check.
+- [#5920](https://github.com/influxdata/telegraf/issues/5920): Fixed datastore name mapping in vsphere input.
+- [#5879](https://github.com/influxdata/telegraf/issues/5879): Fix multiple SIGHUP causes Telegraf to shutdown.
+- [#5891](https://github.com/influxdata/telegraf/issues/5891): Fix connection leak in influxdb outputs on reload.
+- [#5858](https://github.com/influxdata/telegraf/issues/5858): Fix batch fails when single metric is unserializable.
+- [#5536](https://github.com/influxdata/telegraf/issues/5536): Log a warning on write if the metric buffer has overflowed.
+
+## v1.10.4 [2019-05-14]
+
+#### Bug Fixes
+
+- [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser.
+- [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet.
+- [#5792](https://github.com/influxdata/telegraf/pull/5792): Don't discard metrics on forbidden error in influxdb_v2 output.
+- [#5803](https://github.com/influxdata/telegraf/issues/5803): Fix http output cannot set Host header.
+- [#5619](https://github.com/influxdata/telegraf/issues/5619): Fix interval estimation in vsphere input.
+- [#5782](https://github.com/influxdata/telegraf/pull/5782): Skip lines with missing refid in ntpq input.
+- [#5755](https://github.com/influxdata/telegraf/issues/5755): Add support for hex values to ipmi_sensor input.
+- [#5824](https://github.com/influxdata/telegraf/issues/5824): Fix parse of unix timestamp with more than ns precision.
+- [#5836](https://github.com/influxdata/telegraf/issues/5836): Restore field name case in interrupts input.
+
+## v1.10.3 [2019-04-16]
+
+#### Bug Fixes
- [#5680](https://github.com/influxdata/telegraf/pull/5680): Allow colons in metric names in prometheus_client output.
+- [#5716](https://github.com/influxdata/telegraf/pull/5716): Set log directory attributes in rpm spec.
## v1.10.2 [2019-04-02]
@@ -44,7 +796,7 @@
the grok parser. If you are capturing quoted strings you may need to update
the patterns.
-#### Bugfixes
+#### Bug Fixes
- [#5612](https://github.com/influxdata/telegraf/pull/5612): Fix deadlock when Telegraf is aligning aggregators.
- [#5523](https://github.com/influxdata/telegraf/issues/5523): Fix missing cluster stats in ceph input.
@@ -64,7 +816,7 @@
## v1.10.1 [2019-03-19]
-#### Bugfixes
+#### Bug Fixes
- [#5448](https://github.com/influxdata/telegraf/issues/5448): Show error when TLS configuration cannot be loaded.
- [#5543](https://github.com/influxdata/telegraf/pull/5543): Add Base64-encoding/decoding for Google Cloud PubSub plugins.
@@ -134,7 +886,7 @@
- [#5490](https://github.com/influxdata/telegraf/pull/5490): Add tag based routing in influxdb/influxdb_v2 outputs.
- [#5533](https://github.com/influxdata/telegraf/pull/5533): Allow grok parser to produce metrics with no fields.
-#### Bugfixes
+#### Bug Fixes
- [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process.
- [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input.
@@ -150,7 +902,7 @@
## v1.9.5 [2019-02-26]
-#### Bugfixes
+#### Bug Fixes
- [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output.
- [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output.
@@ -164,7 +916,7 @@
## v1.9.4 [2019-02-05]
-#### Bugfixes
+#### Bug Fixes
- [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser.
- [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input.
@@ -173,18 +925,18 @@
## v1.9.3 [2019-01-22]
-#### Bugfixes
+#### Bug Fixes
- [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input.
- [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails.
-- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages.
+- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparseable messages.
- [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods.
- [#5215](https://github.com/influxdata/telegraf/issues/5215): Remove userinfo from cluster tag in couchbase.
- [#5298](https://github.com/influxdata/telegraf/issues/5298): Fix internal_write buffer_size not reset on timed writes.
## v1.9.2 [2019-01-08]
-#### Bugfixes
+#### Bug Fixes
- [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout.
- [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version.
@@ -203,7 +955,7 @@
## v1.9.1 [2018-12-11]
-#### Bugfixes
+#### Bug Fixes
- [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer.
- [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input.
@@ -272,7 +1024,7 @@
- [#4920](https://github.com/influxdata/telegraf/pull/4920): Add scraping for Prometheus endpoint in Kubernetes.
- [#4938](https://github.com/influxdata/telegraf/pull/4938): Add per output flush_interval, metric_buffer_limit and metric_batch_size.
-#### Bugfixes
+#### Bug Fixes
- [#4950](https://github.com/influxdata/telegraf/pull/4950): Remove the time_key from the field values in JSON parser.
- [#3968](https://github.com/influxdata/telegraf/issues/3968): Fix input time rounding when using a custom interval.
@@ -285,7 +1037,7 @@
## v1.8.3 [2018-10-30]
-### Bugfixes
+### Bug Fixes
- [#4873](https://github.com/influxdata/telegraf/pull/4873): Add DN attributes as tags in x509_cert input to avoid series overwrite.
- [#4921](https://github.com/influxdata/telegraf/issues/4921): Prevent connection leak by closing unused connections in amqp output.
@@ -297,7 +1049,7 @@
## v1.8.2 [2018-10-17]
-### Bugfixes
+### Bug Fixes
- [#4844](https://github.com/influxdata/telegraf/pull/4844): Update write path to match updated InfluxDB v2 API.
- [#4840](https://github.com/influxdata/telegraf/pull/4840): Fix missing timeouts in vsphere input.
@@ -312,7 +1064,7 @@
## v1.8.1 [2018-10-03]
-### Bugfixes
+### Bug Fixes
- [#4750](https://github.com/influxdata/telegraf/pull/4750): Fix hardware_type may be truncated in sqlserver input.
- [#4723](https://github.com/influxdata/telegraf/issues/4723): Improve performance in basicstats aggregator.
@@ -436,7 +1188,7 @@
- [#4682](https://github.com/influxdata/telegraf/pull/4682): Allow alternate binaries for iptables input plugin.
- [#4645](https://github.com/influxdata/telegraf/pull/4645): Add influxdb_v2 output plugin.
-### Bugfixes
+### Bug Fixes
- [#3438](https://github.com/influxdata/telegraf/issues/3438): Fix divide by zero in logparser input.
- [#4499](https://github.com/influxdata/telegraf/issues/4499): Fix instance and object name in performance counters with backslashes.
@@ -453,7 +1205,7 @@
## v1.7.4 [2018-08-29]
-### Bugfixes
+### Bug Fixes
- [#4534](https://github.com/influxdata/telegraf/pull/4534): Skip unserializable metric in influxDB UDP output.
- [#4554](https://github.com/influxdata/telegraf/pull/4554): Fix powerdns input tests.
@@ -466,7 +1218,7 @@
## v1.7.3 [2018-08-07]
-### Bugfixes
+### Bug Fixes
- [#4434](https://github.com/influxdata/telegraf/issues/4434): Reduce required docker API version.
- [#4498](https://github.com/influxdata/telegraf/pull/4498): Keep leading whitespace for messages in syslog input.
@@ -478,7 +1230,7 @@
## v1.7.2 [2018-07-18]
-### Bugfixes
+### Bug Fixes
- [#4381](https://github.com/influxdata/telegraf/issues/4381): Use localhost as default server tag in zookeeper input.
- [#4374](https://github.com/influxdata/telegraf/issues/4374): Don't set values when pattern doesn't match in regex processor.
@@ -490,7 +1242,7 @@
## v1.7.1 [2018-07-03]
-### Bugfixes
+### Bug Fixes
- [#4277](https://github.com/influxdata/telegraf/pull/4277): Treat sigterm as a clean shutdown signal.
- [#4284](https://github.com/influxdata/telegraf/pull/4284): Fix selection of tags under nested objects in the JSON parser.
@@ -567,7 +1319,7 @@
- [#3995](https://github.com/influxdata/telegraf/pull/3995): Add passive mode exchange declaration option to amqp consumer input.
- [#4216](https://github.com/influxdata/telegraf/pull/4216): Add counter fields to pf input.
-### Bugfixes
+### Bug Fixes
- [#4018](https://github.com/influxdata/telegraf/pull/4018): Write to working file outputs if any files are not writeable.
- [#4036](https://github.com/influxdata/telegraf/pull/4036): Add all win_perf_counters fields for a series in a single metric.
@@ -581,7 +1333,7 @@
## v1.6.4 [2018-06-05]
-### Bugfixes
+### Bug Fixes
- [#4203](https://github.com/influxdata/telegraf/issues/4203): Fix snmp overriding of auto-configured table fields.
- [#4218](https://github.com/influxdata/telegraf/issues/4218): Fix uint support in cloudwatch output.
@@ -590,7 +1342,7 @@
## v1.6.3 [2018-05-21]
-### Bugfixes
+### Bug Fixes
- [#4127](https://github.com/influxdata/telegraf/issues/4127): Fix intermittent panic in aerospike input.
- [#4130](https://github.com/influxdata/telegraf/issues/4130): Fix connection leak in jolokia2_agent.
@@ -601,7 +1353,7 @@
## v1.6.2 [2018-05-08]
-### Bugfixes
+### Bug Fixes
- [#4078](https://github.com/influxdata/telegraf/pull/4078): Use same timestamp for fields in system input.
- [#4091](https://github.com/influxdata/telegraf/pull/4091): Fix handling of uint64 in datadog output.
@@ -611,7 +1363,7 @@
## v1.6.1 [2018-04-23]
-### Bugfixes
+### Bug Fixes
- [#3835](https://github.com/influxdata/telegraf/issues/3835): Report mem input fields as gauges instead counters.
- [#4030](https://github.com/influxdata/telegraf/issues/4030): Fix graphite outputs unsigned integers in wrong format.
@@ -628,7 +1380,7 @@
### Release Notes
-- The `mysql` input plugin has been updated fix a number of type convertion
+- The `mysql` input plugin has been updated fix a number of type conversion
issues. This may cause a `field type error` when inserting into InfluxDB due
the change of types.
@@ -720,7 +1472,7 @@
- [#3811](https://github.com/influxdata/telegraf/pull/3811): Add TLS support to zookeeper input.
- [#2737](https://github.com/influxdata/telegraf/issues/2737): Add filters for container state to docker input.
-### Bugfixes
+### Bug Fixes
- [#1896](https://github.com/influxdata/telegraf/issues/1896): Fix various mysql data type conversions.
- [#3810](https://github.com/influxdata/telegraf/issues/3810): Fix metric buffer limit in internal plugin after reload.
@@ -737,7 +1489,7 @@
## v1.5.3 [2018-03-14]
-### Bugfixes
+### Bug Fixes
- [#3729](https://github.com/influxdata/telegraf/issues/3729): Set path to / if HOST_MOUNT_PREFIX matches full path.
- [#3739](https://github.com/influxdata/telegraf/issues/3739): Remove userinfo from url tag in prometheus input.
@@ -749,7 +1501,7 @@
## v1.5.2 [2018-01-30]
-### Bugfixes
+### Bug Fixes
- [#3684](https://github.com/influxdata/telegraf/pull/3684): Ignore empty lines in Graphite plaintext.
- [#3604](https://github.com/influxdata/telegraf/issues/3604): Fix index out of bounds error in solr input plugin.
@@ -763,7 +1515,7 @@
## v1.5.1 [2018-01-10]
-### Bugfixes
+### Bug Fixes
- [#3624](https://github.com/influxdata/telegraf/pull/3624): Fix name error in jolokia2_agent sample config.
- [#3625](https://github.com/influxdata/telegraf/pull/3625): Fix DC/OS login expiration time.
@@ -859,7 +1611,7 @@
- [#3528](https://github.com/influxdata/telegraf/pull/3528): Add option to amqp output to publish persistent messages.
- [#3530](https://github.com/influxdata/telegraf/pull/3530): Support I (idle) process state on procfs+Linux.
-### Bugfixes
+### Bug Fixes
- [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload.
- [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock.
@@ -878,7 +1630,7 @@
## v1.4.5 [2017-12-01]
-### Bugfixes
+### Bug Fixes
- [#3500](https://github.com/influxdata/telegraf/issues/3500): Fix global variable collection when using interval_slow option in mysql input.
- [#3486](https://github.com/influxdata/telegraf/issues/3486): Fix error getting net connections info in netstat input.
@@ -886,7 +1638,7 @@
## v1.4.4 [2017-11-08]
-### Bugfixes
+### Bug Fixes
- [#3401](https://github.com/influxdata/telegraf/pull/3401): Use schema specified in mqtt_consumer input.
- [#3419](https://github.com/influxdata/telegraf/issues/3419): Redact datadog API key in log output.
@@ -896,7 +1648,7 @@
## v1.4.3 [2017-10-25]
-### Bugfixes
+### Bug Fixes
- [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input.
- [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input.
@@ -912,7 +1664,7 @@
## v1.4.2 [2017-10-10]
-### Bugfixes
+### Bug Fixes
- [#3259](https://github.com/influxdata/telegraf/issues/3259): Fix error if int larger than 32-bit in /proc/vmstat.
- [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson.
@@ -926,7 +1678,7 @@
## v1.4.1 [2017-09-26]
-### Bugfixes
+### Bug Fixes
- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
@@ -1003,7 +1755,7 @@
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.
-### Bugfixes
+### Bug Fixes
- [#2607](https://github.com/influxdata/telegraf/issues/2607): Improve logging of errors in Cassandra input.
- [#2819](https://github.com/influxdata/telegraf/pull/2819): [enh] set db_version at 0 if query version fails
@@ -1030,7 +1782,7 @@
- [#3058](https://github.com/influxdata/telegraf/issues/3058): Allow iptable entries with trailing text.
- [#1680](https://github.com/influxdata/telegraf/issues/1680): Sanitize password from couchbase metric.
- [#3104](https://github.com/influxdata/telegraf/issues/3104): Converge to typed value in prometheus output.
-- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilcation of logparser and tail on solaris.
+- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris.
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
@@ -1046,7 +1798,7 @@
## v1.3.5 [2017-07-26]
-### Bugfixes
+### Bug Fixes
- [#3049](https://github.com/influxdata/telegraf/issues/3049): Fix prometheus output cannot be reloaded.
- [#3037](https://github.com/influxdata/telegraf/issues/3037): Fix filestat reporting exists when cannot list directory.
@@ -1055,7 +1807,7 @@
## v1.3.4 [2017-07-12]
-### Bugfixes
+### Bug Fixes
- [#3001](https://github.com/influxdata/telegraf/issues/3001): Fix handling of escape characters within fields.
- [#2988](https://github.com/influxdata/telegraf/issues/2988): Fix chrony plugin does not track system time offset.
@@ -1064,7 +1816,7 @@
## v1.3.3 [2017-06-28]
-### Bugfixes
+### Bug Fixes
- [#2915](https://github.com/influxdata/telegraf/issues/2915): Allow dos line endings in tail and logparser.
- [#2937](https://github.com/influxdata/telegraf/issues/2937): Remove label value sanitization in prometheus output.
@@ -1073,7 +1825,7 @@
## v1.3.2 [2017-06-14]
-### Bugfixes
+### Bug Fixes
- [#2862](https://github.com/influxdata/telegraf/issues/2862): Fix InfluxDB UDP metric splitting.
- [#2888](https://github.com/influxdata/telegraf/issues/2888): Fix mongodb/leofs urls without scheme.
@@ -1081,7 +1833,7 @@
## v1.3.1 [2017-05-31]
-### Bugfixes
+### Bug Fixes
- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
@@ -1176,7 +1928,7 @@ be deprecated eventually.
- [#2712](https://github.com/influxdata/telegraf/issues/2712): Documentation for rabbitmq input plugin
- [#2141](https://github.com/influxdata/telegraf/pull/2141): Logparser handles newly-created files.
-### Bugfixes
+### Bug Fixes
- [#2633](https://github.com/influxdata/telegraf/pull/2633): ipmi_sensor: allow @ symbol in password
- [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
@@ -1218,7 +1970,7 @@ be deprecated eventually.
## v1.2.1 [2017-02-01]
-### Bugfixes
+### Bug Fixes
- [#2317](https://github.com/influxdata/telegraf/issues/2317): Fix segfault on nil metrics with influxdb output.
- [#2324](https://github.com/influxdata/telegraf/issues/2324): Fix negative number handling.
@@ -1275,7 +2027,7 @@ plugins, not just statsd.
- [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin
- [#2255](https://github.com/influxdata/telegraf/pull/2255): Allow changing jolokia attribute delimiter
-### Bugfixes
+### Bug Fixes
- [#2049](https://github.com/influxdata/telegraf/pull/2049): Fix the Value data format not trimming null characters from input.
- [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
@@ -1303,14 +2055,14 @@ plugins, not just statsd.
## v1.1.2 [2016-12-12]
-### Bugfixes
+### Bug Fixes
- [#2007](https://github.com/influxdata/telegraf/issues/2007): Make snmptranslate not required when using numeric OID.
- [#2104](https://github.com/influxdata/telegraf/issues/2104): Add a global snmp translation cache.
## v1.1.1 [2016-11-14]
-### Bugfixes
+### Bug Fixes
- [#2023](https://github.com/influxdata/telegraf/issues/2023): Fix issue parsing toml durations with single quotes.
@@ -1357,7 +2109,7 @@ continue sending logs to /var/log/telegraf/telegraf.log.
- [#1700](https://github.com/influxdata/telegraf/pull/1700): HAProxy plugin socket glob matching.
- [#1847](https://github.com/influxdata/telegraf/pull/1847): Add Kubernetes plugin for retrieving pod metrics.
-### Bugfixes
+### Bug Fixes
- [#1955](https://github.com/influxdata/telegraf/issues/1955): Fix NATS plug-ins reconnection logic.
- [#1936](https://github.com/influxdata/telegraf/issues/1936): Set required default values in udp_listener & tcp_listener.
@@ -1393,7 +2145,7 @@ continue sending logs to /var/log/telegraf/telegraf.log.
## v1.0.1 [2016-09-26]
-### Bugfixes
+### Bug Fixes
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Prometheus output: Fix bug with multi-batch writes.
- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags.
@@ -1477,7 +2229,7 @@ consistent with the behavior of `collection_jitter`.
- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL
-- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input pluging.
+- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
@@ -1496,7 +2248,7 @@ consistent with the behavior of `collection_jitter`.
- [#1414](https://github.com/influxdata/telegraf/pull/1414): Forking sensors command to remove C package dependency.
- [#1389](https://github.com/influxdata/telegraf/pull/1389): Add a new SNMP plugin.
-### Bugfixes
+### Bug Fixes
- [#1619](https://github.com/influxdata/telegraf/issues/1619): Fix `make windows` build target
- [#1519](https://github.com/influxdata/telegraf/pull/1519): Fix error race conditions and partial failures.
@@ -1570,7 +2322,7 @@ to "stdout".
- [#479](https://github.com/influxdata/telegraf/issues/479): per-plugin execution time added to debug output.
- [#1249](https://github.com/influxdata/telegraf/issues/1249): influxdb output: added write_consistency argument.
-### Bugfixes
+### Bug Fixes
- [#1195](https://github.com/influxdata/telegraf/pull/1195): Docker panic on timeout. Thanks @zstyblik!
- [#1211](https://github.com/influxdata/telegraf/pull/1211): mongodb input. Fix possible panic. Thanks @kols!
@@ -1656,7 +2408,7 @@ It is not included on the report path. This is necessary for reporting host disk
- [#1041](https://github.com/influxdata/telegraf/issues/1041): Add `n_cpus` field to the system plugin.
- [#1072](https://github.com/influxdata/telegraf/pull/1072): New Input Plugin: filestat.
- [#1066](https://github.com/influxdata/telegraf/pull/1066): Replication lag metrics for MongoDB input plugin
-- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengleman!
+- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengelman!
- [#1096](https://github.com/influxdata/telegraf/pull/1096): Performance refactor of running output buffers.
- [#967](https://github.com/influxdata/telegraf/issues/967): Buffer logging improvements.
- [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja!
@@ -1664,7 +2416,7 @@ It is not included on the report path. This is necessary for reporting host disk
- [#1128](https://github.com/influxdata/telegraf/pull/1128): MongoDB jumbo chunks metric for MongoDB input plugin
- [#1146](https://github.com/influxdata/telegraf/pull/1146): HAProxy socket support. Thanks weshmashian!
-### Bugfixes
+### Bug Fixes
- [#1050](https://github.com/influxdata/telegraf/issues/1050): jolokia plugin - do not overwrite host tag. Thanks @saiello!
- [#921](https://github.com/influxdata/telegraf/pull/921): mqtt_consumer stops gathering metrics. Thanks @chaton78!
@@ -1710,7 +2462,7 @@ because the `value` field is redundant in the graphite/librato context.
- [#1001](https://github.com/influxdata/telegraf/pull/1001): Graphite serializer templates.
- [#1008](https://github.com/influxdata/telegraf/pull/1008): Adding memstats metrics to the influxdb plugin.
-### Bugfixes
+### Bug Fixes
- [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name)
- [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw!
- [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj!
@@ -1737,14 +2489,14 @@ because the `value` field is redundant in the graphite/librato context.
- [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug!
- [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere!
-### Bugfixes
+### Bug Fixes
- [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided.
- [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write.
- [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name.
- [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue.
- [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key.
- [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic.
-- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert!
+- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titilambert!
- [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk!
- [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout.
- [#959](https://github.com/influxdata/telegraf/pull/959): reduce mongodb & prometheus collection timeouts. Thanks @PierreF!
@@ -1755,10 +2507,10 @@ because the `value` field is redundant in the graphite/librato context.
- Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859)
### Features
-- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref!
+- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @PierreF!
- [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou!
-### Bugfixes
+### Bug Fixes
- [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix
- [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic
@@ -1783,7 +2535,7 @@ because the `value` field is redundant in the graphite/librato context.
- [#826](https://github.com/influxdata/telegraf/pull/826): "kernel" linux plugin for /proc/stat metrics (context switches, interrupts, etc.)
- [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics.
-### Bugfixes
+### Bug Fixes
- [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":"
- [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty!
- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert!
@@ -1801,7 +2553,7 @@ because the `value` field is redundant in the graphite/librato context.
### Release Notes
- Bug in the build script broke deb and rpm packages.
-### Bugfixes
+### Bug Fixes
- [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken
- [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken
@@ -1821,7 +2573,7 @@ on the metric _name_.
- [#736](https://github.com/influxdata/telegraf/pull/736): Ignore dummy filesystems from disk plugin. Thanks @PierreF!
- [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath!
-### Bugfixes
+### Bug Fixes
- [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldnt print in quiet mode.
- [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters.
@@ -1857,7 +2609,7 @@ and is in the `[agent]` config section.
- [#699](https://github.com/influxdata/telegraf/pull/699): Flush based on buffer size rather than time.
- [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes!
-### Bugfixes
+### Bug Fixes
- [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux.
- [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug.
- [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues.
@@ -1884,7 +2636,7 @@ doing the opposite of what it claimed to do (yikes). It's been replaced by
- [#617](https://github.com/influxdata/telegraf/pull/617): exec plugin: parse influx line protocol in addition to JSON.
- [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-vista support
-### Bugfixes
+### Bug Fixes
- [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements.
- [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working.
- [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong.
@@ -1929,7 +2681,7 @@ specifying a docker endpoint to get metrics from.
- [#573](https://github.com/influxdata/telegraf/pull/573): Github webhooks consumer input. Thanks @jackzampolin!
- [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso!
-### Bugfixes
+### Bug Fixes
- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert!
- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin
- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
@@ -1975,7 +2727,7 @@ configurations overwritten by the upgrade. There is a backup stored at
(`name_prefix` and `name_suffix`)
- Added ability to override base plugin measurement name. (`name_override`)
-### Bugfixes
+### Bug Fixes
## v0.2.5 [unreleased]
@@ -1984,7 +2736,7 @@ configurations overwritten by the upgrade. There is a backup stored at
- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff
-### Bugfixes
+### Bug Fixes
- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!
@@ -2000,7 +2752,7 @@ configurations overwritten by the upgrade. There is a backup stored at
- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!
-### Bugfixes
+### Bug Fixes
- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue
- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement.
@@ -2037,7 +2789,7 @@ same type can be specified, like this:
- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list.
- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!
-### Bugfixes
+### Bug Fixes
- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning.
- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic
@@ -2047,7 +2799,7 @@ same type can be specified, like this:
- 0.2.1 has a bug where all lists within plugins get duplicated, this includes
lists of servers/URLs. 0.2.2 is being released solely to fix that bug
-### Bugfixes
+### Bug Fixes
- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs.
## v0.2.1 [2015-11-16]
@@ -2076,7 +2828,7 @@ same type.
- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
-### Bugfixes
+### Bug Fixes
- [#331](https://github.com/influxdata/telegraf/pull/331): Dont overwrite host tag in redis plugin.
- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
@@ -2124,7 +2876,7 @@ of metrics collected and from how many inputs.
- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!
-### Bugfixes
+### Bug Fixes
- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
@@ -2167,7 +2919,7 @@ re-added in a "verbose" mode if there is demand for it.
- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks
and filtering when specifying a config file.
-### Bugfixes
+### Bug Fixes
- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug
@@ -2195,7 +2947,7 @@ and filtering when specifying a config file.
- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin
-### Bugfixes
+### Bug Fixes
## v0.1.7 [2015-08-28]
@@ -2208,7 +2960,7 @@ and filtering when specifying a config file.
- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability
-### Bugfixes
+### Bug Fixes
- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
@@ -2221,7 +2973,7 @@ and filtering when specifying a config file.
- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!
-### Bugfixes
+### Bug Fixes
- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
@@ -2243,11 +2995,11 @@ and filtering when specifying a config file.
- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
-- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham!
+- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!
-### Bugfixes
+### Bug Fixes
- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
@@ -2259,7 +3011,7 @@ and filtering when specifying a config file.
### Features
- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
-### Bugfixes
+### Bug Fixes
- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
@@ -2269,7 +3021,7 @@ and filtering when specifying a config file.
- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
-### Bugfixes
+### Bug Fixes
- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
@@ -2281,7 +3033,7 @@ and filtering when specifying a config file.
- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
-### Bugfixes
+### Bug Fixes
- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index badf71c120eb3..897ac1377e6e7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -13,6 +13,19 @@
1. Ensure you have added proper unit tests and documentation.
1. Open a new [pull request][].
+#### Contributing an External Plugin *(experimental)*
+Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/inputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code.
+
+Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin.
+Check out our [guidelines](docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`.
+
+
+#### Security Vulnerability Reporting
+InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our
+open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about
+security vulnerability reporting,
+including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/).
+
### GoDoc
Public interfaces for inputs, outputs, processors, aggregators, metrics,
@@ -24,30 +37,24 @@ and the accumulator can be found in the GoDoc:
**Adding a dependency:**
-Assuming you can already build the project, run these in the telegraf directory:
+Telegraf uses Go modules. Assuming you can already build the project, run this in the telegraf directory:
-1. `dep ensure -vendor-only`
-2. `dep ensure -add github.com/[dependency]/[new-package]`
+1. `go get github.com/[dependency]/[new-package]`
**Unit Tests:**
Before opening a pull request you should run the linter checks and
the short tests.
-**Run static analysis:**
-
```
make check
-```
-
-**Run short tests:**
-
-```
make test
```
**Execute integration tests:**
+(Optional)
+
Running the integration tests requires several docker containers to be
running. You can start the containers with:
```
diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md
new file mode 100644
index 0000000000000..a73c4308c4c1c
--- /dev/null
+++ b/EXTERNAL_PLUGINS.md
@@ -0,0 +1,14 @@
+# External Plugins
+
+This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd).
+Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin.
+
+Pull requests welcome.
+
+
+## Inputs
+- [rand](https://github.com/ssoroka/rand) - Generate random numbers
+- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts
+- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels
+- [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS.
+- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API.
diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index 1521eb2cd98cf..0000000000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,1675 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- digest = "1:972f38a9c879a4920d1e3a3d3438104b6c06163bfa3e6f4064adb00468d40587"
- name = "cloud.google.com/go"
- packages = [
- "civil",
- "compute/metadata",
- "iam",
- "internal/optional",
- "internal/version",
- "monitoring/apiv3",
- "pubsub",
- "pubsub/apiv1",
- "pubsub/internal/distribution",
- ]
- pruneopts = ""
- revision = "c728a003b238b26cef9ab6753a5dc424b331c3ad"
- version = "v0.27.0"
-
-[[projects]]
- branch = "master"
- digest = "1:fc0802104acded1f48e4860a9f2db85b82b4a754fca9eae750ff4e8b8cdf2116"
- name = "code.cloudfoundry.org/clock"
- packages = ["."]
- pruneopts = ""
- revision = "02e53af36e6c978af692887ed449b74026d76fec"
-
-[[projects]]
- digest = "1:ca3acef20fd660d4df327accbf3ca2df9a12213d914f3113305dcd56579324b9"
- name = "collectd.org"
- packages = [
- "api",
- "cdtime",
- "network",
- ]
- pruneopts = ""
- revision = "2ce144541b8903101fb8f1483cc0497a68798122"
- version = "v0.3.0"
-
-[[projects]]
- digest = "1:5f61d4466cef935862c262f6bc00e24beb5b39b551e906f3cfb180dfac096d57"
- name = "contrib.go.opencensus.io/exporter/stackdriver"
- packages = ["propagation"]
- pruneopts = ""
- revision = "2b93072101d466aa4120b3c23c2e1b08af01541c"
- version = "v0.6.0"
-
-[[projects]]
- digest = "1:5923e22a060ab818a015593422f9e8a35b9d881d4cfcfed0669a82959b11c7ee"
- name = "github.com/Azure/go-autorest"
- packages = [
- "autorest",
- "autorest/adal",
- "autorest/azure",
- "autorest/azure/auth",
- "autorest/date",
- ]
- pruneopts = ""
- revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318"
- version = "v10.12.0"
-
-[[projects]]
- branch = "master"
- digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6"
- name = "github.com/Microsoft/ApplicationInsights-Go"
- packages = [
- "appinsights",
- "appinsights/contracts",
- ]
- pruneopts = ""
- revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5"
-
-[[projects]]
- digest = "1:45ec6eb579713a01991ad07f538fed3b576ee55f5ce9f248320152a9270d9258"
- name = "github.com/Microsoft/go-winio"
- packages = ["."]
- pruneopts = ""
- revision = "a6d595ae73cf27a1b8fc32930668708f45ce1c85"
- version = "v0.4.9"
-
-[[projects]]
- digest = "1:213b41361ad1cb4768add9d26c2e27794c65264eefdb24ed6ea34cdfeeff3f3c"
- name = "github.com/Shopify/sarama"
- packages = ["."]
- pruneopts = ""
- revision = "a6144ae922fd99dd0ea5046c8137acfb7fab0914"
- version = "v1.18.0"
-
-[[projects]]
- digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e"
- name = "github.com/StackExchange/wmi"
- packages = ["."]
- pruneopts = ""
- revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338"
- version = "1.0.0"
-
-[[projects]]
- digest = "1:f296e8b29c60c94efed3b8cfae08d793cb95149cdd7343e6a9834b4ac7136475"
- name = "github.com/aerospike/aerospike-client-go"
- packages = [
- ".",
- "internal/lua",
- "internal/lua/resources",
- "logger",
- "pkg/bcrypt",
- "pkg/ripemd160",
- "types",
- "types/atomic",
- "types/particle_type",
- "types/rand",
- "utils/buffer",
- ]
- pruneopts = ""
- revision = "1dc8cf203d24cd454e71ce40ab4cd0bf3112df90"
- version = "v1.27.0"
-
-[[projects]]
- branch = "master"
- digest = "1:8483994d21404c8a1d489f6be756e25bfccd3b45d65821f25695577791a08e68"
- name = "github.com/alecthomas/units"
- packages = ["."]
- pruneopts = ""
- revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
-
-[[projects]]
- branch = "master"
- digest = "1:7f21a8f175ee7f91c659f919c61032e11889fba5dc25c0cec555087cbb87435a"
- name = "github.com/amir/raidman"
- packages = [
- ".",
- "proto",
- ]
- pruneopts = ""
- revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b"
-
-[[projects]]
- branch = "master"
- digest = "1:0828d8c0f95689f832cf348fe23827feb7640cd698d612ef59e2f9d041f54c68"
- name = "github.com/apache/thrift"
- packages = ["lib/go/thrift"]
- pruneopts = ""
- revision = "f2867c24984aa53edec54a138c03db934221bdea"
-
-[[projects]]
- digest = "1:996727880e06dcf037f712c4d046e241d1b1b01844636fefb0fbaa480cfd230e"
- name = "github.com/aws/aws-sdk-go"
- packages = [
- "aws",
- "aws/awserr",
- "aws/awsutil",
- "aws/client",
- "aws/client/metadata",
- "aws/corehandlers",
- "aws/credentials",
- "aws/credentials/ec2rolecreds",
- "aws/credentials/endpointcreds",
- "aws/credentials/stscreds",
- "aws/csm",
- "aws/defaults",
- "aws/ec2metadata",
- "aws/endpoints",
- "aws/request",
- "aws/session",
- "aws/signer/v4",
- "internal/sdkio",
- "internal/sdkrand",
- "internal/sdkuri",
- "internal/shareddefaults",
- "private/protocol",
- "private/protocol/json/jsonutil",
- "private/protocol/jsonrpc",
- "private/protocol/query",
- "private/protocol/query/queryutil",
- "private/protocol/rest",
- "private/protocol/xml/xmlutil",
- "service/cloudwatch",
- "service/dynamodb",
- "service/dynamodb/dynamodbattribute",
- "service/dynamodb/dynamodbiface",
- "service/kinesis",
- "service/kinesis/kinesisiface",
- "service/sts",
- ]
- pruneopts = ""
- revision = "bf8067ceb6e7f51e150c218972dccfeeed892b85"
- version = "v1.15.54"
-
-[[projects]]
- branch = "master"
- digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb"
- name = "github.com/beorn7/perks"
- packages = ["quantile"]
- pruneopts = ""
- revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
-
-[[projects]]
- digest = "1:c5978131c797af795972c27c25396c81d1bf53b7b6e8e3e0259e58375765c071"
- name = "github.com/bsm/sarama-cluster"
- packages = ["."]
- pruneopts = ""
- revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3"
- version = "v2.1.13"
-
-[[projects]]
- digest = "1:e5691038f8e87e7da05280095d968e50c17d624e25cca095d4e4cd947a805563"
- name = "github.com/caio/go-tdigest"
- packages = ["."]
- pruneopts = ""
- revision = "f3c8d94f65d3096ac96eda54ffcd10c0fe1477f1"
- version = "v2.3.0"
-
-[[projects]]
- digest = "1:f619cb9b07aebe5416262cdd8b86082e8d5bdc5264cb3b615ff858df0b645f97"
- name = "github.com/cenkalti/backoff"
- packages = ["."]
- pruneopts = ""
- revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e"
- version = "v2.0.0"
-
-[[projects]]
- branch = "master"
- digest = "1:298e42868718da06fc0899ae8fdb99c48a14477045234c9274d81caa79af6a8f"
- name = "github.com/couchbase/go-couchbase"
- packages = ["."]
- pruneopts = ""
- revision = "16db1f1fe037412f12738fa4d8448c549c4edd77"
-
-[[projects]]
- branch = "master"
- digest = "1:c734658274a6be88870a36742fdea96a3fce4fc99a7b90946c9e84335ceae71a"
- name = "github.com/couchbase/gomemcached"
- packages = [
- ".",
- "client",
- ]
- pruneopts = ""
- revision = "0da75df145308b9a4e6704d762ca9d9b77752efc"
-
-[[projects]]
- branch = "master"
- digest = "1:c1195c02bc8fbf5307cfb95bc79eddaa1351ee3587cc4a7bbe6932e2fb966ff2"
- name = "github.com/couchbase/goutils"
- packages = [
- "logging",
- "scramsha",
- ]
- pruneopts = ""
- revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873"
-
-[[projects]]
- digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- pruneopts = ""
- revision = "346938d642f2ec3594ed81d874461961cd0faa76"
- version = "v1.1.0"
-
-[[projects]]
- branch = "master"
- digest = "1:7fdc54859cd901c25b9d8db964410a4e0d98fa0dca267fe4cf49c0eede5e06c2"
- name = "github.com/denisenkom/go-mssqldb"
- packages = [
- ".",
- "internal/cp",
- ]
- pruneopts = ""
- revision = "1eb28afdf9b6e56cf673badd47545f844fe81103"
-
-[[projects]]
- digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6"
- name = "github.com/dgrijalva/jwt-go"
- packages = ["."]
- pruneopts = ""
- revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
- version = "v3.2.0"
-
-[[projects]]
- branch = "master"
- digest = "1:654ac9799e7a8a586d8690bb2229a4f3408bbfe2c5494bf4dfe043053eeb5496"
- name = "github.com/dimchansky/utfbom"
- packages = ["."]
- pruneopts = ""
- revision = "6c6132ff69f0f6c088739067407b5d32c52e1d0f"
-
-[[projects]]
- digest = "1:522eff2a1f014a64fb403db60fc0110653e4dc5b59779894d208e697b0708ddc"
- name = "github.com/docker/distribution"
- packages = [
- "digestset",
- "reference",
- ]
- pruneopts = ""
- revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
-
-[[projects]]
- digest = "1:d149605f1b00713fdc48150122892d77d49d30c825f690dd92f497aeb6cf18f5"
- name = "github.com/docker/docker"
- packages = [
- "api",
- "api/types",
- "api/types/blkiodev",
- "api/types/container",
- "api/types/events",
- "api/types/filters",
- "api/types/image",
- "api/types/mount",
- "api/types/network",
- "api/types/registry",
- "api/types/strslice",
- "api/types/swarm",
- "api/types/swarm/runtime",
- "api/types/time",
- "api/types/versions",
- "api/types/volume",
- "client",
- ]
- pruneopts = ""
- revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2"
-
-[[projects]]
- digest = "1:a5ecc2e70260a87aa263811281465a5effcfae8a54bac319cee87c4625f04d63"
- name = "github.com/docker/go-connections"
- packages = [
- "nat",
- "sockets",
- "tlsconfig",
- ]
- pruneopts = ""
- revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
- version = "v0.3.0"
-
-[[projects]]
- digest = "1:582d54fcb7233da8dde1dfd2210a5b9675d0685f84246a8d317b07d680c18b1b"
- name = "github.com/docker/go-units"
- packages = ["."]
- pruneopts = ""
- revision = "47565b4f722fb6ceae66b95f853feed578a4a51c"
- version = "v0.3.3"
-
-[[projects]]
- branch = "master"
- digest = "1:809792497a26f3936462cc5787a0d644b4d3cbfd59587e4f8845a9396ca2eb8a"
- name = "github.com/docker/libnetwork"
- packages = ["ipvs"]
- pruneopts = ""
- revision = "d7b61745d16675c9f548b19f06fda80d422a74f0"
-
-[[projects]]
- digest = "1:6d6672f85a84411509885eaa32f597577873de00e30729b9bb0eb1e1faa49c12"
- name = "github.com/eapache/go-resiliency"
- packages = ["breaker"]
- pruneopts = ""
- revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
- version = "v1.1.0"
-
-[[projects]]
- branch = "master"
- digest = "1:7b12ea8b50040c6c2378ec5b5a1ab722730b2bfb46e8724ded57f2c3905431fa"
- name = "github.com/eapache/go-xerial-snappy"
- packages = ["."]
- pruneopts = ""
- revision = "040cc1a32f578808623071247fdbd5cc43f37f5f"
-
-[[projects]]
- digest = "1:d8d46d21073d0f65daf1740ebf4629c65e04bf92e14ce93c2201e8624843c3d3"
- name = "github.com/eapache/queue"
- packages = ["."]
- pruneopts = ""
- revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:3fa846cb3feb4e65371fe3c347c299de9b5bc3e71e256c0d940cd19b767a6ba0"
- name = "github.com/eclipse/paho.mqtt.golang"
- packages = [
- ".",
- "packets",
- ]
- pruneopts = ""
- revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:99a0607f79d36202b64b674c0464781549917cfc4bfb88037aaa98b31e124a18"
- name = "github.com/ericchiang/k8s"
- packages = [
- ".",
- "apis/apiextensions/v1beta1",
- "apis/apps/v1beta1",
- "apis/apps/v1beta2",
- "apis/core/v1",
- "apis/meta/v1",
- "apis/policy/v1beta1",
- "apis/resource",
- "runtime",
- "runtime/schema",
- "util/intstr",
- "watch/versioned",
- ]
- pruneopts = ""
- revision = "d1bbc0cffaf9849ddcae7b9efffae33e2dd52e9a"
- version = "v1.2.0"
-
-[[projects]]
- branch = "master"
- digest = "1:ec95c1c49fbec27ab5383b9c47fae5c2fe1d97ac5b41d36d78e17588a44e9f3f"
- name = "github.com/ghodss/yaml"
- packages = ["."]
- pruneopts = ""
- revision = "25d852aebe32c875e9c044af3eef9c7dc6bc777f"
-
-[[projects]]
- digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356"
- name = "github.com/go-ini/ini"
- packages = ["."]
- pruneopts = ""
- revision = "358ee7663966325963d4e8b2e1fbd570c5195153"
- version = "v1.38.1"
-
-[[projects]]
- digest = "1:df89444601379b2e1ee82bf8e6b72af9901cbeed4b469fa380a519c89c339310"
- name = "github.com/go-logfmt/logfmt"
- packages = ["."]
- pruneopts = ""
- revision = "07c9b44f60d7ffdfb7d8efe1ad539965737836dc"
- version = "v0.4.0"
-
-[[projects]]
- digest = "1:96c4a6ff4206086347bfe28e96e092642882128f45ecb8dc8f15f3e6f6703af0"
- name = "github.com/go-ole/go-ole"
- packages = [
- ".",
- "oleutil",
- ]
- pruneopts = ""
- revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
- version = "v1.2.1"
-
-[[projects]]
- digest = "1:3dfd659219b6f63dc0677a62b8d4e8f10b5cf53900aef40858db10a19407e41d"
- name = "github.com/go-redis/redis"
- packages = [
- ".",
- "internal",
- "internal/consistenthash",
- "internal/hashtag",
- "internal/pool",
- "internal/proto",
- "internal/singleflight",
- "internal/util",
- ]
- pruneopts = ""
- revision = "83fb42932f6145ce52df09860384a4653d2d332a"
- version = "v6.12.0"
-
-[[projects]]
- digest = "1:c07de423ca37dc2765396d6971599ab652a339538084b9b58c9f7fc533b28525"
- name = "github.com/go-sql-driver/mysql"
- packages = ["."]
- pruneopts = ""
- revision = "d523deb1b23d913de5bdada721a6071e71283618"
- version = "v1.4.0"
-
-[[projects]]
- digest = "1:9ab1b1c637d7c8f49e39d8538a650d7eb2137b076790cff69d160823b505964c"
- name = "github.com/gobwas/glob"
- packages = [
- ".",
- "compiler",
- "match",
- "syntax",
- "syntax/ast",
- "syntax/lexer",
- "util/runes",
- "util/strings",
- ]
- pruneopts = ""
- revision = "5ccd90ef52e1e632236f7326478d4faa74f99438"
- version = "v0.2.3"
-
-[[projects]]
- digest = "1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918"
- name = "github.com/gogo/protobuf"
- packages = ["proto"]
- pruneopts = ""
- revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b"
- name = "github.com/golang/protobuf"
- packages = [
- "proto",
- "protoc-gen-go/descriptor",
- "ptypes",
- "ptypes/any",
- "ptypes/duration",
- "ptypes/empty",
- "ptypes/struct",
- "ptypes/timestamp",
- "ptypes/wrappers",
- ]
- pruneopts = ""
- revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
- version = "v1.1.0"
-
-[[projects]]
- branch = "master"
- digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf"
- name = "github.com/golang/snappy"
- packages = ["."]
- pruneopts = ""
- revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
-
-[[projects]]
- digest = "1:f9f45f75f332e03fc7e9fe9188ea4e1ce4d14779ef34fa1b023da67518e36327"
- name = "github.com/google/go-cmp"
- packages = [
- "cmp",
- "cmp/internal/diff",
- "cmp/internal/function",
- "cmp/internal/value",
- ]
- pruneopts = ""
- revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
- version = "v0.2.0"
-
-[[projects]]
- digest = "1:e38ad2825940d58bd8425be40bcd4211099d0c1988c158c35828197413b3cf85"
- name = "github.com/google/go-github"
- packages = ["github"]
- pruneopts = ""
- revision = "7462feb2032c2da9e3b85e9b04e6853a6e9e14ca"
- version = "v24.0.1"
-
-[[projects]]
- digest = "1:cea4aa2038169ee558bf507d5ea02c94ca85bcca28a4c7bb99fd59b31e43a686"
- name = "github.com/google/go-querystring"
- packages = ["query"]
- pruneopts = ""
- revision = "44c6ddd0a2342c386950e880b658017258da92fc"
- version = "v1.0.0"
-
-[[projects]]
- digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986"
- name = "github.com/google/uuid"
- packages = ["."]
- pruneopts = ""
- revision = "064e2069ce9c359c118179501254f67d7d37ba24"
- version = "0.2"
-
-[[projects]]
- digest = "1:e097a364f4e8d8d91b9b9eeafb992d3796a41fde3eb548c1a87eb9d9f60725cf"
- name = "github.com/googleapis/gax-go"
- packages = ["."]
- pruneopts = ""
- revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
- version = "v2.0.0"
-
-[[projects]]
- digest = "1:dbbeb8ddb0be949954c8157ee8439c2adfd8dc1c9510eb44a6e58cb68c3dce28"
- name = "github.com/gorilla/context"
- packages = ["."]
- pruneopts = ""
- revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:c2c8666b4836c81a1d247bdf21c6a6fc1ab586538ab56f74437c2e0df5c375e1"
- name = "github.com/gorilla/mux"
- packages = ["."]
- pruneopts = ""
- revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
- version = "v1.6.2"
-
-[[projects]]
- branch = "master"
- digest = "1:60b7bc5e043a11213472ae05252527287d20e0a6ccc18f6ae67fad88e41004de"
- name = "github.com/hailocab/go-hostpool"
- packages = ["."]
- pruneopts = ""
- revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478"
-
-[[projects]]
- branch = "master"
- digest = "1:c191ec4c50122cdfeedba867d25bbe2ed63ed6dd2130729220c6c0d654361ea4"
- name = "github.com/harlow/kinesis-consumer"
- packages = [
- ".",
- "checkpoint/ddb",
- ]
- pruneopts = ""
- revision = "2f58b136fee036f5de256b81a8461cc724fdf9df"
-
-[[projects]]
- digest = "1:e7224669901bab4094e6d6697c136557b7177db6ceb01b7fc8b20d08f4b5aacd"
- name = "github.com/hashicorp/consul"
- packages = ["api"]
- pruneopts = ""
- revision = "39f93f011e591c842acc8053a7f5972aa6e592fd"
- version = "v1.2.1"
-
-[[projects]]
- branch = "master"
- digest = "1:f5d25fd7bdda08e39e01193ef94a1ebf7547b1b931bcdec785d08050598f306c"
- name = "github.com/hashicorp/go-cleanhttp"
- packages = ["."]
- pruneopts = ""
- revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
-
-[[projects]]
- branch = "master"
- digest = "1:ff65bf6fc4d1116f94ac305342725c21b55c16819c2606adc8f527755716937f"
- name = "github.com/hashicorp/go-rootcerts"
- packages = ["."]
- pruneopts = ""
- revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
-
-[[projects]]
- digest = "1:f72168ea995f398bab88e84bd1ff58a983466ba162fb8d50d47420666cd57fad"
- name = "github.com/hashicorp/serf"
- packages = ["coordinate"]
- pruneopts = ""
- revision = "d6574a5bb1226678d7010325fb6c985db20ee458"
- version = "v0.8.1"
-
-[[projects]]
- digest = "1:824c4cd143ee15735f1c75d9072aad46e51dd27a4ef8bf6ce723a138265b7fb3"
- name = "github.com/influxdata/go-syslog"
- packages = [
- ".",
- "nontransparent",
- "octetcounting",
- "rfc5424",
- ]
- pruneopts = ""
- revision = "0cd00a9f0a5e5607d5ef9a294c260f77a74e3b5a"
- version = "v2.0.0"
-
-[[projects]]
- branch = "master"
- digest = "1:bc3eb5ddfd59781ea1183f2b3d1eb105a1495d421f09b2ccd360c7fced0b612d"
- name = "github.com/influxdata/tail"
- packages = [
- ".",
- "ratelimiter",
- "util",
- "watch",
- "winfile",
- ]
- pruneopts = ""
- revision = "c43482518d410361b6c383d7aebce33d0471d7bc"
-
-[[projects]]
- branch = "master"
- digest = "1:7fb6cc9607eaa6ef309edebc42b57f704244bd4b9ab23bff128829c4ad09b95d"
- name = "github.com/influxdata/toml"
- packages = [
- ".",
- "ast",
- ]
- pruneopts = ""
- revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b"
-
-[[projects]]
- branch = "master"
- digest = "1:a0c157916be0b4de1d4565b1f094b8d746109f94968140dff40a42780fa6ccef"
- name = "github.com/influxdata/wlog"
- packages = ["."]
- pruneopts = ""
- revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec"
-
-[[projects]]
- digest = "1:5544f7badae00bc5b9ec6829857bc08f88fce4d3ef73fb616ee57d49abbf7f48"
- name = "github.com/jackc/pgx"
- packages = [
- ".",
- "chunkreader",
- "internal/sanitize",
- "pgio",
- "pgproto3",
- "pgtype",
- "stdlib",
- ]
- pruneopts = ""
- revision = "89f1e6ac7276b61d885db5e5aed6fcbedd1c7e31"
- version = "v3.2.0"
-
-[[projects]]
- digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e"
- name = "github.com/jmespath/go-jmespath"
- packages = ["."]
- pruneopts = ""
- revision = "0b12d6b5"
-
-[[projects]]
- branch = "master"
- digest = "1:2c5ad58492804c40bdaf5d92039b0cde8b5becd2b7feeb37d7d1cc36a8aa8dbe"
- name = "github.com/kardianos/osext"
- packages = ["."]
- pruneopts = ""
- revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
-
-[[projects]]
- branch = "master"
- digest = "1:fed90fa725d3b1bac0a760de64426834dfef4546474cf182f2ec94285afa74a8"
- name = "github.com/kardianos/service"
- packages = ["."]
- pruneopts = ""
- revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197"
-
-[[projects]]
- digest = "1:a12b6f20a7e5eb7412d2e5cd15e1262a021f735fa958d664d9e7ba2160eefd0a"
- name = "github.com/karrick/godirwalk"
- packages = ["."]
- pruneopts = ""
- revision = "2de2192f9e35ce981c152a873ed943b93b79ced4"
- version = "v1.7.5"
-
-[[projects]]
- branch = "master"
- digest = "1:63e7368fcf6b54804076eaec26fd9cf0c4466166b272393db4b93102e1e962df"
- name = "github.com/kballard/go-shellquote"
- packages = ["."]
- pruneopts = ""
- revision = "95032a82bc518f77982ea72343cc1ade730072f0"
-
-[[projects]]
- branch = "master"
- digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a"
- name = "github.com/kr/logfmt"
- packages = ["."]
- pruneopts = ""
- revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
-
-[[projects]]
- branch = "master"
- digest = "1:e7737c09200582508f4f67227c39e7c4667cc6067a6d2b2e679654e43e8a8cb4"
- name = "github.com/kubernetes/apimachinery"
- packages = ["pkg/api/resource"]
- pruneopts = ""
- revision = "d41becfba9ee9bf8e55cec1dd3934cd7cfc04b99"
-
-[[projects]]
- branch = "develop"
- digest = "1:3e66a61a57bbbe832c338edb3a623be0deb3dec650c2f3515149658898287e37"
- name = "github.com/leodido/ragel-machinery"
- packages = [
- ".",
- "parser",
- ]
- pruneopts = ""
- revision = "299bdde78165d4ca4bc7d064d8d6a4f39ac6de8c"
-
-[[projects]]
- branch = "master"
- digest = "1:7e9956922e349af0190afa0b6621befcd201072679d8e51a9047ff149f2afe93"
- name = "github.com/mailru/easyjson"
- packages = [
- ".",
- "buffer",
- "jlexer",
- "jwriter",
- ]
- pruneopts = ""
- revision = "efc7eb8984d6655c26b5c9d2e65c024e5767c37c"
-
-[[projects]]
- digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28"
- name = "github.com/matttproud/golang_protobuf_extensions"
- packages = ["pbutil"]
- pruneopts = ""
- revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
- version = "v1.0.1"
-
-[[projects]]
- digest = "1:4c8d8358c45ba11ab7bb15df749d4df8664ff1582daead28bae58cf8cbe49890"
- name = "github.com/miekg/dns"
- packages = ["."]
- pruneopts = ""
- revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1"
- version = "v1.0.8"
-
-[[projects]]
- branch = "master"
- digest = "1:99651e95333755cbe5c9768c1b80031300acca64a80870b40309202b32585a5a"
- name = "github.com/mitchellh/go-homedir"
- packages = ["."]
- pruneopts = ""
- revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"
-
-[[projects]]
- branch = "master"
- digest = "1:f43ed2c836208c14f45158fd01577c985688a4d11cf9fd475a939819fef3b321"
- name = "github.com/mitchellh/mapstructure"
- packages = ["."]
- pruneopts = ""
- revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac"
-
-[[projects]]
- digest = "1:ee2e62b00a9ccc2dba1525f93396e35c847f90f87939df6f361b86315ea5f69a"
- name = "github.com/multiplay/go-ts3"
- packages = ["."]
- pruneopts = ""
- revision = "d0d44555495c8776880a17e439399e715a4ef319"
- version = "v1.0.0"
-
-[[projects]]
- digest = "1:ccd0def9f0b82b61c5e54fcbfccf528eabb13b489d008e46dc16b808c2e1f765"
- name = "github.com/naoina/go-stringutil"
- packages = ["."]
- pruneopts = ""
- revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b"
- version = "v0.1.0"
-
-[[projects]]
- digest = "1:e5ec850ce66beb0014fc40d8e64b7482172eee71d86d734d66def5e9eac16797"
- name = "github.com/nats-io/gnatsd"
- packages = [
- "conf",
- "logger",
- "server",
- "server/pse",
- "util",
- ]
- pruneopts = ""
- revision = "6608e9ac3be979dcb0614b772cc86a87b71acaa3"
- version = "v1.2.0"
-
-[[projects]]
- digest = "1:665af347df4c5d1ae4c3eacd0754f5337a301f6a3f2444c9993b996605c8c02b"
- name = "github.com/nats-io/go-nats"
- packages = [
- ".",
- "encoders/builtin",
- "util",
- ]
- pruneopts = ""
- revision = "062418ea1c2181f52dc0f954f6204370519a868b"
- version = "v1.5.0"
-
-[[projects]]
- digest = "1:be61e8224b84064109eaba8157cbb4bbe6ca12443e182b6624fdfa1c0dcf53d9"
- name = "github.com/nats-io/nuid"
- packages = ["."]
- pruneopts = ""
- revision = "289cccf02c178dc782430d534e3c1f5b72af807f"
- version = "v1.0.0"
-
-[[projects]]
- digest = "1:7a69f6a3a33929f8b66aa39c93868ad1698f06417fe627ae067559beb94504bd"
- name = "github.com/nsqio/go-nsq"
- packages = ["."]
- pruneopts = ""
- revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f"
- version = "v1.0.7"
-
-[[projects]]
- digest = "1:5d9b668b0b4581a978f07e7d2e3314af18eb27b3fb5d19b70185b7c575723d11"
- name = "github.com/opencontainers/go-digest"
- packages = ["."]
- pruneopts = ""
- revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf"
- version = "v1.0.0-rc1"
-
-[[projects]]
- digest = "1:f26c8670b11e29a49c8e45f7ec7f2d5bac62e8fd4e3c0ae1662baa4a697f984a"
- name = "github.com/opencontainers/image-spec"
- packages = [
- "specs-go",
- "specs-go/v1",
- ]
- pruneopts = ""
- revision = "d60099175f88c47cd379c4738d158884749ed235"
- version = "v1.0.1"
-
-[[projects]]
- branch = "master"
- digest = "1:2da0e5077ed40453dc281b9a2428d84cf6ad14063aed189f6296ca5dd25cf13d"
- name = "github.com/opentracing-contrib/go-observer"
- packages = ["."]
- pruneopts = ""
- revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
-
-[[projects]]
- digest = "1:78fb99d6011c2ae6c72f3293a83951311147b12b06a5ffa43abf750c4fab6ac5"
- name = "github.com/opentracing/opentracing-go"
- packages = [
- ".",
- "ext",
- "log",
- ]
- pruneopts = ""
- revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
- version = "v1.0.2"
-
-[[projects]]
- digest = "1:fea0e67285d900e5a0a7ec19ff4b4c82865a28dddbee8454c5360ad908f7069c"
- name = "github.com/openzipkin/zipkin-go-opentracing"
- packages = [
- ".",
- "flag",
- "thrift/gen-go/scribe",
- "thrift/gen-go/zipkincore",
- "types",
- "wire",
- ]
- pruneopts = ""
- revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
- version = "v0.3.4"
-
-[[projects]]
- digest = "1:29e34e58f26655c4d73135cdfc0517ea2ff1483eff34e5d5ef4b6fddbb81e31b"
- name = "github.com/pierrec/lz4"
- packages = [
- ".",
- "internal/xxh32",
- ]
- pruneopts = ""
- revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00"
- version = "v2.0.3"
-
-[[projects]]
- digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca"
- name = "github.com/pkg/errors"
- packages = ["."]
- pruneopts = ""
- revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
- version = "v0.8.0"
-
-[[projects]]
- digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
- name = "github.com/pmezard/go-difflib"
- packages = ["difflib"]
- pruneopts = ""
- revision = "792786c7400a136282c1664665ae0a8db921c6c2"
- version = "v1.0.0"
-
-[[projects]]
- digest = "1:6f218995d6a74636cfcab45ce03005371e682b4b9bee0e5eb0ccfd83ef85364f"
- name = "github.com/prometheus/client_golang"
- packages = [
- "prometheus",
- "prometheus/internal",
- "prometheus/promhttp",
- ]
- pruneopts = ""
- revision = "505eaef017263e299324067d40ca2c48f6a2cf50"
- version = "v0.9.2"
-
-[[projects]]
- branch = "master"
- digest = "1:185cf55b1f44a1bf243558901c3f06efa5c64ba62cfdcbb1bf7bbe8c3fb68561"
- name = "github.com/prometheus/client_model"
- packages = ["go"]
- pruneopts = ""
- revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
-
-[[projects]]
- branch = "master"
- digest = "1:bfbc121ef802d245ef67421cff206615357d9202337a3d492b8f668906b485a8"
- name = "github.com/prometheus/common"
- packages = [
- "expfmt",
- "internal/bitbucket.org/ww/goautoneg",
- "model",
- ]
- pruneopts = ""
- revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
-
-[[projects]]
- branch = "master"
- digest = "1:b694a6bdecdace488f507cff872b30f6f490fdaf988abd74d87ea56406b23b6e"
- name = "github.com/prometheus/procfs"
- packages = [
- ".",
- "internal/util",
- "nfs",
- "xfs",
- ]
- pruneopts = ""
- revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a"
-
-[[projects]]
- branch = "master"
- digest = "1:15bcdc717654ef21128e8af3a63eec39a6d08a830e297f93d65163f87c8eb523"
- name = "github.com/rcrowley/go-metrics"
- packages = ["."]
- pruneopts = ""
- revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
-
-[[projects]]
- branch = "master"
- digest = "1:7fc2f428767a2521abc63f1a663d981f61610524275d6c0ea645defadd4e916f"
- name = "github.com/samuel/go-zookeeper"
- packages = ["zk"]
- pruneopts = ""
- revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47"
-
-[[projects]]
- digest = "1:7f569d906bdd20d906b606415b7d794f798f91a62fcfb6a4daa6d50690fb7a3f"
- name = "github.com/satori/go.uuid"
- packages = ["."]
- pruneopts = ""
- revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
- version = "v1.2.0"
-
-[[projects]]
- digest = "1:d77a85cf43b70ae61fa2543d402d782b40dca0f5f41413839b5f916782b0fab9"
- name = "github.com/shirou/gopsutil"
- packages = [
- "cpu",
- "disk",
- "host",
- "internal/common",
- "load",
- "mem",
- "net",
- "process",
- ]
- pruneopts = ""
- revision = "6c6abd6d1666d6b27f1c261e0f850441ba22aa3a"
- version = "v2.19.02"
-
-[[projects]]
- branch = "master"
- digest = "1:99c6a6dab47067c9b898e8c8b13d130c6ab4ffbcc4b7cc6236c2cd0b1e344f5b"
- name = "github.com/shirou/w32"
- packages = ["."]
- pruneopts = ""
- revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b"
-
-[[projects]]
- digest = "1:8cf46b6c18a91068d446e26b67512cf16f1540b45d90b28b9533706a127f0ca6"
- name = "github.com/sirupsen/logrus"
- packages = ["."]
- pruneopts = ""
- revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
- version = "v1.0.5"
-
-[[projects]]
- branch = "master"
- digest = "1:4b0cabe65ca903a7b2a3e6272c5304eb788ce196d35ecb901c6563e5e7582443"
- name = "github.com/soniah/gosnmp"
- packages = ["."]
- pruneopts = ""
- revision = "96b86229e9b3ffb4b954144cdc7f98fe3ee1003f"
-
-[[projects]]
- branch = "master"
- digest = "1:4e8f1cae8e6d83af9000d82566efb8823907dae77ba4f1d76ff28fdd197c3c90"
- name = "github.com/streadway/amqp"
- packages = ["."]
- pruneopts = ""
- revision = "e5adc2ada8b8efff032bf61173a233d143e9318e"
-
-[[projects]]
- digest = "1:711eebe744c0151a9d09af2315f0bb729b2ec7637ef4c410fa90a18ef74b65b6"
- name = "github.com/stretchr/objx"
- packages = ["."]
- pruneopts = ""
- revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
- version = "v0.1.1"
-
-[[projects]]
- digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75"
- name = "github.com/stretchr/testify"
- packages = [
- "assert",
- "mock",
- "require",
- ]
- pruneopts = ""
- revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
- version = "v1.2.2"
-
-[[projects]]
- digest = "1:e139a0dfe24e723193005b291ed82a975041718cfcab9136aa6c9540df70a4ff"
- name = "github.com/tidwall/gjson"
- packages = ["."]
- pruneopts = ""
- revision = "f123b340873a0084cb27267eddd8ff615115fbff"
- version = "v1.1.2"
-
-[[projects]]
- branch = "master"
- digest = "1:4db4f92bb9cb04cfc4fccb36aba2598b02a988008c4cc0692b241214ad8ac96e"
- name = "github.com/tidwall/match"
- packages = ["."]
- pruneopts = ""
- revision = "1731857f09b1f38450e2c12409748407822dc6be"
-
-[[projects]]
- digest = "1:026b6ceaabbacaa147e94a63579efc3d3c73e00c73b67fa5c43ab46191ed04eb"
- name = "github.com/vishvananda/netlink"
- packages = ["nl"]
- pruneopts = ""
- revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e"
-
-[[projects]]
- branch = "master"
- digest = "1:c09fddfdd491edaa4383396503e57023a26e5a824283a78c2310613a1252c649"
- name = "github.com/vishvananda/netns"
- packages = ["."]
- pruneopts = ""
- revision = "13995c7128ccc8e51e9a6bd2b551020a27180abd"
-
-[[projects]]
- digest = "1:343f20460c11a0d0529fe532553bfef9446918d1a1fda6d8661eb27d5b1a68b8"
- name = "github.com/vjeantet/grok"
- packages = ["."]
- pruneopts = ""
- revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53"
- version = "v1.0.0"
-
-[[projects]]
- digest = "1:6af52ce6dae9a912aa3113f247a63cd82599760ddc328a6721c3ef0426d31ca2"
- name = "github.com/vmware/govmomi"
- packages = [
- ".",
- "find",
- "list",
- "nfc",
- "object",
- "performance",
- "property",
- "session",
- "simulator",
- "simulator/esx",
- "simulator/vpx",
- "task",
- "view",
- "vim25",
- "vim25/debug",
- "vim25/methods",
- "vim25/mo",
- "vim25/progress",
- "vim25/soap",
- "vim25/types",
- "vim25/xml",
- ]
- pruneopts = ""
- revision = "3617f28d167d448f93f282a867870f109516d2a5"
- version = "v0.19.0"
-
-[[projects]]
- digest = "1:4cb7eb45ed9a5129bc77c726328c130abcbaae566c1fe4d82693fae86c8c621d"
- name = "github.com/wavefronthq/wavefront-sdk-go"
- packages = [
- "histogram",
- "internal",
- "senders",
- ]
- pruneopts = ""
- revision = "fa87530cd02a8ad08bd179e1c39fb319a0cc0dae"
- version = "v0.9.2"
-
-[[projects]]
- branch = "master"
- digest = "1:98ed05e9796df287b90c1d96854e3913c8e349dbc546412d3cabb472ecf4b417"
- name = "github.com/wvanbergen/kafka"
- packages = ["consumergroup"]
- pruneopts = ""
- revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f"
-
-[[projects]]
- branch = "master"
- digest = "1:12aff3cc417907bf9f683a6bf1dc78ffb08e41bc69f829491e593ea9b951a3cf"
- name = "github.com/wvanbergen/kazoo-go"
- packages = ["."]
- pruneopts = ""
- revision = "f72d8611297a7cf105da904c04198ad701a60101"
-
-[[projects]]
- branch = "master"
- digest = "1:c5918689b7e187382cc1066bf0260de54ba9d1b323105f46ed2551d2fb4a17c7"
- name = "github.com/yuin/gopher-lua"
- packages = [
- ".",
- "ast",
- "parse",
- "pm",
- ]
- pruneopts = ""
- revision = "46796da1b0b4794e1e341883a399f12cc7574b55"
-
-[[projects]]
- digest = "1:8c8ec859c77fccd10a347b7219b597c4c21c448949e8bdf3fc3e6f4c78f952b4"
- name = "go.opencensus.io"
- packages = [
- ".",
- "internal",
- "internal/tagencoding",
- "plugin/ocgrpc",
- "plugin/ochttp",
- "plugin/ochttp/propagation/b3",
- "stats",
- "stats/internal",
- "stats/view",
- "tag",
- "trace",
- "trace/internal",
- "trace/propagation",
- "trace/tracestate",
- ]
- pruneopts = ""
- revision = "79993219becaa7e29e3b60cb67f5b8e82dee11d6"
- version = "v0.17.0"
-
-[[projects]]
- branch = "master"
- digest = "1:0773b5c3be42874166670a20aa177872edb450cd9fc70b1df97303d977702a50"
- name = "golang.org/x/crypto"
- packages = [
- "bcrypt",
- "blowfish",
- "ed25519",
- "ed25519/internal/edwards25519",
- "md4",
- "pbkdf2",
- "pkcs12",
- "pkcs12/internal/rc2",
- "ssh/terminal",
- ]
- pruneopts = ""
- revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9"
- source = "https://github.com/golang/crypto.git"
-
-[[projects]]
- branch = "master"
- digest = "1:00ff990baae4665bb0a8174af5ff78228574227ed96c89671247a56852a50e21"
- name = "golang.org/x/net"
- packages = [
- "bpf",
- "context",
- "context/ctxhttp",
- "html",
- "html/atom",
- "html/charset",
- "http/httpguts",
- "http2",
- "http2/hpack",
- "idna",
- "internal/iana",
- "internal/socket",
- "internal/socks",
- "internal/timeseries",
- "ipv4",
- "ipv6",
- "proxy",
- "trace",
- "websocket",
- ]
- pruneopts = ""
- revision = "a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1"
- source = "https://github.com/golang/net.git"
-
-[[projects]]
- branch = "master"
- digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734"
- name = "golang.org/x/oauth2"
- packages = [
- ".",
- "clientcredentials",
- "google",
- "internal",
- "jws",
- "jwt",
- ]
- pruneopts = ""
- revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
- source = "https://github.com/golang/oauth2.git"
-
-[[projects]]
- branch = "master"
- digest = "1:88ecca26e54f601a8733c9a31d9f0883b915216a177673f0467f6b864fd0d90f"
- name = "golang.org/x/sync"
- packages = [
- "errgroup",
- "semaphore",
- ]
- pruneopts = ""
- revision = "42b317875d0fa942474b76e1b46a6060d720ae6e"
- source = "https://github.com/golang/sync.git"
-
-[[projects]]
- branch = "master"
- digest = "1:6a6eed3727d0e15703d9e930d8dbe333bea09eda309d75a015d3c6dc4e5c92a6"
- name = "golang.org/x/sys"
- packages = [
- "unix",
- "windows",
- "windows/registry",
- "windows/svc",
- "windows/svc/debug",
- "windows/svc/eventlog",
- "windows/svc/mgr",
- ]
- pruneopts = ""
- revision = "7c4c994c65f702f41ed7d6620a2cb34107576a77"
- source = "https://github.com/golang/sys.git"
-
-[[projects]]
- digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
- name = "golang.org/x/text"
- packages = [
- "collate",
- "collate/build",
- "encoding",
- "encoding/charmap",
- "encoding/htmlindex",
- "encoding/internal",
- "encoding/internal/identifier",
- "encoding/japanese",
- "encoding/korean",
- "encoding/simplifiedchinese",
- "encoding/traditionalchinese",
- "encoding/unicode",
- "internal/colltab",
- "internal/gen",
- "internal/tag",
- "internal/triegen",
- "internal/ucd",
- "internal/utf8internal",
- "language",
- "runes",
- "secure/bidirule",
- "transform",
- "unicode/bidi",
- "unicode/cldr",
- "unicode/norm",
- "unicode/rangetable",
- ]
- pruneopts = ""
- revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
- source = "https://github.com/golang/text.git"
- version = "v0.3.0"
-
-[[projects]]
- branch = "master"
- digest = "1:2d878ecef4b17dbdd067b8fb98eb64f768f0802b1176b91b9e3c01b457efd01f"
- name = "google.golang.org/api"
- packages = [
- "googleapi/transport",
- "internal",
- "iterator",
- "option",
- "support/bundler",
- "transport",
- "transport/grpc",
- "transport/http",
- ]
- pruneopts = ""
- revision = "19ff8768a5c0b8e46ea281065664787eefc24121"
-
-[[projects]]
- digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472"
- name = "google.golang.org/appengine"
- packages = [
- ".",
- "cloudsql",
- "internal",
- "internal/app_identity",
- "internal/base",
- "internal/datastore",
- "internal/log",
- "internal/modules",
- "internal/remote_api",
- "internal/socket",
- "internal/urlfetch",
- "socket",
- "urlfetch",
- ]
- pruneopts = ""
- revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
- version = "v1.1.0"
-
-[[projects]]
- branch = "master"
- digest = "1:b1443b4e3cc990c84d27fcdece9d3302158c67dba870e33a6937a2c0076388c2"
- name = "google.golang.org/genproto"
- packages = [
- "googleapis/api/annotations",
- "googleapis/api/distribution",
- "googleapis/api/label",
- "googleapis/api/metric",
- "googleapis/api/monitoredres",
- "googleapis/iam/v1",
- "googleapis/monitoring/v3",
- "googleapis/pubsub/v1",
- "googleapis/rpc/status",
- "protobuf/field_mask",
- ]
- pruneopts = ""
- revision = "fedd2861243fd1a8152376292b921b394c7bef7e"
-
-[[projects]]
- digest = "1:5f31b45ee9da7a87f140bef3ed0a7ca34ea2a6d38eb888123b8e28170e8aa4f2"
- name = "google.golang.org/grpc"
- packages = [
- ".",
- "balancer",
- "balancer/base",
- "balancer/roundrobin",
- "codes",
- "connectivity",
- "credentials",
- "credentials/oauth",
- "encoding",
- "encoding/proto",
- "grpclog",
- "internal",
- "internal/backoff",
- "internal/channelz",
- "internal/grpcrand",
- "keepalive",
- "metadata",
- "naming",
- "peer",
- "resolver",
- "resolver/dns",
- "resolver/passthrough",
- "stats",
- "status",
- "tap",
- "transport",
- ]
- pruneopts = ""
- revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
- version = "v1.13.0"
-
-[[projects]]
- digest = "1:3cad99e0d1f94b8c162787c12e59d0a0b9df1ef75590eb145cdd625479091efe"
- name = "gopkg.in/asn1-ber.v1"
- packages = ["."]
- pruneopts = ""
- revision = "379148ca0225df7a432012b8df0355c2a2063ac0"
- version = "v1.2"
-
-[[projects]]
- digest = "1:581450ae66d7970d91ef9132459fa583e937c6e502f1b96e4ee7783a56fa0b44"
- name = "gopkg.in/fatih/pool.v2"
- packages = ["."]
- pruneopts = ""
- revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f"
- version = "v2.0.0"
-
-[[projects]]
- digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
- name = "gopkg.in/fsnotify.v1"
- packages = ["."]
- pruneopts = ""
- revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
- source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
- version = "v1.4.7"
-
-[[projects]]
- digest = "1:960720207d3d0992995f4576e1366fd9e9b1483473b07fb7243144f75f5b1546"
- name = "gopkg.in/gorethink/gorethink.v3"
- packages = [
- ".",
- "encoding",
- "ql2",
- "types",
- ]
- pruneopts = ""
- revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b"
- version = "v3.0.5"
-
-[[projects]]
- digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6"
- name = "gopkg.in/inf.v0"
- packages = ["."]
- pruneopts = ""
- revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
- version = "v0.9.1"
-
-[[projects]]
- digest = "1:367baf06b7dbd0ef0bbdd785f6a79f929c96b0c18e9d3b29c0eed1ac3f5db133"
- name = "gopkg.in/ldap.v2"
- packages = ["."]
- pruneopts = ""
- revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9"
- version = "v2.5.1"
-
-[[projects]]
- branch = "v2"
- digest = "1:f54ba71a035aac92ced3e902d2bff3734a15d1891daff73ec0f90ef236750139"
- name = "gopkg.in/mgo.v2"
- packages = [
- ".",
- "bson",
- "internal/json",
- "internal/sasl",
- "internal/scram",
- ]
- pruneopts = ""
- revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5"
-
-[[projects]]
- digest = "1:b49c4d3115800eace659c9a6a5c384a922f5b210178b24a01abb10731f404ea2"
- name = "gopkg.in/olivere/elastic.v5"
- packages = [
- ".",
- "config",
- "uritemplates",
- ]
- pruneopts = ""
- revision = "52741dc2ce53629cbe1e673869040d886cba2cd5"
- version = "v5.0.70"
-
-[[projects]]
- branch = "v1"
- digest = "1:a96d16bd088460f2e0685d46c39bcf1208ba46e0a977be2df49864ec7da447dd"
- name = "gopkg.in/tomb.v1"
- packages = ["."]
- pruneopts = ""
- revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
-
-[[projects]]
- digest = "1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2"
- name = "gopkg.in/yaml.v2"
- packages = ["."]
- pruneopts = ""
- revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
- version = "v2.2.1"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- input-imports = [
- "cloud.google.com/go/monitoring/apiv3",
- "cloud.google.com/go/pubsub",
- "collectd.org/api",
- "collectd.org/network",
- "github.com/Azure/go-autorest/autorest",
- "github.com/Azure/go-autorest/autorest/azure/auth",
- "github.com/Microsoft/ApplicationInsights-Go/appinsights",
- "github.com/Shopify/sarama",
- "github.com/StackExchange/wmi",
- "github.com/aerospike/aerospike-client-go",
- "github.com/alecthomas/units",
- "github.com/amir/raidman",
- "github.com/apache/thrift/lib/go/thrift",
- "github.com/aws/aws-sdk-go/aws",
- "github.com/aws/aws-sdk-go/aws/client",
- "github.com/aws/aws-sdk-go/aws/credentials",
- "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
- "github.com/aws/aws-sdk-go/aws/session",
- "github.com/aws/aws-sdk-go/service/cloudwatch",
- "github.com/aws/aws-sdk-go/service/dynamodb",
- "github.com/aws/aws-sdk-go/service/kinesis",
- "github.com/bsm/sarama-cluster",
- "github.com/couchbase/go-couchbase",
- "github.com/denisenkom/go-mssqldb",
- "github.com/dgrijalva/jwt-go",
- "github.com/docker/docker/api/types",
- "github.com/docker/docker/api/types/container",
- "github.com/docker/docker/api/types/filters",
- "github.com/docker/docker/api/types/registry",
- "github.com/docker/docker/api/types/swarm",
- "github.com/docker/docker/client",
- "github.com/docker/libnetwork/ipvs",
- "github.com/eclipse/paho.mqtt.golang",
- "github.com/ericchiang/k8s",
- "github.com/ericchiang/k8s/apis/apps/v1beta1",
- "github.com/ericchiang/k8s/apis/apps/v1beta2",
- "github.com/ericchiang/k8s/apis/core/v1",
- "github.com/ericchiang/k8s/apis/meta/v1",
- "github.com/ericchiang/k8s/apis/resource",
- "github.com/ericchiang/k8s/util/intstr",
- "github.com/ghodss/yaml",
- "github.com/go-logfmt/logfmt",
- "github.com/go-redis/redis",
- "github.com/go-sql-driver/mysql",
- "github.com/gobwas/glob",
- "github.com/golang/protobuf/proto",
- "github.com/golang/protobuf/ptypes/duration",
- "github.com/golang/protobuf/ptypes/empty",
- "github.com/golang/protobuf/ptypes/timestamp",
- "github.com/google/go-cmp/cmp",
- "github.com/google/go-github/github",
- "github.com/gorilla/mux",
- "github.com/harlow/kinesis-consumer",
- "github.com/harlow/kinesis-consumer/checkpoint/ddb",
- "github.com/hashicorp/consul/api",
- "github.com/influxdata/go-syslog",
- "github.com/influxdata/go-syslog/nontransparent",
- "github.com/influxdata/go-syslog/octetcounting",
- "github.com/influxdata/go-syslog/rfc5424",
- "github.com/influxdata/tail",
- "github.com/influxdata/toml",
- "github.com/influxdata/toml/ast",
- "github.com/influxdata/wlog",
- "github.com/jackc/pgx",
- "github.com/jackc/pgx/pgtype",
- "github.com/jackc/pgx/stdlib",
- "github.com/kardianos/service",
- "github.com/karrick/godirwalk",
- "github.com/kballard/go-shellquote",
- "github.com/kubernetes/apimachinery/pkg/api/resource",
- "github.com/matttproud/golang_protobuf_extensions/pbutil",
- "github.com/miekg/dns",
- "github.com/multiplay/go-ts3",
- "github.com/nats-io/gnatsd/server",
- "github.com/nats-io/go-nats",
- "github.com/nsqio/go-nsq",
- "github.com/openzipkin/zipkin-go-opentracing",
- "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore",
- "github.com/pkg/errors",
- "github.com/prometheus/client_golang/prometheus",
- "github.com/prometheus/client_golang/prometheus/promhttp",
- "github.com/prometheus/client_model/go",
- "github.com/prometheus/common/expfmt",
- "github.com/satori/go.uuid",
- "github.com/shirou/gopsutil/cpu",
- "github.com/shirou/gopsutil/disk",
- "github.com/shirou/gopsutil/host",
- "github.com/shirou/gopsutil/load",
- "github.com/shirou/gopsutil/mem",
- "github.com/shirou/gopsutil/net",
- "github.com/shirou/gopsutil/process",
- "github.com/soniah/gosnmp",
- "github.com/streadway/amqp",
- "github.com/stretchr/testify/assert",
- "github.com/stretchr/testify/mock",
- "github.com/stretchr/testify/require",
- "github.com/tidwall/gjson",
- "github.com/vjeantet/grok",
- "github.com/vmware/govmomi",
- "github.com/vmware/govmomi/object",
- "github.com/vmware/govmomi/performance",
- "github.com/vmware/govmomi/property",
- "github.com/vmware/govmomi/session",
- "github.com/vmware/govmomi/simulator",
- "github.com/vmware/govmomi/view",
- "github.com/vmware/govmomi/vim25",
- "github.com/vmware/govmomi/vim25/methods",
- "github.com/vmware/govmomi/vim25/mo",
- "github.com/vmware/govmomi/vim25/soap",
- "github.com/vmware/govmomi/vim25/types",
- "github.com/wavefronthq/wavefront-sdk-go/senders",
- "github.com/wvanbergen/kafka/consumergroup",
- "golang.org/x/net/context",
- "golang.org/x/net/html/charset",
- "golang.org/x/oauth2",
- "golang.org/x/oauth2/clientcredentials",
- "golang.org/x/oauth2/google",
- "golang.org/x/sys/unix",
- "golang.org/x/sys/windows",
- "golang.org/x/sys/windows/svc",
- "golang.org/x/sys/windows/svc/mgr",
- "google.golang.org/api/iterator",
- "google.golang.org/api/option",
- "google.golang.org/api/support/bundler",
- "google.golang.org/genproto/googleapis/api/distribution",
- "google.golang.org/genproto/googleapis/api/metric",
- "google.golang.org/genproto/googleapis/api/monitoredres",
- "google.golang.org/genproto/googleapis/monitoring/v3",
- "google.golang.org/grpc",
- "google.golang.org/grpc/codes",
- "google.golang.org/grpc/credentials",
- "google.golang.org/grpc/metadata",
- "google.golang.org/grpc/status",
- "gopkg.in/gorethink/gorethink.v3",
- "gopkg.in/ldap.v2",
- "gopkg.in/mgo.v2",
- "gopkg.in/mgo.v2/bson",
- "gopkg.in/olivere/elastic.v5",
- "gopkg.in/yaml.v2",
- ]
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
deleted file mode 100644
index 057af5e3b3d24..0000000000000
--- a/Gopkg.toml
+++ /dev/null
@@ -1,290 +0,0 @@
-[[constraint]]
- name = "collectd.org"
- version = "0.3.0"
-
-[[constraint]]
- name = "github.com/aerospike/aerospike-client-go"
- version = "<=1.27.0"
-
-[[constraint]]
- name = "github.com/amir/raidman"
- branch = "master"
-
-[[constraint]]
- name = "github.com/apache/thrift"
- branch = "master"
-
-[[constraint]]
- name = "github.com/aws/aws-sdk-go"
- version = "1.15.54"
-
-[[constraint]]
- name = "github.com/bsm/sarama-cluster"
- version = "2.1.13"
-
-[[constraint]]
- name = "github.com/couchbase/go-couchbase"
- branch = "master"
-
-[[constraint]]
- name = "github.com/dgrijalva/jwt-go"
- version = "3.2.0"
-
-[[constraint]]
- name = "github.com/docker/docker"
- revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" # v18.05.0-ce
-
-[[override]]
- name = "github.com/docker/distribution"
- revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" # v18.05.0-ce
-
-[[constraint]]
- name = "github.com/eclipse/paho.mqtt.golang"
- version = "~1.1.1"
-
-[[constraint]]
- name = "github.com/go-sql-driver/mysql"
- version = "1.4.0"
-
-[[constraint]]
- name = "github.com/gobwas/glob"
- version = "0.2.3"
-
-[[constraint]]
- name = "github.com/golang/protobuf"
- version = "1.1.0"
-
-[[constraint]]
- name = "github.com/google/go-cmp"
- version = "0.2.0"
-
-[[constraint]]
- name = "github.com/gorilla/mux"
- version = "1.6.2"
-
-[[constraint]]
- name = "github.com/go-redis/redis"
- version = "6.12.0"
-
-[[constraint]]
- name = "github.com/hashicorp/consul"
- version = "1.1.0"
-
-[[constraint]]
- name = "github.com/influxdata/go-syslog"
- version = "2.0.0"
-
-[[constraint]]
- name = "github.com/influxdata/tail"
- branch = "master"
-
-[[constraint]]
- name = "github.com/influxdata/toml"
- branch = "master"
-
-[[constraint]]
- name = "github.com/influxdata/wlog"
- branch = "master"
-
-[[constraint]]
- name = "github.com/jackc/pgx"
- version = "3.2.0"
-
-[[constraint]]
- name = "github.com/kardianos/service"
- branch = "master"
-
-[[constraint]]
- name = "github.com/kballard/go-shellquote"
- branch = "master"
-
-[[constraint]]
- name = "github.com/matttproud/golang_protobuf_extensions"
- version = "1.0.1"
-
-[[constraint]]
- name = "github.com/Microsoft/ApplicationInsights-Go"
- branch = "master"
-
-[[constraint]]
- name = "github.com/miekg/dns"
- version = "1.0.8"
-
-[[constraint]]
- name = "github.com/multiplay/go-ts3"
- version = "1.0.0"
-
-[[constraint]]
- name = "github.com/nats-io/gnatsd"
- version = "1.1.0"
-
-[[constraint]]
- name = "github.com/nats-io/go-nats"
- version = "1.5.0"
-
-[[constraint]]
- name = "github.com/nsqio/go-nsq"
- version = "1.0.7"
-
-[[constraint]]
- name = "github.com/openzipkin/zipkin-go-opentracing"
- version = "0.3.4"
-
-[[constraint]]
- name = "github.com/prometheus/client_golang"
- version = "0.9.2"
-
-[[constraint]]
- name = "github.com/prometheus/client_model"
- branch = "master"
-
-[[constraint]]
- name = "github.com/prometheus/common"
- branch = "master"
-
-[[constraint]]
- name = "github.com/satori/go.uuid"
- version = "1.2.0"
-
-[[constraint]]
- name = "github.com/shirou/gopsutil"
- version = "2.18.12"
-
-[[constraint]]
- name = "github.com/Shopify/sarama"
- version = "1.18.0"
-
-[[constraint]]
- name = "github.com/soniah/gosnmp"
- branch = "master"
-
-[[constraint]]
- name = "github.com/StackExchange/wmi"
- version = "1.0.0"
-
-[[constraint]]
- name = "github.com/streadway/amqp"
- branch = "master"
-
-[[constraint]]
- name = "github.com/stretchr/testify"
- version = "1.2.2"
-
-[[constraint]]
- name = "github.com/tidwall/gjson"
- version = "1.1.1"
-
-[[constraint]]
- name = "github.com/vjeantet/grok"
- version = "1.0.0"
-
-[[constraint]]
- name = "github.com/wvanbergen/kafka"
- branch = "master"
-
-[[constraint]]
- name = "github.com/denisenkom/go-mssqldb"
- branch = "master"
-
-[[constraint]]
- name = "golang.org/x/net"
- branch = "master"
- source = "https://github.com/golang/net.git"
-
-[[constraint]]
- name = "golang.org/x/sys"
- branch = "master"
- source = "https://github.com/golang/sys.git"
-
-[[constraint]]
- name = "google.golang.org/grpc"
- version = "1.12.2"
-
-[[constraint]]
- name = "gopkg.in/gorethink/gorethink.v3"
- version = "3.0.5"
-
-[[constraint]]
- name = "gopkg.in/ldap.v2"
- version = "2.5.1"
-
-[[constraint]]
- name = "gopkg.in/mgo.v2"
- branch = "v2"
-
-[[constraint]]
- name = "gopkg.in/olivere/elastic.v5"
- version = "^5.0.69"
-
-[[constraint]]
- name = "gopkg.in/yaml.v2"
- version = "^2.2.1"
-
-[[override]]
- source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
- name = "gopkg.in/fsnotify.v1"
-
-[[constraint]]
- branch = "master"
- name = "google.golang.org/genproto"
-
-[[constraint]]
- name = "github.com/vmware/govmomi"
- version = "0.19.0"
-
-[[constraint]]
- name = "github.com/Azure/go-autorest"
- version = "10.12.0"
-
-[[constraint]]
- branch = "master"
- name = "golang.org/x/oauth2"
- source = "https://github.com/golang/oauth2.git"
-
-[[constraint]]
- branch = "master"
- name = "github.com/docker/libnetwork"
-
-[[override]]
- name = "github.com/vishvananda/netlink"
- revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e"
-
-[[constraint]]
- name = "github.com/wavefronthq/wavefront-sdk-go"
- version = "^0.9.1"
-
-[[constraint]]
- name = "github.com/karrick/godirwalk"
- version = "1.7.5"
-
-[[override]]
- name = "github.com/harlow/kinesis-consumer"
- branch = "master"
-
-[[constraint]]
- branch = "master"
- name = "github.com/kubernetes/apimachinery"
-
-[[constraint]]
- name = "github.com/go-logfmt/logfmt"
- version = "0.4.0"
-
-[[constraint]]
- branch = "master"
- name = "github.com/ghodss/yaml"
-
-[[override]]
- name = "golang.org/x/crypto"
- source = "https://github.com/golang/crypto.git"
-
-[[override]]
- name = "golang.org/x/sync"
- source = "https://github.com/golang/sync.git"
-
-[[override]]
- name = "golang.org/x/text"
- source = "https://github.com/golang/text.git"
-
-[[constraint]]
- name = "github.com/google/go-github"
- version = "24.0.1"
diff --git a/LICENSE b/LICENSE
index 057cf997d6435..c3d76b9ca56d3 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)
-Copyright (c) 2015-2018 InfluxData Inc.
+Copyright (c) 2015-2020 InfluxData Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/Makefile b/Makefile
index 1008831987c90..61b5b77315b93 100644
--- a/Makefile
+++ b/Makefile
@@ -1,57 +1,101 @@
-ifeq ($(SHELL), cmd)
- VERSION := $(shell git describe --exact-match --tags 2>nil)
- HOME := $(HOMEPATH)
-else ifeq ($(SHELL), sh.exe)
- VERSION := $(shell git describe --exact-match --tags 2>nil)
- HOME := $(HOMEPATH)
+next_version := 1.16.0
+tag := $(shell git describe --exact-match --tags 2>git_describe_error.tmp; rm -f git_describe_error.tmp)
+branch := $(shell git rev-parse --abbrev-ref HEAD)
+commit := $(shell git rev-parse --short=8 HEAD)
+
+ifdef NIGHTLY
+ version := $(next_version)
+ rpm_version := nightly
+ rpm_iteration := 0
+ deb_version := nightly
+ deb_iteration := 0
+ tar_version := nightly
+else ifeq ($(tag),)
+ version := $(next_version)
+ rpm_version := $(version)~$(commit)-0
+ rpm_iteration := 0
+ deb_version := $(version)~$(commit)-0
+ deb_iteration := 0
+ tar_version := $(version)~$(commit)
+else ifneq ($(findstring -rc,$(tag)),)
+ version := $(word 1,$(subst -, ,$(tag)))
+ version := $(version:v%=%)
+ rc := $(word 2,$(subst -, ,$(tag)))
+ rpm_version := $(version)-0.$(rc)
+ rpm_iteration := 0.$(subst rc,,$(rc))
+ deb_version := $(version)~$(rc)-1
+ deb_iteration := 0
+ tar_version := $(version)~$(rc)
else
- VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
+ version := $(tag:v%=%)
+ rpm_version := $(version)-1
+ rpm_iteration := 1
+ deb_version := $(version)-1
+ deb_iteration := 1
+ tar_version := $(version)
endif
-PREFIX := /usr/local
-BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
-COMMIT := $(shell git rev-parse --short HEAD)
-GOFILES ?= $(shell git ls-files '*.go')
-GOFMT ?= $(shell gofmt -l -s $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
-BUILDFLAGS ?=
+MAKEFLAGS += --no-print-directory
+GOOS ?= $(shell go env GOOS)
+GOARCH ?= $(shell go env GOARCH)
+HOSTGO := env -u GOOS -u GOARCH -u GOARM -- go
-ifdef GOBIN
-PATH := $(GOBIN):$(PATH)
-else
-PATH := $(subst :,/bin:,$(shell go env GOPATH))/bin:$(PATH)
+LDFLAGS := $(LDFLAGS) -X main.commit=$(commit) -X main.branch=$(branch)
+ifneq ($(tag),)
+ LDFLAGS += -X main.version=$(version)
endif
-LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
-ifdef VERSION
- LDFLAGS += -X main.version=$(VERSION)
+# Go built-in race detector works only for 64 bits architectures.
+ifneq ($(GOARCH), 386)
+ race_detector := -race
endif
+
+GOFILES ?= $(shell git ls-files '*.go')
+GOFMT ?= $(shell gofmt -l -s $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
+
+prefix ?= /usr/local
+bindir ?= $(prefix)/bin
+sysconfdir ?= $(prefix)/etc
+localstatedir ?= $(prefix)/var
+pkgdir ?= build/dist
+
.PHONY: all
all:
- @$(MAKE) --no-print-directory deps
- @$(MAKE) --no-print-directory telegraf
+ @$(MAKE) deps
+ @$(MAKE) telegraf
+
+.PHONY: help
+help:
+ @echo 'Targets:'
+ @echo ' all - download dependencies and compile telegraf binary'
+ @echo ' deps - download dependencies'
+ @echo ' telegraf - compile telegraf binary'
+ @echo ' test - run short unit tests'
+ @echo ' fmt - format source files'
+ @echo ' tidy - tidy go modules'
+ @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md'
+ @echo ' clean - delete build artifacts'
+ @echo ''
+ @echo 'Package Targets:'
+ @$(foreach dist,$(dists),echo " $(dist)";)
.PHONY: deps
deps:
- dep ensure -vendor-only
+ go mod download
.PHONY: telegraf
telegraf:
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
+# Used by dockerfile builds
.PHONY: go-install
go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
-.PHONY: install
-install: telegraf
- mkdir -p $(DESTDIR)$(PREFIX)/bin/
- cp telegraf $(DESTDIR)$(PREFIX)/bin/
-
-
.PHONY: test
test:
- go test -short ./...
+ go test -short $(race_detector) ./...
.PHONY: fmt
fmt:
@@ -69,11 +113,12 @@ fmtcheck:
.PHONY: test-windows
test-windows:
- go test -short ./plugins/inputs/ping/...
- go test -short ./plugins/inputs/win_perf_counters/...
- go test -short ./plugins/inputs/win_services/...
- go test -short ./plugins/inputs/procstat/...
- go test -short ./plugins/inputs/ntpq/...
+ go test -short $(race_detector) ./plugins/inputs/ping/...
+ go test -short $(race_detector) ./plugins/inputs/win_perf_counters/...
+ go test -short $(race_detector) ./plugins/inputs/win_services/...
+ go test -short $(race_detector) ./plugins/inputs/procstat/...
+ go test -short $(race_detector) ./plugins/inputs/ntpq/...
+ go test -short $(race_detector) ./plugins/processors/port_name/...
.PHONY: vet
vet:
@@ -85,63 +130,276 @@ vet:
exit 1; \
fi
+.PHONY: tidy
+tidy:
+ go mod verify
+ go mod tidy
+ @if ! git diff --quiet go.mod go.sum; then \
+ echo "please run go mod tidy and check in changes"; \
+ exit 1; \
+ fi
+
.PHONY: check
check: fmtcheck vet
+ @$(MAKE) --no-print-directory tidy
.PHONY: test-all
test-all: fmtcheck vet
- go test ./...
-
-.PHONY: package
-package:
- ./scripts/build.py --package --platform=all --arch=all
+ go test $(race_detector) ./...
-.PHONY: package-release
-package-release:
- ./scripts/build.py --release --package --platform=all --arch=all \
- --upload --bucket=dl.influxdata.com/telegraf/releases
-
-.PHONY: package-nightly
-package-nightly:
- ./scripts/build.py --nightly --package --platform=all --arch=all \
- --upload --bucket=dl.influxdata.com/telegraf/nightlies
+.PHONY: check-deps
+check-deps:
+ ./scripts/check-deps.sh
.PHONY: clean
clean:
rm -f telegraf
rm -f telegraf.exe
+ rm -rf build
.PHONY: docker-image
docker-image:
- docker build -f scripts/stretch.docker -t "telegraf:$(COMMIT)" .
+ docker build -f scripts/stretch.docker -t "telegraf:$(commit)" .
plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
ragel -Z -G2 $^ -o $@
-.PHONY: static
-static:
- @echo "Building static linux binary..."
- @CGO_ENABLED=0 \
- GOOS=linux \
- GOARCH=amd64 \
- go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
-
.PHONY: plugin-%
plugin-%:
@echo "Starting dev environment for $${$(@)} input plugin..."
@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up
-.PHONY: ci-1.11
-ci-1.11:
- docker build -t quay.io/influxdb/telegraf-ci:1.11.5 - < scripts/ci-1.11.docker
- docker push quay.io/influxdb/telegraf-ci:1.11.5
+.PHONY: ci-1.14
+ci-1.14:
+ docker build -t quay.io/influxdb/telegraf-ci:1.14.5 - < scripts/ci-1.14.docker
+ docker push quay.io/influxdb/telegraf-ci:1.14.5
+
+.PHONY: ci-1.13
+ci-1.13:
+ docker build -t quay.io/influxdb/telegraf-ci:1.13.13 - < scripts/ci-1.13.docker
+ docker push quay.io/influxdb/telegraf-ci:1.13.13
+
+.PHONY: install
+install: $(buildbin)
+ @mkdir -pv $(DESTDIR)$(bindir)
+ @mkdir -pv $(DESTDIR)$(sysconfdir)
+ @mkdir -pv $(DESTDIR)$(localstatedir)
+ @if [ $(GOOS) != "windows" ]; then mkdir -pv $(DESTDIR)$(sysconfdir)/logrotate.d; fi
+ @if [ $(GOOS) != "windows" ]; then mkdir -pv $(DESTDIR)$(localstatedir)/log/telegraf; fi
+ @if [ $(GOOS) != "windows" ]; then mkdir -pv $(DESTDIR)$(sysconfdir)/telegraf/telegraf.d; fi
+ @cp -fv $(buildbin) $(DESTDIR)$(bindir)
+ @if [ $(GOOS) != "windows" ]; then cp -fv etc/telegraf.conf $(DESTDIR)$(sysconfdir)/telegraf/telegraf.conf$(conf_suffix); fi
+ @if [ $(GOOS) != "windows" ]; then cp -fv etc/logrotate.d/telegraf $(DESTDIR)$(sysconfdir)/logrotate.d; fi
+ @if [ $(GOOS) = "windows" ]; then cp -fv etc/telegraf_windows.conf $(DESTDIR)/telegraf.conf; fi
+ @if [ $(GOOS) = "linux" ]; then mkdir -pv $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi
+ @if [ $(GOOS) = "linux" ]; then cp -fv scripts/telegraf.service $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi
+ @if [ $(GOOS) = "linux" ]; then cp -fv scripts/init.sh $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi
+
+# Telegraf build per platform. This improves package performance by sharing
+# the bin between deb/rpm/tar packages over building directly into the package
+# directory.
+$(buildbin):
+ @mkdir -pv $(dir $@)
+ go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf
+
+debs := telegraf_$(deb_version)_amd64.deb
+debs += telegraf_$(deb_version)_arm64.deb
+debs += telegraf_$(deb_version)_armel.deb
+debs += telegraf_$(deb_version)_armhf.deb
+debs += telegraf_$(deb_version)_i386.deb
+debs += telegraf_$(deb_version)_mips.deb
+debs += telegraf_$(deb_version)_mipsel.deb
+debs += telegraf_$(deb_version)_s390x.deb
+
+rpms += telegraf-$(rpm_version).aarch64.rpm
+rpms += telegraf-$(rpm_version).armel.rpm
+rpms += telegraf-$(rpm_version).armv6hl.rpm
+rpms += telegraf-$(rpm_version).i386.rpm
+rpms += telegraf-$(rpm_version).s390x.rpm
+rpms += telegraf-$(rpm_version).x86_64.rpm
+
+tars += telegraf-$(tar_version)_darwin_amd64.tar.gz
+tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz
+tars += telegraf-$(tar_version)_freebsd_i386.tar.gz
+tars += telegraf-$(tar_version)_linux_amd64.tar.gz
+tars += telegraf-$(tar_version)_linux_arm64.tar.gz
+tars += telegraf-$(tar_version)_linux_armel.tar.gz
+tars += telegraf-$(tar_version)_linux_armhf.tar.gz
+tars += telegraf-$(tar_version)_linux_i386.tar.gz
+tars += telegraf-$(tar_version)_linux_mips.tar.gz
+tars += telegraf-$(tar_version)_linux_mipsel.tar.gz
+tars += telegraf-$(tar_version)_linux_s390x.tar.gz
+tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz
+
+zips += telegraf-$(tar_version)_windows_amd64.zip
+zips += telegraf-$(tar_version)_windows_i386.zip
+
+dists := $(debs) $(rpms) $(tars) $(zips)
+
+.PHONY: package
+package: $(dists)
+
+rpm_amd64 := amd64
+rpm_386 := i386
+rpm_s390x := s390x
+rpm_arm5 := armel
+rpm_arm6 := armv6hl
+rpm_arm647 := aarch64
+rpm_arch = $(rpm_$(GOARCH)$(GOARM))
+
+.PHONY: $(rpms)
+$(rpms):
+ @$(MAKE) install
+ @mkdir -p $(pkgdir)
+ fpm --force \
+ --log info \
+ --architecture $(rpm_arch) \
+ --input-type dir \
+ --output-type rpm \
+ --vendor InfluxData \
+ --url https://github.com/influxdata/telegraf \
+ --license MIT \
+ --maintainer support@influxdb.com \
+ --config-files /etc/telegraf/telegraf.conf \
+ --config-files /etc/logrotate.d/telegraf \
+ --after-install scripts/rpm/post-install.sh \
+ --before-install scripts/rpm/pre-install.sh \
+ --after-remove scripts/rpm/post-remove.sh \
+ --description "Plugin-driven server agent for reporting metrics into InfluxDB." \
+ --depends coreutils \
+ --depends shadow-utils \
+ --rpm-posttrans scripts/rpm/post-install.sh \
+ --name telegraf \
+ --version $(version) \
+ --iteration $(rpm_iteration) \
+ --chdir $(DESTDIR) \
+ --package $(pkgdir)/$@
+
+deb_amd64 := amd64
+deb_386 := i386
+deb_s390x := s390x
+deb_arm5 := armel
+deb_arm6 := armhf
+deb_arm647 := arm64
+deb_mips := mips
+deb_mipsle := mipsel
+deb_arch = $(deb_$(GOARCH)$(GOARM))
+
+.PHONY: $(debs)
+$(debs):
+ @$(MAKE) install
+ @mkdir -pv $(pkgdir)
+ fpm --force \
+ --log info \
+ --architecture $(deb_arch) \
+ --input-type dir \
+ --output-type deb \
+ --vendor InfluxData \
+ --url https://github.com/influxdata/telegraf \
+ --license MIT \
+ --maintainer support@influxdb.com \
+ --config-files /etc/telegraf/telegraf.conf.sample \
+ --config-files /etc/logrotate.d/telegraf \
+ --after-install scripts/deb/post-install.sh \
+ --before-install scripts/deb/pre-install.sh \
+ --after-remove scripts/deb/post-remove.sh \
+ --before-remove scripts/deb/pre-remove.sh \
+ --description "Plugin-driven server agent for reporting metrics into InfluxDB." \
+ --name telegraf \
+ --version $(version) \
+ --iteration $(deb_iteration) \
+ --chdir $(DESTDIR) \
+ --package $(pkgdir)/$@
+
+.PHONY: $(zips)
+$(zips):
+ @$(MAKE) install
+ @mkdir -p $(pkgdir)
+ (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/$@
+
+.PHONY: $(tars)
+$(tars):
+ @$(MAKE) install
+ @mkdir -p $(pkgdir)
+ tar --owner 0 --group 0 -czvf $(pkgdir)/$@ -C $(dir $(DESTDIR)) .
+
+.PHONY: upload-nightly
+upload-nightly:
+ aws s3 sync $(pkgdir) s3://dl.influxdata.com/telegraf/nightlies/ \
+ --exclude "*" \
+ --include "*.tar.gz" \
+ --include "*.deb" \
+ --include "*.rpm" \
+ --include "*.zip" \
+ --acl public-read
+
+%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOOS := linux
+%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOARCH := amd64
+
+%static_linux_amd64.tar.gz: export cgo := -nocgo
+%static_linux_amd64.tar.gz: export CGO_ENABLED := 0
+
+%i386.deb %i386.rpm %linux_i386.tar.gz: export GOOS := linux
+%i386.deb %i386.rpm %linux_i386.tar.gz: export GOARCH := 386
+
+%armel.deb %armel.rpm %linux_armel.tar.gz: export GOOS := linux
+%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARCH := arm
+%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARM := 5
+
+%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOOS := linux
+%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARCH := arm
+%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARM := 6
+
+%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOOS := linux
+%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARCH := arm64
+%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARM := 7
+
+%mips.deb %linux_mips.tar.gz: export GOOS := linux
+%mips.deb %linux_mips.tar.gz: export GOARCH := mips
+
+%mipsel.deb %linux_mipsel.tar.gz: export GOOS := linux
+%mipsel.deb %linux_mipsel.tar.gz: export GOARCH := mipsle
+
+%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOOS := linux
+%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOARCH := s390x
+
+%freebsd_amd64.tar.gz: export GOOS := freebsd
+%freebsd_amd64.tar.gz: export GOARCH := amd64
+
+%freebsd_i386.tar.gz: export GOOS := freebsd
+%freebsd_i386.tar.gz: export GOARCH := 386
+
+%windows_amd64.zip: export GOOS := windows
+%windows_amd64.zip: export GOARCH := amd64
+
+%darwin_amd64.tar.gz: export GOOS := darwin
+%darwin_amd64.tar.gz: export GOARCH := amd64
+
+%windows_i386.zip: export GOOS := windows
+%windows_i386.zip: export GOARCH := 386
+
+%windows_i386.zip %windows_amd64.zip: export prefix =
+%windows_i386.zip %windows_amd64.zip: export bindir = $(prefix)
+%windows_i386.zip %windows_amd64.zip: export sysconfdir = $(prefix)
+%windows_i386.zip %windows_amd64.zip: export localstatedir = $(prefix)
+%windows_i386.zip %windows_amd64.zip: export EXEEXT := .exe
-.PHONY: ci-1.10
-ci-1.10:
- docker build -t quay.io/influxdb/telegraf-ci:1.10.8 - < scripts/ci-1.10.docker
- docker push quay.io/influxdb/telegraf-ci:1.10.8
+%.deb: export pkg := deb
+%.deb: export prefix := /usr
+%.deb: export conf_suffix := .sample
+%.deb: export sysconfdir := /etc
+%.deb: export localstatedir := /var
+%.rpm: export pkg := rpm
+%.rpm: export prefix := /usr
+%.rpm: export sysconfdir := /etc
+%.rpm: export localstatedir := /var
+%.tar.gz: export pkg := tar
+%.tar.gz: export prefix := /usr
+%.tar.gz: export sysconfdir := /etc
+%.tar.gz: export localstatedir := /var
+%.zip: export pkg := zip
+%.zip: export prefix := /
-.PHONY: ci-1.9
-ci-1.9:
- docker build -t quay.io/influxdb/telegraf-ci:1.9.7 - < scripts/ci-1.9.docker
- docker push quay.io/influxdb/telegraf-ci:1.9.7
+%.deb %.rpm %.tar.gz %.zip: export DESTDIR = build/$(GOOS)-$(GOARCH)$(GOARM)$(cgo)-$(pkg)/telegraf-$(version)
+%.deb %.rpm %.tar.gz %.zip: export buildbin = build/$(GOOS)-$(GOARCH)$(GOARM)$(cgo)/telegraf$(EXEEXT)
+%.deb %.rpm %.tar.gz %.zip: export LDFLAGS = -w -s
diff --git a/README.md b/README.md
index 758d7acb0dddf..a136e4e72d04c 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,35 @@
# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
+[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack)
+# Logz.io Output Plugin
+
+This plugin sends metrics to Logz.io over HTTPs.
+
+### Configuration:
+
+```toml
+# A plugin that can send metrics over HTTPs to Logz.io
+[[outputs.http]]
+ ## Set to true if Logz.io sender checks the disk space before adding metrics to the disk queue.
+ # check_disk_space = true
+
+ ## The percent of used file system space at which the sender will stop queueing.
+ ## When we will reach that percentage, the file system in which the queue is stored will drop
+ ## all new logs until the percentage of used space drops below that threshold.
+ # disk_threshold = 98
+
+New plugins are designed to be easy to contribute, pull requests are welcomed
+and we work to incorporate as many pull requests as possible.
+
+## Try in Browser :rocket:
+
+You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/).
+ ## How often Logz.io sender should drain the queue.
+ ## Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+ # drain_duration = "3s"
+
+ ## Where Logz.io sender should store the queue
+ ## queue_dir = Sprintf("%s%s%s%s%d", os.TempDir(), string(os.PathSeparator),
+ ## "logzio-buffer", string(os.PathSeparator), time.Now().UnixNano())
Telegraf is an agent for collecting, processing, aggregating, and writing metrics.
@@ -24,6 +55,17 @@ There are many ways to contribute:
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/)
- [Contribute plugins](CONTRIBUTING.md)
+- [Contribute external plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) *(experimental)*
+
+## Minimum Requirements
+
+Telegraf shares the same [minimum requirements][] as Go:
+- Linux kernel version 2.6.23 or later
+- Windows 7 or later
+- FreeBSD 11.2 or later
+- MacOS 10.11 El Capitan or later
+
+[minimum requirements]: https://github.com/golang/go/wiki/MinimumRequirements#minimum-requirements
## Installation:
@@ -36,6 +78,17 @@ Ansible role: https://github.com/rossmcdonald/telegraf
### From Source:
+Telegraf requires Go version 1.13 or newer, the Makefile requires GNU make.
+
+1. [Install Go](https://golang.org/doc/install) >=1.13 (1.14 recommended)
+2. Clone the Telegraf repository:
+ ```
+ cd ~/src
+ git clone https://github.com/influxdata/telegraf.git
+ ```
+3. Run `make` from the source directory
+ ```
+ cd ~/src/telegraf
Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make.
1. [Install Go](https://golang.org/doc/install) >=1.9 (1.11 recommended)
@@ -58,6 +111,7 @@ version.
### Nightly Builds
These builds are generated from the master branch:
+- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz)
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
@@ -99,6 +153,10 @@ telegraf config > telegraf.conf
#### Generate config with only cpu input & influxdb output plugins defined:
```
+telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config
+```
+
+#### Run a single telegraf collection, outputting metrics to stdout:
telegraf --input-filter cpu --output-filter influxdb config
```
@@ -109,6 +167,11 @@ telegraf --config telegraf.conf --test
```
#### Run telegraf with all plugins defined in config file:
+ ## Logz.io account token
+ token = "your Logz.io token" # required
+
+ ## Use your listener URL for your Logz.io account region.
+ # url = "https://listener.logz.io:8071"
```
telegraf --config telegraf.conf
@@ -135,6 +198,13 @@ For documentation on the latest development code see the [documentation index][d
* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
+* [apcupsd](./plugins/inputs/apcupsd)
+* [aurora](./plugins/inputs/aurora)
+* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch)
+* [azure_storage_queue](./plugins/inputs/azure_storage_queue)
+* [bcache](./plugins/inputs/bcache)
+* [beanstalkd](./plugins/inputs/beanstalkd)
+* [bind](./plugins/inputs/bind)
* [aurora](./plugins/inputs/aurora)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
@@ -146,6 +216,9 @@ For documentation on the latest development code see the [documentation index][d
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
+* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) (deprecated, renamed to [gnmi](/plugins/inputs/gnmi))
+* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt)
+* [clickhouse](./plugins/inputs/clickhouse)
* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub
* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint
* [conntrack](./plugins/inputs/conntrack)
@@ -160,6 +233,14 @@ For documentation on the latest development code see the [documentation index][d
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
+* [docker_log](./plugins/inputs/docker_log)
+* [dovecot](./plugins/inputs/dovecot)
+* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate)
+* [elasticsearch](./plugins/inputs/elasticsearch)
+* [ethtool](./plugins/inputs/ethtool)
+* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub)
+* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
+* [execd](./plugins/inputs/execd) (generic executable "daemon" processes)
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
@@ -168,6 +249,10 @@ For documentation on the latest development code see the [documentation index][d
* [file](./plugins/inputs/file)
* [filestat](./plugins/inputs/filestat)
* [filecount](./plugins/inputs/filecount)
+* [fireboard](/plugins/inputs/fireboard)
+* [fluentd](./plugins/inputs/fluentd)
+* [github](./plugins/inputs/github)
+* [gnmi](./plugins/inputs/gnmi)
* [fluentd](./plugins/inputs/fluentd)
* [github](./plugins/inputs/github)
* [graylog](./plugins/inputs/graylog)
@@ -179,6 +264,7 @@ For documentation on the latest development code see the [documentation index][d
* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats)
* [http_response](./plugins/inputs/http_response)
* [icinga2](./plugins/inputs/icinga2)
+* [infiniband](./plugins/inputs/infiniband)
* [influxdb](./plugins/inputs/influxdb)
* [influxdb_listener](./plugins/inputs/influxdb_listener)
* [internal](./plugins/inputs/internal)
@@ -193,12 +279,21 @@ For documentation on the latest development code see the [documentation index][d
* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry)
* [kafka_consumer](./plugins/inputs/kafka_consumer)
* [kapacitor](./plugins/inputs/kapacitor)
+* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis)
* [kinesis](./plugins/inputs/kinesis_consumer)
* [kernel](./plugins/inputs/kernel)
* [kernel_vmstat](./plugins/inputs/kernel_vmstat)
* [kibana](./plugins/inputs/kibana)
* [kubernetes](./plugins/inputs/kubernetes)
* [kube_inventory](./plugins/inputs/kube_inventory)
+* [lanz](./plugins/inputs/lanz)
+* [leofs](./plugins/inputs/leofs)
+* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs)
+* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail))
+* [logstash](./plugins/inputs/logstash)
+* [lustre2](./plugins/inputs/lustre2)
+* [mailchimp](./plugins/inputs/mailchimp)
+* [marklogic](./plugins/inputs/marklogic)
* [leofs](./plugins/inputs/leofs)
* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs)
* [logparser](./plugins/inputs/logparser)
@@ -209,6 +304,9 @@ For documentation on the latest development code see the [documentation index][d
* [mem](./plugins/inputs/mem)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
+* [modbus](./plugins/inputs/modbus)
+* [mongodb](./plugins/inputs/mongodb)
+* [monit](./plugins/inputs/monit)
* [mongodb](./plugins/inputs/mongodb)
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
* [multifile](./plugins/inputs/multifile)
@@ -222,6 +320,7 @@ For documentation on the latest development code see the [documentation index][d
* [nginx](./plugins/inputs/nginx)
* [nginx_plus_api](./plugins/inputs/nginx_plus_api)
* [nginx_plus](./plugins/inputs/nginx_plus)
+* [nginx_sts](./plugins/inputs/nginx_sts)
* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check)
* [nginx_vts](./plugins/inputs/nginx_vts)
* [nsq_consumer](./plugins/inputs/nsq_consumer)
@@ -230,6 +329,9 @@ For documentation on the latest development code see the [documentation index][d
* [ntpq](./plugins/inputs/ntpq)
* [nvidia_smi](./plugins/inputs/nvidia_smi)
* [openldap](./plugins/inputs/openldap)
+* [openntpd](./plugins/inputs/openntpd)
+* [opensmtpd](./plugins/inputs/opensmtpd)
+* [openweathermap](./plugins/inputs/openweathermap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
* [pgbouncer](./plugins/inputs/pgbouncer)
@@ -240,17 +342,35 @@ For documentation on the latest development code see the [documentation index][d
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [postgresql](./plugins/inputs/postgresql)
* [powerdns](./plugins/inputs/powerdns)
+* [powerdns_recursor](./plugins/inputs/powerdns_recursor)
* [processes](./plugins/inputs/processes)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
+* [redfish](./plugins/inputs/redfish)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
+* [sflow](./plugins/inputs/sflow)
+* [smart](./plugins/inputs/smart)
+* [snmp_legacy](./plugins/inputs/snmp_legacy)
+* [snmp](./plugins/inputs/snmp)
+* [snmp_trap](./plugins/inputs/snmp_trap)
+* [socket_listener](./plugins/inputs/socket_listener)
+* [solr](./plugins/inputs/solr)
+* [sql server](./plugins/inputs/sqlserver) (microsoft)
+* [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring)
+* [statsd](./plugins/inputs/statsd)
+* [suricata](./plugins/inputs/suricata)
+* [swap](./plugins/inputs/swap)
+* [synproxy](./plugins/inputs/synproxy)
+* [syslog](./plugins/inputs/syslog)
+* [sysstat](./plugins/inputs/sysstat)
+* [systemd_units](./plugins/inputs/systemd_units)
* [smart](./plugins/inputs/smart)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [snmp](./plugins/inputs/snmp)
@@ -272,6 +392,7 @@ For documentation on the latest development code see the [documentation index][d
* [twemproxy](./plugins/inputs/twemproxy)
* [udp_listener](./plugins/inputs/socket_listener)
* [unbound](./plugins/inputs/unbound)
+* [uwsgi](./plugins/inputs/uwsgi)
* [varnish](./plugins/inputs/varnish)
* [vsphere](./plugins/inputs/vsphere) VMware vSphere
* [webhooks](./plugins/inputs/webhooks)
@@ -283,6 +404,7 @@ For documentation on the latest development code see the [documentation index][d
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
+* [wireguard](./plugins/inputs/wireguard)
* [wireless](./plugins/inputs/wireless)
* [x509_cert](./plugins/inputs/x509_cert)
* [zfs](./plugins/inputs/zfs)
@@ -295,6 +417,7 @@ For documentation on the latest development code see the [documentation index][d
- [Collectd](/plugins/parsers/collectd)
- [CSV](/plugins/parsers/csv)
- [Dropwizard](/plugins/parsers/dropwizard)
+- [FormUrlencoded](/plugins/parser/form_urlencoded)
- [Graphite](/plugins/parsers/graphite)
- [Grok](/plugins/parsers/grok)
- [JSON](/plugins/parsers/json)
@@ -315,6 +438,48 @@ For documentation on the latest development code see the [documentation index][d
## Processor Plugins
+* [clone](/plugins/processors/clone)
+* [converter](/plugins/processors/converter)
+* [date](/plugins/processors/date)
+* [dedup](/plugins/processors/dedup)
+* [defaults](/plugins/processors/defaults)
+* [enum](/plugins/processors/enum)
+* [execd](/plugins/processors/execd)
+* [ifname](/plugins/processors/ifname)
+* [filepath](/plugins/processors/filepath)
+* [override](/plugins/processors/override)
+* [parser](/plugins/processors/parser)
+* [pivot](/plugins/processors/pivot)
+* [port_name](/plugins/processors/port_name)
+* [printer](/plugins/processors/printer)
+* [regex](/plugins/processors/regex)
+* [rename](/plugins/processors/rename)
+* [reverse_dns](/plugins/processors/reverse_dns)
+* [s2geo](/plugins/processors/s2geo)
+* [starlark](/plugins/processors/starlark)
+* [strings](/plugins/processors/strings)
+* [tag_limit](/plugins/processors/tag_limit)
+* [template](/plugins/processors/template)
+* [topk](/plugins/processors/topk)
+* [unpivot](/plugins/processors/unpivot)
+
+### Required parameters:
+
+* [basicstats](./plugins/aggregators/basicstats)
+* [final](./plugins/aggregators/final)
+* [histogram](./plugins/aggregators/histogram)
+* [merge](./plugins/aggregators/merge)
+* [minmax](./plugins/aggregators/minmax)
+* [valuecounter](./plugins/aggregators/valuecounter)
+* `token`: Your Logz.io token, which can be found under "settings" in your account.
+
+### Optional parameters:
+
+* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x)
+* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb))
+
+## Processor Plugins
+
* [converter](./plugins/processors/converter)
* [enum](./plugins/processors/enum)
* [override](./plugins/processors/override)
@@ -346,6 +511,14 @@ For documentation on the latest development code see the [documentation index][d
* [cratedb](./plugins/outputs/cratedb)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
+* [dynatrace](./plugins/outputs/dynatrace)
+* [elasticsearch](./plugins/outputs/elasticsearch)
+* [exec](./plugins/outputs/exec)
+* [execd](./plugins/outputs/execd)
+* [file](./plugins/outputs/file)
+* [graphite](./plugins/outputs/graphite)
+* [graylog](./plugins/outputs/graylog)
+* [health](./plugins/outputs/health)
* [elasticsearch](./plugins/outputs/elasticsearch)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
@@ -354,14 +527,28 @@ For documentation on the latest development code see the [documentation index][d
* [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato)
+* [logz.io](./plugins/outputs/logzio)
* [mqtt](./plugins/outputs/mqtt)
* [nats](./plugins/outputs/nats)
+* [newrelic](./plugins/outputs/newrelic)
* [nsq](./plugins/outputs/nsq)
* [opentsdb](./plugins/outputs/opentsdb)
* [prometheus](./plugins/outputs/prometheus_client)
* [riemann](./plugins/outputs/riemann)
* [riemann_legacy](./plugins/outputs/riemann_legacy)
* [socket_writer](./plugins/outputs/socket_writer)
+* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring)
+* [syslog](./plugins/outputs/syslog)
+* [tcp](./plugins/outputs/socket_writer)
+* [udp](./plugins/outputs/socket_writer)
+* [warp10](./plugins/outputs/warp10)
+* [wavefront](./plugins/outputs/wavefront)
+* [sumologic](./plugins/outputs/sumologic)
+* `check_disk_space`: Set to true if Logz.io sender checks the disk space before adding metrics to the disk queue.
+* `disk_threshold`: If the queue_dir space crosses this threshold (in % of disk usage), the plugin will start dropping logs.
+* `drain_duration`: Time to sleep between sending attempts.
+* `queue_dir`: Metrics disk path. All the unsent metrics are saved to the disk in this location.
+* `url`: Logz.io listener URL.
* [stackdriver](./plugins/outputs/stackdriver)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)
diff --git a/agent/accumulator.go b/agent/accumulator.go
index 9e0bb11ca0cb8..65000fd98a541 100644
--- a/agent/accumulator.go
+++ b/agent/accumulator.go
@@ -1,21 +1,16 @@
package agent
import (
- "log"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
- "github.com/influxdata/telegraf/selfstat"
-)
-
-var (
- NErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
)
type MetricMaker interface {
- Name() string
+ LogName() string
MakeMetric(metric telegraf.Metric) telegraf.Metric
+ Log() telegraf.Logger
}
type accumulator struct {
@@ -110,8 +105,7 @@ func (ac *accumulator) AddError(err error) {
if err == nil {
return
}
- NErrors.Incr(1)
- log.Printf("E! [%s]: Error in plugin: %v", ac.maker.Name(), err)
+ ac.maker.Log().Errorf("Error in plugin: %v", err)
}
func (ac *accumulator) SetPrecision(precision time.Duration) {
diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go
index 933821701c5e5..38a7e047c9bcc 100644
--- a/agent/accumulator_test.go
+++ b/agent/accumulator_test.go
@@ -9,6 +9,7 @@ import (
"time"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -59,7 +60,6 @@ func TestAccAddError(t *testing.T) {
a.AddError(fmt.Errorf("baz"))
errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
- assert.EqualValues(t, int64(3), NErrors.Get())
require.Len(t, errs, 4) // 4 because of trailing newline
assert.Contains(t, string(errs[0]), "TestPlugin")
assert.Contains(t, string(errs[0]), "foo")
@@ -147,6 +147,14 @@ func (tm *TestMetricMaker) Name() string {
return "TestPlugin"
}
+func (tm *TestMetricMaker) LogName() string {
+ return tm.Name()
+}
+
func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric {
return metric
}
+
+func (tm *TestMetricMaker) Log() telegraf.Logger {
+ return models.NewLogger("TestPlugin", "test", "")
+}
diff --git a/agent/agent.go b/agent/agent.go
index 2687bbc0f2eb0..e7ffee322ff20 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -4,14 +4,16 @@ import (
"context"
"fmt"
"log"
+ "os"
"runtime"
+ "sort"
"sync"
"time"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/config"
- "github.com/influxdata/telegraf/internal/models"
+ "github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/serializers/influx"
)
@@ -28,6 +30,70 @@ func NewAgent(config *config.Config) (*Agent, error) {
return a, nil
}
+// inputUnit is a group of input plugins and the shared channel they write to.
+//
+// ┌───────┐
+// │ Input │───┐
+// └───────┘ │
+// ┌───────┐ │ ______
+// │ Input │───┼──▶ ()_____)
+// └───────┘ │
+// ┌───────┐ │
+// │ Input │───┘
+// └───────┘
+type inputUnit struct {
+ dst chan<- telegraf.Metric
+ inputs []*models.RunningInput
+}
+
+// ______ ┌───────────┐ ______
+// ()_____)──▶ │ Processor │──▶ ()_____)
+// └───────────┘
+type processorUnit struct {
+ src <-chan telegraf.Metric
+ dst chan<- telegraf.Metric
+ processor *models.RunningProcessor
+}
+
+// aggregatorUnit is a group of Aggregators and their source and sink channels.
+// Typically the aggregators write to a processor channel and pass the original
+// metrics to the output channel. The sink channels may be the same channel.
+//
+// ┌────────────┐
+// ┌──▶ │ Aggregator │───┐
+// │ └────────────┘ │
+// ______ │ ┌────────────┐ │ ______
+// ()_____)───┼──▶ │ Aggregator │───┼──▶ ()_____)
+// │ └────────────┘ │
+// │ ┌────────────┐ │
+// ├──▶ │ Aggregator │───┘
+// │ └────────────┘
+// │ ______
+// └────────────────────────▶ ()_____)
+type aggregatorUnit struct {
+ src <-chan telegraf.Metric
+ aggC chan<- telegraf.Metric
+ outputC chan<- telegraf.Metric
+ aggregators []*models.RunningAggregator
+}
+
+// outputUnit is a group of Outputs and their source channel. Metrics on the
+// channel are written to all outputs.
+//
+// ┌────────┐
+// ┌──▶ │ Output │
+// │ └────────┘
+// ______ ┌─────┐ │ ┌────────┐
+// ()_____)──▶ │ Fan │───┼──▶ │ Output │
+// └─────┘ │ └────────┘
+// │ ┌────────┐
+// └──▶ │ Output │
+// └────────┘
+type outputUnit struct {
+ src <-chan telegraf.Metric
+ outputs []*models.RunningOutput
+}
+
// Run starts and runs the Agent until the context is done.
func (a *Agent) Run(ctx context.Context) error {
log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
@@ -35,177 +101,183 @@ func (a *Agent) Run(ctx context.Context) error {
a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
- if ctx.Err() != nil {
- return ctx.Err()
+ log.Printf("D! [agent] Initializing plugins")
+ err := a.initPlugins()
+ if err != nil {
+ return err
}
+ startTime := time.Now()
+
log.Printf("D! [agent] Connecting outputs")
- err := a.connectOutputs(ctx)
+ next, ou, err := a.startOutputs(ctx, a.Config.Outputs)
if err != nil {
return err
}
- inputC := make(chan telegraf.Metric, 100)
- procC := make(chan telegraf.Metric, 100)
- outputC := make(chan telegraf.Metric, 100)
+ var apu []*processorUnit
+ var au *aggregatorUnit
+ if len(a.Config.Aggregators) != 0 {
+ aggC := next
+ if len(a.Config.AggProcessors) != 0 {
+ aggC, apu, err = a.startProcessors(next, a.Config.AggProcessors)
+ if err != nil {
+ return err
+ }
+ }
- startTime := time.Now()
+ next, au, err = a.startAggregators(aggC, next, a.Config.Aggregators)
+ if err != nil {
+ return err
+ }
+ }
- log.Printf("D! [agent] Starting service inputs")
- err = a.startServiceInputs(ctx, inputC)
+ var pu []*processorUnit
+ if len(a.Config.Processors) != 0 {
+ next, pu, err = a.startProcessors(next, a.Config.Processors)
+ if err != nil {
+ return err
+ }
+ }
+
+ iu, err := a.startInputs(next, a.Config.Inputs)
if err != nil {
return err
}
var wg sync.WaitGroup
-
- src := inputC
- dst := inputC
-
wg.Add(1)
- go func(dst chan telegraf.Metric) {
+ go func() {
defer wg.Done()
-
- err := a.runInputs(ctx, startTime, dst)
+ err := a.runOutputs(ou)
if err != nil {
- log.Printf("E! [agent] Error running inputs: %v", err)
+ log.Printf("E! [agent] Error running outputs: %v", err)
}
+ }()
- log.Printf("D! [agent] Stopping service inputs")
- a.stopServiceInputs()
-
- close(dst)
- log.Printf("D! [agent] Input channel closed")
- }(dst)
-
- src = dst
-
- if len(a.Config.Processors) > 0 {
- dst = procC
-
+ if au != nil {
wg.Add(1)
- go func(src, dst chan telegraf.Metric) {
+ go func() {
defer wg.Done()
-
- err := a.runProcessors(src, dst)
+ err := a.runProcessors(apu)
if err != nil {
log.Printf("E! [agent] Error running processors: %v", err)
}
- close(dst)
- log.Printf("D! [agent] Processor channel closed")
- }(src, dst)
-
- src = dst
- }
-
- if len(a.Config.Aggregators) > 0 {
- dst = outputC
+ }()
wg.Add(1)
- go func(src, dst chan telegraf.Metric) {
+ go func() {
defer wg.Done()
-
- err := a.runAggregators(startTime, src, dst)
+ err := a.runAggregators(startTime, au)
if err != nil {
log.Printf("E! [agent] Error running aggregators: %v", err)
}
- close(dst)
- log.Printf("D! [agent] Output channel closed")
- }(src, dst)
+ }()
+ }
- src = dst
+ if pu != nil {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.runProcessors(pu)
+ if err != nil {
+ log.Printf("E! [agent] Error running processors: %v", err)
+ }
+ }()
}
wg.Add(1)
- go func(src chan telegraf.Metric) {
+ go func() {
defer wg.Done()
-
- err := a.runOutputs(startTime, src)
+ err := a.runInputs(ctx, startTime, iu)
if err != nil {
- log.Printf("E! [agent] Error running outputs: %v", err)
+ log.Printf("E! [agent] Error running inputs: %v", err)
}
- }(src)
+ }()
wg.Wait()
- log.Printf("D! [agent] Closing outputs")
- a.closeOutputs()
-
log.Printf("D! [agent] Stopped Successfully")
- return nil
+ return err
}
-// Test runs the inputs once and prints the output to stdout in line protocol.
-func (a *Agent) Test(ctx context.Context) error {
- var wg sync.WaitGroup
- metricC := make(chan telegraf.Metric)
- nulC := make(chan telegraf.Metric)
- defer func() {
- close(metricC)
- close(nulC)
- wg.Wait()
- }()
-
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- s := influx.NewSerializer()
- s.SetFieldSortOrder(influx.SortFields)
- for metric := range metricC {
- octets, err := s.Serialize(metric)
- if err == nil {
- fmt.Print("> ", string(octets))
-
- }
+// initPlugins runs the Init function on plugins.
+func (a *Agent) initPlugins() error {
+ for _, input := range a.Config.Inputs {
+ err := input.Init()
+ if err != nil {
+ return fmt.Errorf("could not initialize input %s: %v",
+ input.LogName(), err)
}
- }()
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- for range nulC {
+ }
+ for _, processor := range a.Config.Processors {
+ err := processor.Init()
+ if err != nil {
+ return fmt.Errorf("could not initialize processor %s: %v",
+ processor.Config.Name, err)
}
- }()
+ }
+ for _, aggregator := range a.Config.Aggregators {
+ err := aggregator.Init()
+ if err != nil {
+ return fmt.Errorf("could not initialize aggregator %s: %v",
+ aggregator.Config.Name, err)
+ }
+ }
+ for _, processor := range a.Config.AggProcessors {
+ err := processor.Init()
+ if err != nil {
+ return fmt.Errorf("could not initialize processor %s: %v",
+ processor.Config.Name, err)
+ }
+ }
+ for _, output := range a.Config.Outputs {
+ err := output.Init()
+ if err != nil {
+ return fmt.Errorf("could not initialize output %s: %v",
+ output.Config.Name, err)
+ }
+ }
+ return nil
+}
- for _, input := range a.Config.Inputs {
- select {
- case <-ctx.Done():
- return nil
- default:
- if _, ok := input.Input.(telegraf.ServiceInput); ok {
- log.Printf("W!: [agent] skipping plugin [[%s]]: service inputs not supported in --test mode",
- input.Name())
- continue
- }
+func (a *Agent) startInputs(
+ dst chan<- telegraf.Metric,
+ inputs []*models.RunningInput,
+) (*inputUnit, error) {
+ log.Printf("D! [agent] Starting service inputs")
- acc := NewAccumulator(input, metricC)
- acc.SetPrecision(a.Precision())
- input.SetDefaultTags(a.Config.Tags)
+ unit := &inputUnit{
+ dst: dst,
+ }
- // Special instructions for some inputs. cpu, for example, needs to be
- // run twice in order to return cpu usage percentages.
- switch input.Name() {
- case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
- nulAcc := NewAccumulator(input, nulC)
- nulAcc.SetPrecision(a.Precision())
- if err := input.Input.Gather(nulAcc); err != nil {
- return err
- }
+ for _, input := range inputs {
+ if si, ok := input.Input.(telegraf.ServiceInput); ok {
+ // Service input plugins are not normally subject to timestamp
+ // rounding except for when precision is set on the input plugin.
+ //
+ // This only applies to the accumulator passed to Start(), the
+ // Gather() accumulator does apply rounding according to the
+ // precision and interval agent/plugin settings.
+ var interval time.Duration
+ var precision time.Duration
+ if input.Config.Precision != 0 {
+ precision = input.Config.Precision
+ }
- time.Sleep(500 * time.Millisecond)
- if err := input.Input.Gather(acc); err != nil {
- return err
- }
- default:
- if err := input.Input.Gather(acc); err != nil {
- return err
- }
+ acc := NewAccumulator(input, dst)
+ acc.SetPrecision(getPrecision(precision, interval))
+
+ err := si.Start(acc)
+ if err != nil {
+ stopServiceInputs(unit.inputs)
+ return nil, fmt.Errorf("starting input %s: %w", input.LogName(), err)
}
}
+ unit.inputs = append(unit.inputs, input)
}
- return nil
+ return unit, nil
}
// runInputs starts and triggers the periodic gather for Inputs.
@@ -215,69 +287,184 @@ func (a *Agent) Test(ctx context.Context) error {
func (a *Agent) runInputs(
ctx context.Context,
startTime time.Time,
- dst chan<- telegraf.Metric,
+ unit *inputUnit,
) error {
var wg sync.WaitGroup
- for _, input := range a.Config.Inputs {
- interval := a.Config.Agent.Interval.Duration
- jitter := a.Config.Agent.CollectionJitter.Duration
-
+ for _, input := range unit.inputs {
// Overwrite agent interval if this plugin has its own.
+ interval := a.Config.Agent.Interval.Duration
if input.Config.Interval != 0 {
interval = input.Config.Interval
}
- acc := NewAccumulator(input, dst)
- acc.SetPrecision(a.Precision())
+ // Overwrite agent precision if this plugin has its own.
+ precision := a.Config.Agent.Precision.Duration
+ if input.Config.Precision != 0 {
+ precision = input.Config.Precision
+ }
+
+ // Overwrite agent collection_jitter if this plugin has its own.
+ jitter := a.Config.Agent.CollectionJitter.Duration
+ if input.Config.CollectionJitter != 0 {
+ jitter = input.Config.CollectionJitter
+ }
+
+ var ticker Ticker
+ if a.Config.Agent.RoundInterval {
+ ticker = NewAlignedTicker(startTime, interval, jitter)
+ } else {
+ ticker = NewUnalignedTicker(interval, jitter)
+ }
+ defer ticker.Stop()
+
+ acc := NewAccumulator(input, unit.dst)
+ acc.SetPrecision(getPrecision(precision, interval))
wg.Add(1)
go func(input *models.RunningInput) {
defer wg.Done()
+ a.gatherLoop(ctx, acc, input, ticker, interval)
+ }(input)
+ }
+
+ wg.Wait()
- if a.Config.Agent.RoundInterval {
- err := internal.SleepContext(
- ctx, internal.AlignDuration(startTime, interval))
- if err != nil {
- return
+ log.Printf("D! [agent] Stopping service inputs")
+ stopServiceInputs(unit.inputs)
+
+ close(unit.dst)
+ log.Printf("D! [agent] Input channel closed")
+
+ return nil
+}
+
+// testStartInputs is a variation of startInputs for use in --test and --once
+// mode. It differs by logging Start errors and returning only plugins
+// successfully started.
+func (a *Agent) testStartInputs(
+ dst chan<- telegraf.Metric,
+ inputs []*models.RunningInput,
+) (*inputUnit, error) {
+ log.Printf("D! [agent] Starting service inputs")
+
+ unit := &inputUnit{
+ dst: dst,
+ }
+
+ for _, input := range inputs {
+ if si, ok := input.Input.(telegraf.ServiceInput); ok {
+ // Service input plugins are not subject to timestamp rounding.
+ // This only applies to the accumulator passed to Start(), the
+ // Gather() accumulator does apply rounding according to the
+ // precision agent setting.
+ acc := NewAccumulator(input, dst)
+ acc.SetPrecision(time.Nanosecond)
+
+ err := si.Start(acc)
+ if err != nil {
+ log.Printf("E! [agent] Starting input %s: %v", input.LogName(), err)
+ }
+
+ }
+
+ unit.inputs = append(unit.inputs, input)
+ }
+
+ return unit, nil
+}
+
+// testRunInputs is a variation of runInputs for use in --test and --once mode.
+// Instead of using a ticker to run the inputs they are called once immediately.
+func (a *Agent) testRunInputs(
+ ctx context.Context,
+ wait time.Duration,
+ unit *inputUnit,
+) error {
+ var wg sync.WaitGroup
+
+ nul := make(chan telegraf.Metric)
+ go func() {
+ for range nul {
+ }
+ }()
+
+ for _, input := range unit.inputs {
+ wg.Add(1)
+ go func(input *models.RunningInput) {
+ defer wg.Done()
+
+ // Overwrite agent interval if this plugin has its own.
+ interval := a.Config.Agent.Interval.Duration
+ if input.Config.Interval != 0 {
+ interval = input.Config.Interval
+ }
+
+ // Overwrite agent precision if this plugin has its own.
+ precision := a.Config.Agent.Precision.Duration
+ if input.Config.Precision != 0 {
+ precision = input.Config.Precision
+ }
+
+ // Run plugins that require multiple gathers to calculate rate
+ // and delta metrics twice.
+ switch input.Config.Name {
+ case "cpu", "mongodb", "procstat":
+ nulAcc := NewAccumulator(input, nul)
+ nulAcc.SetPrecision(getPrecision(precision, interval))
+ if err := input.Input.Gather(nulAcc); err != nil {
+ nulAcc.AddError(err)
}
+
+ time.Sleep(500 * time.Millisecond)
}
- a.gatherOnInterval(ctx, acc, input, interval, jitter)
+ acc := NewAccumulator(input, unit.dst)
+ acc.SetPrecision(getPrecision(precision, interval))
+
+ if err := input.Input.Gather(acc); err != nil {
+ acc.AddError(err)
+ }
}(input)
}
wg.Wait()
+ internal.SleepContext(ctx, wait)
+
+ log.Printf("D! [agent] Stopping service inputs")
+ stopServiceInputs(unit.inputs)
+
+ close(unit.dst)
+ log.Printf("D! [agent] Input channel closed")
return nil
}
+// stopServiceInputs stops all service inputs.
+func stopServiceInputs(inputs []*models.RunningInput) {
+ for _, input := range inputs {
+ if si, ok := input.Input.(telegraf.ServiceInput); ok {
+ si.Stop()
+ }
+ }
+}
+
// gather runs an input's gather function periodically until the context is
// done.
-func (a *Agent) gatherOnInterval(
+func (a *Agent) gatherLoop(
ctx context.Context,
acc telegraf.Accumulator,
input *models.RunningInput,
+ ticker Ticker,
interval time.Duration,
- jitter time.Duration,
) {
defer panicRecover(input)
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
-
for {
- err := internal.SleepContext(ctx, internal.RandomDuration(jitter))
- if err != nil {
- return
- }
-
- err = a.gatherOnce(acc, input, interval)
- if err != nil {
- acc.AddError(err)
- }
-
select {
- case <-ticker.C:
- continue
+ case <-ticker.Elapsed():
+ err := a.gatherOnce(acc, input, ticker, interval)
+ if err != nil {
+ acc.AddError(err)
+ }
case <-ctx.Done():
return
}
@@ -289,78 +476,122 @@ func (a *Agent) gatherOnInterval(
func (a *Agent) gatherOnce(
acc telegraf.Accumulator,
input *models.RunningInput,
- timeout time.Duration,
+ ticker Ticker,
+ interval time.Duration,
) error {
- ticker := time.NewTicker(timeout)
- defer ticker.Stop()
-
done := make(chan error)
go func() {
done <- input.Gather(acc)
}()
+ // Only warn after interval seconds, even if the interval is started late.
+ // Intervals can start late if the previous interval went over or due to
+ // clock changes.
+ slowWarning := time.NewTicker(interval)
+ defer slowWarning.Stop()
+
for {
select {
case err := <-done:
return err
- case <-ticker.C:
- log.Printf("W! [agent] input %q did not complete within its interval",
- input.Name())
+ case <-slowWarning.C:
+ log.Printf("W! [%s] Collection took longer than expected; not complete after interval of %s",
+ input.LogName(), interval)
+ case <-ticker.Elapsed():
+ log.Printf("D! [%s] Previous collection has not completed; scheduled collection skipped",
+ input.LogName())
}
}
}
-// runProcessors applies processors to metrics.
-func (a *Agent) runProcessors(
- src <-chan telegraf.Metric,
- agg chan<- telegraf.Metric,
-) error {
- for metric := range src {
- metrics := a.applyProcessors(metric)
+// startProcessors sets up the processor chain and calls Start on all
+// processors. If an error occurs any started processors are Stopped.
+func (a *Agent) startProcessors(
+ dst chan<- telegraf.Metric,
+ processors models.RunningProcessors,
+) (chan<- telegraf.Metric, []*processorUnit, error) {
+ var units []*processorUnit
+
+ // Sort from last to first
+ sort.SliceStable(processors, func(i, j int) bool {
+ return processors[i].Config.Order > processors[j].Config.Order
+ })
- for _, metric := range metrics {
- agg <- metric
+ var src chan telegraf.Metric
+ for _, processor := range processors {
+ src = make(chan telegraf.Metric, 100)
+ acc := NewAccumulator(processor, dst)
+
+ err := processor.Start(acc)
+ if err != nil {
+ for _, u := range units {
+ u.processor.Stop()
+ close(u.dst)
+ }
+ return nil, nil, fmt.Errorf("starting processor %s: %w", processor.LogName(), err)
}
- }
- return nil
-}
+ units = append(units, &processorUnit{
+ src: src,
+ dst: dst,
+ processor: processor,
+ })
-// applyProcessors applies all processors to a metric.
-func (a *Agent) applyProcessors(m telegraf.Metric) []telegraf.Metric {
- metrics := []telegraf.Metric{m}
- for _, processor := range a.Config.Processors {
- metrics = processor.Apply(metrics...)
+ dst = src
}
- return metrics
+ return src, units, nil
}
-func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) {
- var until time.Time
- if roundInterval {
- until = internal.AlignTime(start, period)
- if until == start {
- until = internal.AlignTime(start.Add(time.Nanosecond), period)
- }
- } else {
- until = start.Add(period)
+// runProcessors begins processing metrics and runs until the source channel is
+// closed and all metrics have been written.
+func (a *Agent) runProcessors(
+ units []*processorUnit,
+) error {
+ var wg sync.WaitGroup
+ for _, unit := range units {
+ wg.Add(1)
+ go func(unit *processorUnit) {
+ defer wg.Done()
+
+ acc := NewAccumulator(unit.processor, unit.dst)
+ for m := range unit.src {
+ if err := unit.processor.Add(m, acc); err != nil {
+ acc.AddError(err)
+ m.Drop()
+ }
+ }
+ unit.processor.Stop()
+ close(unit.dst)
+ log.Printf("D! [agent] Processor channel closed")
+ }(unit)
}
+ wg.Wait()
- since := until.Add(-period)
+ return nil
+}
- return since, until
+// startAggregators sets up the aggregator unit and returns the source channel.
+func (a *Agent) startAggregators(
+ aggC chan<- telegraf.Metric,
+ outputC chan<- telegraf.Metric,
+ aggregators []*models.RunningAggregator,
+) (chan<- telegraf.Metric, *aggregatorUnit, error) {
+ src := make(chan telegraf.Metric, 100)
+ unit := &aggregatorUnit{
+ src: src,
+ aggC: aggC,
+ outputC: outputC,
+ aggregators: aggregators,
+ }
+ return src, unit, nil
}
-// runAggregators adds metrics to the aggregators and triggers their periodic
-// push call.
-//
-// Runs until src is closed and all metrics have been processed. Will call
-// push one final time before returning.
+// runAggregators beings aggregating metrics and runs until the source channel
+// is closed and all metrics have been written.
func (a *Agent) runAggregators(
startTime time.Time,
- src <-chan telegraf.Metric,
- dst chan<- telegraf.Metric,
+ unit *aggregatorUnit,
) error {
ctx, cancel := context.WithCancel(context.Background())
@@ -375,7 +606,7 @@ func (a *Agent) runAggregators(
wg.Add(1)
go func() {
defer wg.Done()
- for metric := range src {
+ for metric := range unit.src {
var dropOriginal bool
for _, agg := range a.Config.Aggregators {
if ok := agg.Add(metric); ok {
@@ -384,7 +615,7 @@ func (a *Agent) runAggregators(
}
if !dropOriginal {
- dst <- metric
+ unit.outputC <- metric // keep original.
} else {
metric.Drop()
}
@@ -392,36 +623,45 @@ func (a *Agent) runAggregators(
cancel()
}()
- aggregations := make(chan telegraf.Metric, 100)
- wg.Add(1)
- go func() {
- defer wg.Done()
+ for _, agg := range a.Config.Aggregators {
+ wg.Add(1)
+ go func(agg *models.RunningAggregator) {
+ defer wg.Done()
- var aggWg sync.WaitGroup
- for _, agg := range a.Config.Aggregators {
- aggWg.Add(1)
- go func(agg *models.RunningAggregator) {
- defer aggWg.Done()
+ interval := a.Config.Agent.Interval.Duration
+ precision := a.Config.Agent.Precision.Duration
- acc := NewAccumulator(agg, aggregations)
- acc.SetPrecision(a.Precision())
- a.push(ctx, agg, acc)
- }(agg)
- }
+ acc := NewAccumulator(agg, unit.aggC)
+ acc.SetPrecision(getPrecision(precision, interval))
+ a.push(ctx, agg, acc)
+ }(agg)
+ }
- aggWg.Wait()
- close(aggregations)
- }()
+ wg.Wait()
- for metric := range aggregations {
- metrics := a.applyProcessors(metric)
- for _, metric := range metrics {
- dst <- metric
+ // In the case that there are no processors, both aggC and outputC are the
+ // same channel. If there are processors, we close the aggC and the
+ // processor chain will close the outputC when it finishes processing.
+ close(unit.aggC)
+ log.Printf("D! [agent] Aggregator channel closed")
+
+ return nil
+}
+
+func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) {
+ var until time.Time
+ if roundInterval {
+ until = internal.AlignTime(start, period)
+ if until == start {
+ until = internal.AlignTime(start.Add(time.Nanosecond), period)
}
+ } else {
+ until = start.Add(period)
}
- wg.Wait()
- return nil
+ since := until.Add(-period)
+
+ return since, until
}
// push runs the push for a single aggregator every period.
@@ -448,46 +688,91 @@ func (a *Agent) push(
}
}
-// runOutputs triggers the periodic write for Outputs.
-//
+// startOutputs calls Connect on all outputs and returns the source channel.
+// If an error occurs calling Connect all stared plugins have Close called.
+func (a *Agent) startOutputs(
+ ctx context.Context,
+ outputs []*models.RunningOutput,
+) (chan<- telegraf.Metric, *outputUnit, error) {
+ src := make(chan telegraf.Metric, 100)
+ unit := &outputUnit{src: src}
+ for _, output := range outputs {
+ err := a.connectOutput(ctx, output)
+ if err != nil {
+ for _, output := range unit.outputs {
+ output.Close()
+ }
+ return nil, nil, fmt.Errorf("connecting output %s: %w", output.LogName(), err)
+ }
+
+ unit.outputs = append(unit.outputs, output)
+ }
+
+ return src, unit, nil
+}
+
+// connectOutputs connects to all outputs.
+func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) error {
+ log.Printf("D! [agent] Attempting connection to [%s]", output.LogName())
+ err := output.Output.Connect()
+ if err != nil {
+ log.Printf("E! [agent] Failed to connect to [%s], retrying in 15s, "+
+ "error was '%s'", output.LogName(), err)
+
+ err := internal.SleepContext(ctx, 15*time.Second)
+ if err != nil {
+ return err
+ }
+
+ err = output.Output.Connect()
+ if err != nil {
+ return fmt.Errorf("Error connecting to output %q: %w", output.LogName(), err)
+ }
+ }
+ log.Printf("D! [agent] Successfully connected to %s", output.LogName())
+ return nil
+}
-// Runs until src is closed and all metrics have been processed. Will call
-// Write one final time before returning.
+// runOutputs begins processing metrics and returns until the source channel is
+// closed and all metrics have been written. On shutdown metrics will be
+// written one last time and dropped if unsuccessful.
func (a *Agent) runOutputs(
- startTime time.Time,
- src <-chan telegraf.Metric,
+ unit *outputUnit,
) error {
+ var wg sync.WaitGroup
+
+ // Start flush loop
interval := a.Config.Agent.FlushInterval.Duration
jitter := a.Config.Agent.FlushJitter.Duration
ctx, cancel := context.WithCancel(context.Background())
- var wg sync.WaitGroup
- for _, output := range a.Config.Outputs {
+ for _, output := range unit.outputs {
interval := interval
// Overwrite agent flush_interval if this plugin has its own.
if output.Config.FlushInterval != 0 {
interval = output.Config.FlushInterval
}
+ jitter := jitter
+ // Overwrite agent flush_jitter if this plugin has its own.
+ if output.Config.FlushJitter != 0 {
+ jitter = output.Config.FlushJitter
+ }
+
wg.Add(1)
go func(output *models.RunningOutput) {
defer wg.Done()
- if a.Config.Agent.RoundInterval {
- err := internal.SleepContext(
- ctx, internal.AlignDuration(startTime, interval))
- if err != nil {
- return
- }
- }
+ ticker := NewRollingTicker(interval, jitter)
+ defer ticker.Stop()
- a.flush(ctx, output, interval, jitter)
+ a.flushLoop(ctx, output, ticker)
}(output)
}
- for metric := range src {
- for i, output := range a.Config.Outputs {
+ for metric := range unit.src {
+ for i, output := range unit.outputs {
if i == len(a.Config.Outputs)-1 {
output.AddMetric(metric)
} else {
@@ -503,48 +788,49 @@ func (a *Agent) runOutputs(
return nil
}
-// flush runs an output's flush function periodically until the context is
+// flushLoop runs an output's flush function periodically until the context is
// done.
-func (a *Agent) flush(
+func (a *Agent) flushLoop(
ctx context.Context,
output *models.RunningOutput,
- interval time.Duration,
- jitter time.Duration,
+ ticker Ticker,
) {
- // since we are watching two channels we need a ticker with the jitter
- // integrated.
- ticker := NewTicker(interval, jitter)
- defer ticker.Stop()
-
logError := func(err error) {
if err != nil {
- log.Printf("E! [agent] Error writing to output [%s]: %v", output.Name, err)
+ log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err)
}
}
+ // watch for flush requests
+ flushRequested := make(chan os.Signal, 1)
+ watchForFlushSignal(flushRequested)
+ defer stopListeningForFlushSignal(flushRequested)
+
for {
// Favor shutdown over other methods.
select {
case <-ctx.Done():
- logError(a.flushOnce(output, interval, output.Write))
+ logError(a.flushOnce(output, ticker, output.Write))
return
default:
}
select {
- case <-ticker.C:
- logError(a.flushOnce(output, interval, output.Write))
+ case <-ctx.Done():
+ logError(a.flushOnce(output, ticker, output.Write))
+ return
+ case <-ticker.Elapsed():
+ logError(a.flushOnce(output, ticker, output.Write))
+ case <-flushRequested:
+ logError(a.flushOnce(output, ticker, output.Write))
case <-output.BatchReady:
// Favor the ticker over batch ready
select {
- case <-ticker.C:
- logError(a.flushOnce(output, interval, output.Write))
+ case <-ticker.Elapsed():
+ logError(a.flushOnce(output, ticker, output.Write))
default:
- logError(a.flushOnce(output, interval, output.WriteBatch))
+ logError(a.flushOnce(output, ticker, output.WriteBatch))
}
- case <-ctx.Done():
- logError(a.flushOnce(output, interval, output.Write))
- return
}
}
}
@@ -553,12 +839,9 @@ func (a *Agent) flush(
// interval it fails to complete before.
func (a *Agent) flushOnce(
output *models.RunningOutput,
- timeout time.Duration,
+ ticker Ticker,
writeFunc func() error,
) error {
- ticker := time.NewTicker(timeout)
- defer ticker.Stop()
-
done := make(chan error)
go func() {
done <- writeFunc()
@@ -569,95 +852,269 @@ func (a *Agent) flushOnce(
case err := <-done:
output.LogBufferStatus()
return err
- case <-ticker.C:
- log.Printf("W! [agent] output %q did not complete within its flush interval",
- output.Name)
+ case <-ticker.Elapsed():
+ log.Printf("W! [agent] [%q] did not complete within its flush interval",
+ output.LogName())
output.LogBufferStatus()
}
}
+}
+
+// Test runs the inputs, processors and aggregators for a single gather and
+// writes the metrics to stdout.
+func (a *Agent) Test(ctx context.Context, wait time.Duration) error {
+ src := make(chan telegraf.Metric, 100)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ s := influx.NewSerializer()
+ s.SetFieldSortOrder(influx.SortFields)
+
+ for metric := range src {
+ octets, err := s.Serialize(metric)
+ if err == nil {
+ fmt.Print("> ", string(octets))
+ }
+ metric.Reject()
+ }
+ }()
+
+ err := a.test(ctx, wait, src)
+ if err != nil {
+ return err
+ }
+ wg.Wait()
+
+ if models.GlobalGatherErrors.Get() != 0 {
+ return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get())
+ }
+ return nil
}
-// connectOutputs connects to all outputs.
-func (a *Agent) connectOutputs(ctx context.Context) error {
- for _, output := range a.Config.Outputs {
- log.Printf("D! [agent] Attempting connection to output: %s\n", output.Name)
- err := output.Output.Connect()
- if err != nil {
- log.Printf("E! [agent] Failed to connect to output %s, retrying in 15s, "+
- "error was '%s' \n", output.Name, err)
+// Test runs the agent and performs a single gather sending output to the
+// outputF. After gathering pauses for the wait duration to allow service
+// inputs to run.
+func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- telegraf.Metric) error {
+ log.Printf("D! [agent] Initializing plugins")
+ err := a.initPlugins()
+ if err != nil {
+ return err
+ }
+
+ startTime := time.Now()
- err := internal.SleepContext(ctx, 15*time.Second)
+ next := outputC
+
+ var apu []*processorUnit
+ var au *aggregatorUnit
+ if len(a.Config.Aggregators) != 0 {
+ procC := next
+ if len(a.Config.AggProcessors) != 0 {
+ procC, apu, err = a.startProcessors(next, a.Config.AggProcessors)
if err != nil {
return err
}
+ }
+
+ next, au, err = a.startAggregators(procC, next, a.Config.Aggregators)
+ if err != nil {
+ return err
+ }
+ }
+
+ var pu []*processorUnit
+ if len(a.Config.Processors) != 0 {
+ next, pu, err = a.startProcessors(next, a.Config.Processors)
+ if err != nil {
+ return err
+ }
+ }
+
+ iu, err := a.testStartInputs(next, a.Config.Inputs)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+
+ if au != nil {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.runProcessors(apu)
+ if err != nil {
+ log.Printf("E! [agent] Error running processors: %v", err)
+ }
+ }()
- err = output.Output.Connect()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.runAggregators(startTime, au)
if err != nil {
- return err
+ log.Printf("E! [agent] Error running aggregators: %v", err)
}
- }
- log.Printf("D! [agent] Successfully connected to output: %s\n", output.Name)
+ }()
}
+
+ if pu != nil {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.runProcessors(pu)
+ if err != nil {
+ log.Printf("E! [agent] Error running processors: %v", err)
+ }
+ }()
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.testRunInputs(ctx, wait, iu)
+ if err != nil {
+ log.Printf("E! [agent] Error running inputs: %v", err)
+ }
+ }()
+
+ wg.Wait()
+
+ log.Printf("D! [agent] Stopped Successfully")
+
return nil
}
-// closeOutputs closes all outputs.
-func (a *Agent) closeOutputs() {
+// Once runs the full agent for a single gather.
+func (a *Agent) Once(ctx context.Context, wait time.Duration) error {
+ err := a.once(ctx, wait)
+ if err != nil {
+ return err
+ }
+
+ if models.GlobalGatherErrors.Get() != 0 {
+ return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get())
+ }
+
+ unsent := 0
for _, output := range a.Config.Outputs {
- output.Close()
+ unsent += output.BufferLength()
}
+ if unsent != 0 {
+ return fmt.Errorf("output plugins unable to send %d metrics", unsent)
+ }
+ return nil
}
-// startServiceInputs starts all service inputs.
-func (a *Agent) startServiceInputs(
- ctx context.Context,
- dst chan<- telegraf.Metric,
-) error {
- started := []telegraf.ServiceInput{}
-
- for _, input := range a.Config.Inputs {
- if si, ok := input.Input.(telegraf.ServiceInput); ok {
- // Service input plugins are not subject to timestamp rounding.
- // This only applies to the accumulator passed to Start(), the
- // Gather() accumulator does apply rounding according to the
- // precision agent setting.
- acc := NewAccumulator(input, dst)
- acc.SetPrecision(time.Nanosecond)
+// On runs the agent and performs a single gather sending output to the
+// outputF. After gathering pauses for the wait duration to allow service
+// inputs to run.
+func (a *Agent) once(ctx context.Context, wait time.Duration) error {
+ log.Printf("D! [agent] Initializing plugins")
+ err := a.initPlugins()
+ if err != nil {
+ return err
+ }
- err := si.Start(acc)
- if err != nil {
- log.Printf("E! [agent] Service for input %s failed to start: %v",
- input.Name(), err)
+ startTime := time.Now()
- for _, si := range started {
- si.Stop()
- }
+ log.Printf("D! [agent] Connecting outputs")
+ next, ou, err := a.startOutputs(ctx, a.Config.Outputs)
+ if err != nil {
+ return err
+ }
+ var apu []*processorUnit
+ var au *aggregatorUnit
+ if len(a.Config.Aggregators) != 0 {
+ procC := next
+ if len(a.Config.AggProcessors) != 0 {
+ procC, apu, err = a.startProcessors(next, a.Config.AggProcessors)
+ if err != nil {
return err
}
+ }
- started = append(started, si)
+ next, au, err = a.startAggregators(procC, next, a.Config.Aggregators)
+ if err != nil {
+ return err
}
}
- return nil
-}
+ var pu []*processorUnit
+ if len(a.Config.Processors) != 0 {
+ next, pu, err = a.startProcessors(next, a.Config.Processors)
+ if err != nil {
+ return err
+ }
+ }
-// stopServiceInputs stops all service inputs.
-func (a *Agent) stopServiceInputs() {
- for _, input := range a.Config.Inputs {
- if si, ok := input.Input.(telegraf.ServiceInput); ok {
- si.Stop()
+ iu, err := a.testStartInputs(next, a.Config.Inputs)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.runOutputs(ou)
+ if err != nil {
+ log.Printf("E! [agent] Error running outputs: %v", err)
}
+ }()
+
+ if au != nil {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.runProcessors(apu)
+ if err != nil {
+ log.Printf("E! [agent] Error running processors: %v", err)
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.runAggregators(startTime, au)
+ if err != nil {
+ log.Printf("E! [agent] Error running aggregators: %v", err)
+ }
+ }()
+ }
+
+ if pu != nil {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.runProcessors(pu)
+ if err != nil {
+ log.Printf("E! [agent] Error running processors: %v", err)
+ }
+ }()
}
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := a.testRunInputs(ctx, wait, iu)
+ if err != nil {
+ log.Printf("E! [agent] Error running inputs: %v", err)
+ }
+ }()
+
+ wg.Wait()
+
+ log.Printf("D! [agent] Stopped Successfully")
+
+ return nil
}
// Returns the rounding precision for metrics.
-func (a *Agent) Precision() time.Duration {
- precision := a.Config.Agent.Precision.Duration
- interval := a.Config.Agent.Interval.Duration
-
+func getPrecision(precision, interval time.Duration) time.Duration {
if precision > 0 {
return precision
}
@@ -679,8 +1136,8 @@ func panicRecover(input *models.RunningInput) {
if err := recover(); err != nil {
trace := make([]byte, 2048)
runtime.Stack(trace, true)
- log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
- input.Name(), err, trace)
+ log.Printf("E! FATAL: [%s] panicked: %s, Stack:\n%s",
+ input.LogName(), err, trace)
log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
"stack trace, configuration, and OS information: " +
"https://github.com/influxdata/telegraf/issues/new/choose")
diff --git a/agent/agent_posix.go b/agent/agent_posix.go
new file mode 100644
index 0000000000000..09552cac07026
--- /dev/null
+++ b/agent/agent_posix.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package agent
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+const flushSignal = syscall.SIGUSR1
+
+func watchForFlushSignal(flushRequested chan os.Signal) {
+ signal.Notify(flushRequested, flushSignal)
+}
+
+func stopListeningForFlushSignal(flushRequested chan os.Signal) {
+ defer signal.Stop(flushRequested)
+}
diff --git a/agent/agent_test.go b/agent/agent_test.go
index c822a236b3084..9cc631b17c465 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"time"
- "github.com/influxdata/telegraf/internal/config"
+ "github.com/influxdata/telegraf/config"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
"github.com/stretchr/testify/assert"
@@ -22,35 +22,35 @@ func TestAgent_OmitHostname(t *testing.T) {
func TestAgent_LoadPlugin(t *testing.T) {
c := config.NewConfig()
c.InputFilters = []string{"mysql"}
- err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err := c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ := NewAgent(c)
assert.Equal(t, 1, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"foo"}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 0, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"mysql", "foo"}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 1, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"mysql", "redis"}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Inputs))
@@ -59,42 +59,42 @@ func TestAgent_LoadPlugin(t *testing.T) {
func TestAgent_LoadOutput(t *testing.T) {
c := config.NewConfig()
c.OutputFilters = []string{"influxdb"}
- err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err := c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ := NewAgent(c)
assert.Equal(t, 2, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"kafka"}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 1, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 3, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"foo"}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 0, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"influxdb", "foo"}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"influxdb", "kafka"}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
assert.Equal(t, 3, len(c.Outputs))
a, _ = NewAgent(c)
@@ -102,7 +102,7 @@ func TestAgent_LoadOutput(t *testing.T) {
c = config.NewConfig()
c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
- err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
+ err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 3, len(a.Config.Outputs))
diff --git a/agent/agent_windows.go b/agent/agent_windows.go
new file mode 100644
index 0000000000000..94ed9d006acb2
--- /dev/null
+++ b/agent/agent_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package agent
+
+import "os"
+
+func watchForFlushSignal(flushRequested chan os.Signal) {
+ // not supported
+}
+
+func stopListeningForFlushSignal(flushRequested chan os.Signal) {
+ // not supported
+}
diff --git a/agent/tick.go b/agent/tick.go
index 64dbff50b6275..91b99712a73b4 100644
--- a/agent/tick.go
+++ b/agent/tick.go
@@ -5,53 +5,271 @@ import (
"sync"
"time"
+ "github.com/benbjohnson/clock"
"github.com/influxdata/telegraf/internal"
)
-type Ticker struct {
- C chan time.Time
- ticker *time.Ticker
- jitter time.Duration
- wg sync.WaitGroup
- cancelFunc context.CancelFunc
+type empty struct{}
+
+type Ticker interface {
+ Elapsed() <-chan time.Time
+ Stop()
}
-func NewTicker(
- interval time.Duration,
- jitter time.Duration,
-) *Ticker {
- ctx, cancel := context.WithCancel(context.Background())
+// AlignedTicker delivers ticks at aligned times plus an optional jitter. Each
+// tick is realigned to avoid drift and handle changes to the system clock.
+//
+// The ticks may have an jitter duration applied to them as an random offset to
+// the interval. However the overall pace of is that of the interval, so on
+// average you will have one collection each interval.
+//
+// The first tick is emitted at the next alignment.
+//
+// Ticks are dropped for slow consumers.
+//
+// The implementation currently does not recalculate until the next tick with
+// no maximum sleep, when using large intervals alignment is not corrected
+// until the next tick.
+type AlignedTicker struct {
+ interval time.Duration
+ jitter time.Duration
+ minInterval time.Duration
+ ch chan time.Time
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+}
- t := &Ticker{
- C: make(chan time.Time, 1),
- ticker: time.NewTicker(interval),
- jitter: jitter,
- cancelFunc: cancel,
+func NewAlignedTicker(now time.Time, interval, jitter time.Duration) *AlignedTicker {
+ return newAlignedTicker(now, interval, jitter, clock.New())
+}
+
+func newAlignedTicker(now time.Time, interval, jitter time.Duration, clock clock.Clock) *AlignedTicker {
+ ctx, cancel := context.WithCancel(context.Background())
+ t := &AlignedTicker{
+ interval: interval,
+ jitter: jitter,
+ minInterval: interval / 100,
+ ch: make(chan time.Time, 1),
+ cancel: cancel,
}
+ d := t.next(now)
+ timer := clock.Timer(d)
+
t.wg.Add(1)
- go t.relayTime(ctx)
+ go func() {
+ defer t.wg.Done()
+ t.run(ctx, timer)
+ }()
return t
}
-func (t *Ticker) Stop() {
- t.cancelFunc()
+func (t *AlignedTicker) next(now time.Time) time.Duration {
+ // Add minimum interval size to avoid scheduling an interval that is
+ // exceptionally short. This avoids an issue that can occur where the
+ // previous interval ends slightly early due to very minor clock changes.
+ next := now.Add(t.minInterval)
+
+ next = internal.AlignTime(next, t.interval)
+ d := next.Sub(now)
+ if d == 0 {
+ d = t.interval
+ }
+ d += internal.RandomDuration(t.jitter)
+ return d
+}
+
+func (t *AlignedTicker) run(ctx context.Context, timer *clock.Timer) {
+ for {
+ select {
+ case <-ctx.Done():
+ timer.Stop()
+ return
+ case now := <-timer.C:
+ select {
+ case t.ch <- now:
+ default:
+ }
+
+ d := t.next(now)
+ timer.Reset(d)
+ }
+ }
+}
+
+func (t *AlignedTicker) Elapsed() <-chan time.Time {
+ return t.ch
+}
+
+func (t *AlignedTicker) Stop() {
+ t.cancel()
t.wg.Wait()
}
-func (t *Ticker) relayTime(ctx context.Context) {
- defer t.wg.Done()
+// UnalignedTicker delivers ticks at regular but unaligned intervals. No
+// effort is made to avoid drift.
+//
+// The ticks may have an jitter duration applied to them as an random offset to
+// the interval. However the overall pace of is that of the interval, so on
+// average you will have one collection each interval.
+//
+// The first tick is emitted immediately.
+//
+// Ticks are dropped for slow consumers.
+type UnalignedTicker struct {
+ interval time.Duration
+ jitter time.Duration
+ ch chan time.Time
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+}
+
+func NewUnalignedTicker(interval, jitter time.Duration) *UnalignedTicker {
+ return newUnalignedTicker(interval, jitter, clock.New())
+}
+
+func newUnalignedTicker(interval, jitter time.Duration, clock clock.Clock) *UnalignedTicker {
+ ctx, cancel := context.WithCancel(context.Background())
+ t := &UnalignedTicker{
+ interval: interval,
+ jitter: jitter,
+ ch: make(chan time.Time, 1),
+ cancel: cancel,
+ }
+
+ ticker := clock.Ticker(t.interval)
+ t.ch <- clock.Now()
+
+ t.wg.Add(1)
+ go func() {
+ defer t.wg.Done()
+ t.run(ctx, ticker, clock)
+ }()
+
+ return t
+}
+
+func sleep(ctx context.Context, duration time.Duration, clock clock.Clock) error {
+ if duration == 0 {
+ return nil
+ }
+
+ t := clock.Timer(duration)
+ select {
+ case <-t.C:
+ return nil
+ case <-ctx.Done():
+ t.Stop()
+ return ctx.Err()
+ }
+}
+
+func (t *UnalignedTicker) run(ctx context.Context, ticker *clock.Ticker, clock clock.Clock) {
for {
select {
- case tm := <-t.ticker.C:
- internal.SleepContext(ctx, internal.RandomDuration(t.jitter))
+ case <-ctx.Done():
+ ticker.Stop()
+ return
+ case <-ticker.C:
+ jitter := internal.RandomDuration(t.jitter)
+ err := sleep(ctx, jitter, clock)
+ if err != nil {
+ ticker.Stop()
+ return
+ }
select {
- case t.C <- tm:
+ case t.ch <- clock.Now():
default:
}
+ }
+ }
+}
+
+func (t *UnalignedTicker) InjectTick() {
+ t.ch <- time.Now()
+}
+
+func (t *UnalignedTicker) Elapsed() <-chan time.Time {
+ return t.ch
+}
+
+func (t *UnalignedTicker) Stop() {
+ t.cancel()
+ t.wg.Wait()
+}
+
+// RollingTicker delivers ticks at regular but unaligned intervals.
+//
+// Because the next interval is scheduled based on the interval + jitter, you
+// are guaranteed at least interval seconds without missing a tick and ticks
+// will be evenly scheduled over time.
+//
+// On average you will have one collection each interval + (jitter/2).
+//
+// The first tick is emitted after interval+jitter seconds.
+//
+// Ticks are dropped for slow consumers.
+type RollingTicker struct {
+ interval time.Duration
+ jitter time.Duration
+ ch chan time.Time
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+}
+
+func NewRollingTicker(interval, jitter time.Duration) *RollingTicker {
+ return newRollingTicker(interval, jitter, clock.New())
+}
+
+func newRollingTicker(interval, jitter time.Duration, clock clock.Clock) *RollingTicker {
+ ctx, cancel := context.WithCancel(context.Background())
+ t := &RollingTicker{
+ interval: interval,
+ jitter: jitter,
+ ch: make(chan time.Time, 1),
+ cancel: cancel,
+ }
+
+ d := t.next()
+ timer := clock.Timer(d)
+
+ t.wg.Add(1)
+ go func() {
+ defer t.wg.Done()
+ t.run(ctx, timer)
+ }()
+
+ return t
+}
+
+func (t *RollingTicker) next() time.Duration {
+ return t.interval + internal.RandomDuration(t.jitter)
+}
+
+func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) {
+ for {
+ select {
case <-ctx.Done():
+ timer.Stop()
return
+ case now := <-timer.C:
+ select {
+ case t.ch <- now:
+ default:
+ }
+
+ d := t.next()
+ timer.Reset(d)
}
}
}
+
+func (t *RollingTicker) Elapsed() <-chan time.Time {
+ return t.ch
+}
+
+func (t *RollingTicker) Stop() {
+ t.cancel()
+ t.wg.Wait()
+}
diff --git a/agent/tick_test.go b/agent/tick_test.go
new file mode 100644
index 0000000000000..5b8db7e93d4c6
--- /dev/null
+++ b/agent/tick_test.go
@@ -0,0 +1,262 @@
+package agent
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/stretchr/testify/require"
+)
+
+var format = "2006-01-02T15:04:05.999Z07:00"
+
+func TestAlignedTicker(t *testing.T) {
+ interval := 10 * time.Second
+ jitter := 0 * time.Second
+
+ clock := clock.NewMock()
+ since := clock.Now()
+ until := since.Add(60 * time.Second)
+
+ ticker := newAlignedTicker(since, interval, jitter, clock)
+ defer ticker.Stop()
+
+ expected := []time.Time{
+ time.Unix(10, 0).UTC(),
+ time.Unix(20, 0).UTC(),
+ time.Unix(30, 0).UTC(),
+ time.Unix(40, 0).UTC(),
+ time.Unix(50, 0).UTC(),
+ time.Unix(60, 0).UTC(),
+ }
+
+ actual := []time.Time{}
+
+ clock.Add(10 * time.Second)
+ for !clock.Now().After(until) {
+ select {
+ case tm := <-ticker.Elapsed():
+ actual = append(actual, tm.UTC())
+ }
+ clock.Add(10 * time.Second)
+ }
+
+ require.Equal(t, expected, actual)
+}
+
+func TestAlignedTickerJitter(t *testing.T) {
+ interval := 10 * time.Second
+ jitter := 5 * time.Second
+
+ clock := clock.NewMock()
+ since := clock.Now()
+ until := since.Add(61 * time.Second)
+
+ ticker := newAlignedTicker(since, interval, jitter, clock)
+ defer ticker.Stop()
+
+ last := since
+ for !clock.Now().After(until) {
+ select {
+ case tm := <-ticker.Elapsed():
+ dur := tm.Sub(last)
+ // 10s interval + 5s jitter + up to 1s late firing.
+ require.True(t, dur <= 16*time.Second, "expected elapsed time to be less than 16 seconds, but was %s", dur)
+ require.True(t, dur >= 5*time.Second, "expected elapsed time to be more than 5 seconds, but was %s", dur)
+ last = last.Add(interval)
+ default:
+ }
+ clock.Add(1 * time.Second)
+ }
+}
+
+func TestAlignedTickerMissedTick(t *testing.T) {
+ interval := 10 * time.Second
+ jitter := 0 * time.Second
+
+ clock := clock.NewMock()
+ since := clock.Now()
+
+ ticker := newAlignedTicker(since, interval, jitter, clock)
+ defer ticker.Stop()
+
+ clock.Add(25 * time.Second)
+ tm := <-ticker.Elapsed()
+ require.Equal(t, time.Unix(10, 0).UTC(), tm.UTC())
+ clock.Add(5 * time.Second)
+ tm = <-ticker.Elapsed()
+ require.Equal(t, time.Unix(30, 0).UTC(), tm.UTC())
+}
+
+func TestUnalignedTicker(t *testing.T) {
+ interval := 10 * time.Second
+ jitter := 0 * time.Second
+
+ clock := clock.NewMock()
+ clock.Add(1 * time.Second)
+ since := clock.Now()
+ until := since.Add(60 * time.Second)
+
+ ticker := newUnalignedTicker(interval, jitter, clock)
+ defer ticker.Stop()
+
+ expected := []time.Time{
+ time.Unix(1, 0).UTC(),
+ time.Unix(11, 0).UTC(),
+ time.Unix(21, 0).UTC(),
+ time.Unix(31, 0).UTC(),
+ time.Unix(41, 0).UTC(),
+ time.Unix(51, 0).UTC(),
+ time.Unix(61, 0).UTC(),
+ }
+
+ actual := []time.Time{}
+ for !clock.Now().After(until) {
+ select {
+ case tm := <-ticker.Elapsed():
+ actual = append(actual, tm.UTC())
+ default:
+ }
+ clock.Add(10 * time.Second)
+ }
+
+ require.Equal(t, expected, actual)
+}
+
+func TestRollingTicker(t *testing.T) {
+ interval := 10 * time.Second
+ jitter := 0 * time.Second
+
+ clock := clock.NewMock()
+ clock.Add(1 * time.Second)
+ since := clock.Now()
+ until := since.Add(60 * time.Second)
+
+ ticker := newUnalignedTicker(interval, jitter, clock)
+ defer ticker.Stop()
+
+ expected := []time.Time{
+ time.Unix(1, 0).UTC(),
+ time.Unix(11, 0).UTC(),
+ time.Unix(21, 0).UTC(),
+ time.Unix(31, 0).UTC(),
+ time.Unix(41, 0).UTC(),
+ time.Unix(51, 0).UTC(),
+ time.Unix(61, 0).UTC(),
+ }
+
+ actual := []time.Time{}
+ for !clock.Now().After(until) {
+ select {
+ case tm := <-ticker.Elapsed():
+ actual = append(actual, tm.UTC())
+ default:
+ }
+ clock.Add(10 * time.Second)
+ }
+
+ require.Equal(t, expected, actual)
+}
+
+// Simulates running the Ticker for an hour and displays stats about the
+// operation.
+func TestAlignedTickerDistribution(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ interval := 10 * time.Second
+ jitter := 5 * time.Second
+
+ clock := clock.NewMock()
+ since := clock.Now()
+
+ ticker := newAlignedTicker(since, interval, jitter, clock)
+ defer ticker.Stop()
+ dist := simulatedDist(ticker, clock)
+ printDist(dist)
+ require.True(t, 350 < dist.Count)
+ require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
+}
+
+// Simulates running the Ticker for an hour and displays stats about the
+// operation.
+func TestUnalignedTickerDistribution(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ interval := 10 * time.Second
+ jitter := 5 * time.Second
+
+ clock := clock.NewMock()
+
+ ticker := newUnalignedTicker(interval, jitter, clock)
+ defer ticker.Stop()
+ dist := simulatedDist(ticker, clock)
+ printDist(dist)
+ require.True(t, 350 < dist.Count)
+ require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
+}
+
+// Simulates running the Ticker for an hour and displays stats about the
+// operation.
+func TestRollingTickerDistribution(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ interval := 10 * time.Second
+ jitter := 5 * time.Second
+
+ clock := clock.NewMock()
+
+ ticker := newRollingTicker(interval, jitter, clock)
+ defer ticker.Stop()
+ dist := simulatedDist(ticker, clock)
+ printDist(dist)
+ require.True(t, 275 < dist.Count)
+ require.True(t, 12 < dist.Mean() && 13 > dist.Mean())
+}
+
+type Distribution struct {
+ Buckets [60]int
+ Count int
+ Waittime float64
+}
+
+func (d *Distribution) Mean() float64 {
+ return d.Waittime / float64(d.Count)
+}
+
+func printDist(dist Distribution) {
+ for i, count := range dist.Buckets {
+ fmt.Printf("%2d %s\n", i, strings.Repeat("x", count))
+ }
+ fmt.Printf("Average interval: %f\n", dist.Mean())
+ fmt.Printf("Count: %d\n", dist.Count)
+}
+
+func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution {
+ since := clock.Now()
+ until := since.Add(1 * time.Hour)
+
+ var dist Distribution
+
+ last := clock.Now()
+ for !clock.Now().After(until) {
+ select {
+ case tm := <-ticker.Elapsed():
+ dist.Buckets[tm.Second()] += 1
+ dist.Count++
+ dist.Waittime += tm.Sub(last).Seconds()
+ last = tm
+ default:
+ clock.Add(1 * time.Second)
+ }
+ }
+
+ return dist
+}
diff --git a/aggregator.go b/aggregator.go
index 48aa8e4bf48ae..f168b04d020be 100644
--- a/aggregator.go
+++ b/aggregator.go
@@ -5,11 +5,7 @@ package telegraf
// Add, Push, and Reset can not be called concurrently, so locking is not
// required when implementing an Aggregator plugin.
type Aggregator interface {
- // SampleConfig returns the default configuration of the Input.
- SampleConfig() string
-
- // Description returns a one-sentence description on the Input.
- Description() string
+ PluginDescriber
// Add the metric to the aggregator.
Add(in Metric)
diff --git a/appveyor.yml b/appveyor.yml
index 39ec04425f0be..b454c8dc8d9dd 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,40 +1,35 @@
version: "{build}"
+image: Visual Studio 2019
+
cache:
- - C:\Cache
- - C:\gopath\pkg\dep\sources -> Gopkg.lock
+ - C:\gopath\pkg\mod -> go.sum
+ - C:\ProgramData\chocolatey\bin -> appveyor.yml
+ - C:\ProgramData\chocolatey\lib -> appveyor.yml
clone_folder: C:\gopath\src\github.com\influxdata\telegraf
environment:
GOPATH: C:\gopath
+stack: go 1.14
+
platform: x64
install:
- - IF NOT EXIST "C:\Cache" mkdir C:\Cache
- - IF NOT EXIST "C:\Cache\go1.11.5.msi" curl -o "C:\Cache\go1.11.5.msi" https://storage.googleapis.com/golang/go1.11.5.windows-amd64.msi
- - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- - IF EXIST "C:\Go" rmdir /S /Q C:\Go
- - msiexec.exe /i "C:\Cache\go1.11.5.msi" /quiet
- - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- - go get -d github.com/golang/dep
- - cd "%GOPATH%\src\github.com\golang\dep"
- - git checkout -q v0.5.0
- - go install -ldflags="-X main.version=v0.5.0" ./cmd/dep
+ - choco install make
- cd "%GOPATH%\src\github.com\influxdata\telegraf"
- git config --system core.longpaths true
- go version
- go env
build_script:
- - cmd: C:\GnuWin32\bin\make
+ - make deps
+ - make telegraf
test_script:
- - cmd: C:\GnuWin32\bin\make check
- - cmd: C:\GnuWin32\bin\make test-windows
+ - make check
+ - make test-windows
artifacts:
- path: telegraf.exe
diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go
index 5dd29cef74bf6..7e0b4ec1ca67a 100644
--- a/cmd/telegraf/telegraf.go
+++ b/cmd/telegraf/telegraf.go
@@ -10,13 +10,15 @@ import (
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
"os"
"os/signal"
- "runtime"
+ "sort"
"strings"
"syscall"
+ "time"
"github.com/influxdata/telegraf/agent"
+ "github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/config"
+ "github.com/influxdata/telegraf/internal/goplugin"
"github.com/influxdata/telegraf/logger"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -24,16 +26,17 @@ import (
"github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
_ "github.com/influxdata/telegraf/plugins/processors/all"
- "github.com/kardianos/service"
)
+// If you update these, update usage.go and usage_windows.go
var fDebug = flag.Bool("debug", false,
"turn on debug logging")
var pprofAddr = flag.String("pprof-addr", "",
"pprof address to listen on, not activate pprof if empty")
var fQuiet = flag.Bool("quiet", false,
"run in quiet mode")
-var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
+var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit. Note: Test mode only runs inputs, not processors, aggregators, or outputs")
+var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for service inputs to complete in test mode")
var fConfig = flag.String("config", "", "configuration file to load")
var fConfigDirectory = flag.String("config-directory", "",
"directory containing additional *.conf files")
@@ -41,6 +44,8 @@ var fVersion = flag.Bool("version", false, "display the version and exit")
var fSampleConfig = flag.Bool("sample-config", false,
"print out full sample configuration")
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
+var fSectionFilters = flag.String("section-filter", "",
+ "filter the sections to print, separator is ':'. Valid values are 'agent', 'global_tags', 'outputs', 'processors', 'aggregators' and 'inputs'")
var fInputFilters = flag.String("input-filter", "",
"filter the inputs to enable, separator is :")
var fInputList = flag.Bool("input-list", false,
@@ -58,7 +63,11 @@ var fUsage = flag.String("usage", "",
var fService = flag.String("service", "",
"operate on the service (windows only)")
var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)")
+var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)")
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
+var fPlugins = flag.String("plugin-directory", "",
+ "path to directory containing external plugins")
+var fRunOnce = flag.Bool("once", false, "run one gather and exit")
var (
version string
@@ -69,7 +78,6 @@ var (
var stop chan struct{}
func reloadLoop(
- stop chan struct{},
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
@@ -82,7 +90,7 @@ func reloadLoop(
ctx, cancel := context.WithCancel(context.Background())
- signals := make(chan os.Signal)
+ signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt, syscall.SIGHUP,
syscall.SIGTERM, syscall.SIGINT)
go func() {
@@ -100,7 +108,7 @@ func reloadLoop(
}()
err := runAgent(ctx, inputFilters, outputFilters)
- if err != nil {
+ if err != nil && err != context.Canceled {
log.Fatalf("E! [telegraf] Error running agent: %v", err)
}
}
@@ -110,9 +118,6 @@ func runAgent(ctx context.Context,
inputFilters []string,
outputFilters []string,
) error {
- // Setup default logging. This may need to change after reading the config
- // file, but we can configure it to use our logger implementation now.
- logger.SetupLogging(false, false, "")
log.Printf("I! Starting Telegraf %s", version)
// If no other options are specified, load the config file and run.
@@ -133,7 +138,7 @@ func runAgent(ctx context.Context,
if !*fTest && len(c.Outputs) == 0 {
return errors.New("Error: no outputs found, did you provide a valid config file?")
}
- if len(c.Inputs) == 0 {
+ if *fPlugins == "" && len(c.Inputs) == 0 {
return errors.New("Error: no inputs found, did you provide a valid config file?")
}
@@ -153,14 +158,26 @@ func runAgent(ctx context.Context,
}
// Setup logging as configured.
- logger.SetupLogging(
- ag.Config.Agent.Debug || *fDebug,
- ag.Config.Agent.Quiet || *fQuiet,
- ag.Config.Agent.Logfile,
- )
+ logConfig := logger.LogConfig{
+ Debug: ag.Config.Agent.Debug || *fDebug,
+ Quiet: ag.Config.Agent.Quiet || *fQuiet,
+ LogTarget: ag.Config.Agent.LogTarget,
+ Logfile: ag.Config.Agent.Logfile,
+ RotationInterval: ag.Config.Agent.LogfileRotationInterval,
+ RotationMaxSize: ag.Config.Agent.LogfileRotationMaxSize,
+ RotationMaxArchives: ag.Config.Agent.LogfileRotationMaxArchives,
+ }
+
+ logger.SetupLogging(logConfig)
+
+ if *fRunOnce {
+ wait := time.Duration(*fTestWait) * time.Second
+ return ag.Once(ctx, wait)
+ }
- if *fTest {
- return ag.Test(ctx)
+ if *fTest || *fTestWait != 0 {
+ wait := time.Duration(*fTestWait) * time.Second
+ return ag.Test(ctx, wait)
}
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
@@ -195,32 +212,6 @@ func usageExit(rc int) {
os.Exit(rc)
}
-type program struct {
- inputFilters []string
- outputFilters []string
- aggregatorFilters []string
- processorFilters []string
-}
-
-func (p *program) Start(s service.Service) error {
- go p.run()
- return nil
-}
-func (p *program) run() {
- stop = make(chan struct{})
- reloadLoop(
- stop,
- p.inputFilters,
- p.outputFilters,
- p.aggregatorFilters,
- p.processorFilters,
- )
-}
-func (p *program) Stop(s service.Service) error {
- close(stop)
- return nil
-}
-
func formatFullVersion() string {
var parts = []string{"Telegraf"}
@@ -249,7 +240,10 @@ func main() {
flag.Parse()
args := flag.Args()
- inputFilters, outputFilters := []string{}, []string{}
+ sectionFilters, inputFilters, outputFilters := []string{}, []string{}, []string{}
+ if *fSectionFilters != "" {
+ sectionFilters = strings.Split(":"+strings.TrimSpace(*fSectionFilters)+":", ":")
+ }
if *fInputFilters != "" {
inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
}
@@ -265,6 +259,16 @@ func main() {
processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
}
+ logger.SetupLogging(logger.LogConfig{})
+
+ // Load external plugins, if requested.
+ if *fPlugins != "" {
+ log.Printf("I! Loading external plugins from: %s", *fPlugins)
+ if err := goplugin.LoadExternalPlugins(*fPlugins); err != nil {
+ log.Fatal("E! " + err.Error())
+ }
+ }
+
if *pprofAddr != "" {
go func() {
pprofHostPort := *pprofAddr
@@ -289,6 +293,7 @@ func main() {
return
case "config":
config.PrintSampleConfig(
+ sectionFilters,
inputFilters,
outputFilters,
aggregatorFilters,
@@ -301,14 +306,24 @@ func main() {
// switch for flags which just do something and exit immediately
switch {
case *fOutputList:
- fmt.Println("Available Output Plugins:")
+ fmt.Println("Available Output Plugins: ")
+ names := make([]string, 0, len(outputs.Outputs))
for k := range outputs.Outputs {
+ names = append(names, k)
+ }
+ sort.Strings(names)
+ for _, k := range names {
fmt.Printf(" %s\n", k)
}
return
case *fInputList:
fmt.Println("Available Input Plugins:")
+ names := make([]string, 0, len(inputs.Inputs))
for k := range inputs.Inputs {
+ names = append(names, k)
+ }
+ sort.Strings(names)
+ for _, k := range names {
fmt.Printf(" %s\n", k)
}
return
@@ -317,6 +332,7 @@ func main() {
return
case *fSampleConfig:
config.PrintSampleConfig(
+ sectionFilters,
inputFilters,
outputFilters,
aggregatorFilters,
@@ -342,66 +358,10 @@ func main() {
log.Println("Telegraf version already configured to: " + internal.Version())
}
- if runtime.GOOS == "windows" && windowsRunAsService() {
- svcConfig := &service.Config{
- Name: *fServiceName,
- DisplayName: "Telegraf Data Collector Service",
- Description: "Collects data using a series of plugins and publishes it to" +
- "another series of plugins.",
- Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
- }
-
- prg := &program{
- inputFilters: inputFilters,
- outputFilters: outputFilters,
- aggregatorFilters: aggregatorFilters,
- processorFilters: processorFilters,
- }
- s, err := service.New(prg, svcConfig)
- if err != nil {
- log.Fatal("E! " + err.Error())
- }
- // Handle the --service flag here to prevent any issues with tooling that
- // may not have an interactive session, e.g. installing from Ansible.
- if *fService != "" {
- if *fConfig != "" {
- (*svcConfig).Arguments = []string{"--config", *fConfig}
- }
- if *fConfigDirectory != "" {
- (*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory)
- }
- err := service.Control(s, *fService)
- if err != nil {
- log.Fatal("E! " + err.Error())
- }
- os.Exit(0)
- } else {
- err = s.Run()
- if err != nil {
- log.Println("E! " + err.Error())
- }
- }
- } else {
- stop = make(chan struct{})
- reloadLoop(
- stop,
- inputFilters,
- outputFilters,
- aggregatorFilters,
- processorFilters,
- )
- }
-}
-
-// Return true if Telegraf should create a Windows service.
-func windowsRunAsService() bool {
- if *fService != "" {
- return true
- }
-
- if *fRunAsConsole {
- return false
- }
-
- return !service.Interactive()
+ run(
+ inputFilters,
+ outputFilters,
+ aggregatorFilters,
+ processorFilters,
+ )
}
diff --git a/cmd/telegraf/telegraf_posix.go b/cmd/telegraf/telegraf_posix.go
new file mode 100644
index 0000000000000..ca28622f16752
--- /dev/null
+++ b/cmd/telegraf/telegraf_posix.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package main
+
+func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
+ stop = make(chan struct{})
+ reloadLoop(
+ inputFilters,
+ outputFilters,
+ aggregatorFilters,
+ processorFilters,
+ )
+}
diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go
new file mode 100644
index 0000000000000..830e6eaa4f8a0
--- /dev/null
+++ b/cmd/telegraf/telegraf_windows.go
@@ -0,0 +1,124 @@
+// +build windows
+
+package main
+
+import (
+ "log"
+ "os"
+ "runtime"
+
+ "github.com/influxdata/telegraf/logger"
+ "github.com/kardianos/service"
+)
+
+func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
+ if runtime.GOOS == "windows" && windowsRunAsService() {
+ runAsWindowsService(
+ inputFilters,
+ outputFilters,
+ aggregatorFilters,
+ processorFilters,
+ )
+ } else {
+ stop = make(chan struct{})
+ reloadLoop(
+ inputFilters,
+ outputFilters,
+ aggregatorFilters,
+ processorFilters,
+ )
+ }
+}
+
+type program struct {
+ inputFilters []string
+ outputFilters []string
+ aggregatorFilters []string
+ processorFilters []string
+}
+
+func (p *program) Start(s service.Service) error {
+ go p.run()
+ return nil
+}
+func (p *program) run() {
+ stop = make(chan struct{})
+ reloadLoop(
+ p.inputFilters,
+ p.outputFilters,
+ p.aggregatorFilters,
+ p.processorFilters,
+ )
+}
+func (p *program) Stop(s service.Service) error {
+ close(stop)
+ return nil
+}
+
+func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
+ programFiles := os.Getenv("ProgramFiles")
+ if programFiles == "" { // Should never happen
+ programFiles = "C:\\Program Files"
+ }
+ svcConfig := &service.Config{
+ Name: *fServiceName,
+ DisplayName: *fServiceDisplayName,
+ Description: "Collects data using a series of plugins and publishes it to " +
+ "another series of plugins.",
+ Arguments: []string{"--config", programFiles + "\\Telegraf\\telegraf.conf"},
+ }
+
+ prg := &program{
+ inputFilters: inputFilters,
+ outputFilters: outputFilters,
+ aggregatorFilters: aggregatorFilters,
+ processorFilters: processorFilters,
+ }
+ s, err := service.New(prg, svcConfig)
+ if err != nil {
+ log.Fatal("E! " + err.Error())
+ }
+ // Handle the --service flag here to prevent any issues with tooling that
+ // may not have an interactive session, e.g. installing from Ansible.
+ if *fService != "" {
+ if *fConfig != "" {
+ svcConfig.Arguments = []string{"--config", *fConfig}
+ }
+ if *fConfigDirectory != "" {
+ svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory)
+ }
+ //set servicename to service cmd line, to have a custom name after relaunch as a service
+ svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName)
+
+ err := service.Control(s, *fService)
+ if err != nil {
+ log.Fatal("E! " + err.Error())
+ }
+ os.Exit(0)
+ } else {
+ winlogger, err := s.Logger(nil)
+ if err == nil {
+ //When in service mode, register eventlog target andd setup default logging to eventlog
+ logger.RegisterEventLogger(winlogger)
+ logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog})
+ }
+ err = s.Run()
+
+ if err != nil {
+ log.Println("E! " + err.Error())
+ }
+ }
+}
+
+// Return true if Telegraf should create a Windows service.
+func windowsRunAsService() bool {
+ if *fService != "" {
+ return true
+ }
+
+ if *fRunAsConsole {
+ return false
+ }
+
+ return !service.Interactive()
+}
diff --git a/internal/config/aws/credentials.go b/config/aws/credentials.go
similarity index 100%
rename from internal/config/aws/credentials.go
rename to config/aws/credentials.go
diff --git a/internal/config/config.go b/config/config.go
similarity index 75%
rename from internal/config/config.go
rename to config/config.go
index a0fc45a3c8273..4fd65139e2ab9 100644
--- a/internal/config/config.go
+++ b/config/config.go
@@ -20,7 +20,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/models"
+ "github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
@@ -32,6 +32,10 @@ import (
)
var (
+ // Default sections
+ sectionDefaults = []string{"global_tags", "agent", "outputs",
+ "processors", "aggregators", "inputs"}
+
// Default input plugins
inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
"processes", "disk", "diskio"}
@@ -61,22 +65,26 @@ type Config struct {
Outputs []*models.RunningOutput
Aggregators []*models.RunningAggregator
// Processors have a slice wrapper type because they need to be sorted
- Processors models.RunningProcessors
+ Processors models.RunningProcessors
+ AggProcessors models.RunningProcessors
}
func NewConfig() *Config {
c := &Config{
// Agent defaults:
Agent: &AgentConfig{
- Interval: internal.Duration{Duration: 10 * time.Second},
- RoundInterval: true,
- FlushInterval: internal.Duration{Duration: 10 * time.Second},
+ Interval: internal.Duration{Duration: 10 * time.Second},
+ RoundInterval: true,
+ FlushInterval: internal.Duration{Duration: 10 * time.Second},
+ LogTarget: "file",
+ LogfileRotationMaxArchives: 5,
},
Tags: make(map[string]string),
Inputs: make([]*models.RunningInput, 0),
Outputs: make([]*models.RunningOutput, 0),
Processors: make([]*models.RunningProcessor, 0),
+ AggProcessors: make([]*models.RunningProcessor, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
@@ -128,59 +136,98 @@ type AgentConfig struct {
// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
// it fills up, regardless of FlushInterval. Setting this option to true
// does _not_ deactivate FlushInterval.
- FlushBufferWhenFull bool
+ FlushBufferWhenFull bool // deprecated in 0.13; has no effect
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
- UTC bool `toml:"utc"`
+ UTC bool `toml:"utc"` // deprecated in 1.0.0; has no effect
// Debug is the option for running in debug mode
- Debug bool
-
- // Logfile specifies the file to send logs to
- Logfile string
+ Debug bool `toml:"debug"`
// Quiet is the option for running in quiet mode
- Quiet bool
+ Quiet bool `toml:"quiet"`
+
+ // Log target controls the destination for logs and can be one of "file",
+ // "stderr" or, on Windows, "eventlog". When set to "file", the output file
+ // is determined by the "logfile" setting.
+ LogTarget string `toml:"logtarget"`
+
+ // Name of the file to be logged to when using the "file" logtarget. If set to
+ // the empty string then logs are written to stderr.
+ Logfile string `toml:"logfile"`
+
+ // The file will be rotated after the time interval specified. When set
+ // to 0 no time based rotation is performed.
+ LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"`
+
+ // The logfile will be rotated when it becomes larger than the specified
+ // size. When set to 0 no size based rotation is performed.
+ LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"`
+
+ // Maximum number of rotated archives to keep, any older logs are deleted.
+ // If set to -1, no archives are removed.
+ LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`
+
Hostname string
OmitHostname bool
}
-// Inputs returns a list of strings of the configured inputs.
+// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
var name []string
for _, input := range c.Inputs {
name = append(name, input.Config.Name)
}
- return name
+ return PluginNameCounts(name)
}
-// Outputs returns a list of strings of the configured aggregators.
+// AggregatorNames returns a list of strings of the configured aggregators.
func (c *Config) AggregatorNames() []string {
var name []string
for _, aggregator := range c.Aggregators {
name = append(name, aggregator.Config.Name)
}
- return name
+ return PluginNameCounts(name)
}
-// Outputs returns a list of strings of the configured processors.
+// ProcessorNames returns a list of strings of the configured processors.
func (c *Config) ProcessorNames() []string {
var name []string
for _, processor := range c.Processors {
- name = append(name, processor.Name)
+ name = append(name, processor.Config.Name)
}
- return name
+ return PluginNameCounts(name)
}
-// Outputs returns a list of strings of the configured outputs.
+// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
var name []string
for _, output := range c.Outputs {
- name = append(name, output.Name)
+ name = append(name, output.Config.Name)
+ }
+ return PluginNameCounts(name)
+}
+
+// PluginNameCounts returns a list of sorted plugin names and their count
+func PluginNameCounts(plugins []string) []string {
+ names := make(map[string]int)
+ for _, plugin := range plugins {
+ names[plugin]++
}
- return name
+
+ var namecount []string
+ for name, count := range names {
+ if count == 1 {
+ namecount = append(namecount, name)
+ } else {
+ namecount = append(namecount, fmt.Sprintf("%s (%dx)", name, count))
+ }
+ }
+
+ sort.Strings(namecount)
+ return namecount
}
// ListTags returns a string of tags specified in the config,
@@ -212,7 +259,8 @@ var header = `# Telegraf Configuration
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
-
+`
+var globalTagsConfig = `
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
@@ -220,7 +268,8 @@ var header = `# Telegraf Configuration
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
-
+`
+var agentConfig = `
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
@@ -234,10 +283,9 @@ var header = `# Telegraf Configuration
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
- ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
- ## output, and will flush this buffer on a successful write. Oldest metrics
- ## are dropped first when this buffer fills.
- ## This buffer only fills when writes fail to output plugin(s).
+ ## Maximum number of unwritten metrics per output. Increasing this value
+ ## allows for longer periods of output downtime without dropping metrics at the
+ ## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
@@ -263,119 +311,169 @@ var header = `# Telegraf Configuration
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
- ## Logging configuration:
- ## Run telegraf with debug log messages.
- debug = false
- ## Run telegraf in quiet mode (error log messages only).
- quiet = false
- ## Specify the log file name. The empty string means to log to stderr.
- logfile = ""
+ ## Log at debug level.
+ # debug = false
+ ## Log only error level messages.
+ # quiet = false
+
+ ## Log target controls the destination for logs and can be one of "file",
+ ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
+ ## is determined by the "logfile" setting.
+ # logtarget = "file"
+
+ ## Name of the file to be logged to when using the "file" logtarget. If set to
+ ## the empty string then logs are written to stderr.
+ # logfile = ""
+
+ ## The logfile will be rotated after the time interval specified. When set
+ ## to 0 no time based rotation is performed. Logs are rotated only when
+ ## written to, if there is no log activity rotation may be delayed.
+ # logfile_rotation_interval = "0d"
+
+ ## The logfile will be rotated when it becomes larger than the specified
+ ## size. When set to 0 no size based rotation is performed.
+ # logfile_rotation_max_size = "0MB"
+
+ ## Maximum number of rotated archives to keep, any older logs are deleted.
+ ## If set to -1, no archives are removed.
+ # logfile_rotation_max_archives = 5
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do no set the "host" tag in the telegraf agent.
omit_hostname = false
+`
+var outputHeader = `
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
+
`
var processorHeader = `
-
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
+
`
var aggregatorHeader = `
-
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
+
`
var inputHeader = `
-
###############################################################################
# INPUT PLUGINS #
###############################################################################
+
`
var serviceInputHeader = `
-
###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################
+
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(
+ sectionFilters []string,
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
processorFilters []string,
) {
+ // print headers
fmt.Printf(header)
+ if len(sectionFilters) == 0 {
+ sectionFilters = sectionDefaults
+ }
+ printFilteredGlobalSections(sectionFilters)
+
// print output plugins
- if len(outputFilters) != 0 {
- printFilteredOutputs(outputFilters, false)
- } else {
- printFilteredOutputs(outputDefaults, false)
- // Print non-default outputs, commented
- var pnames []string
- for pname := range outputs.Outputs {
- if !sliceContains(pname, outputDefaults) {
- pnames = append(pnames, pname)
+ if sliceContains("outputs", sectionFilters) {
+ if len(outputFilters) != 0 {
+ if len(outputFilters) >= 3 && outputFilters[1] != "none" {
+ fmt.Printf(outputHeader)
}
+ printFilteredOutputs(outputFilters, false)
+ } else {
+ fmt.Printf(outputHeader)
+ printFilteredOutputs(outputDefaults, false)
+ // Print non-default outputs, commented
+ var pnames []string
+ for pname := range outputs.Outputs {
+ if !sliceContains(pname, outputDefaults) {
+ pnames = append(pnames, pname)
+ }
+ }
+ sort.Strings(pnames)
+ printFilteredOutputs(pnames, true)
}
- sort.Strings(pnames)
- printFilteredOutputs(pnames, true)
}
// print processor plugins
- fmt.Printf(processorHeader)
- if len(processorFilters) != 0 {
- printFilteredProcessors(processorFilters, false)
- } else {
- pnames := []string{}
- for pname := range processors.Processors {
- pnames = append(pnames, pname)
+ if sliceContains("processors", sectionFilters) {
+ if len(processorFilters) != 0 {
+ if len(processorFilters) >= 3 && processorFilters[1] != "none" {
+ fmt.Printf(processorHeader)
+ }
+ printFilteredProcessors(processorFilters, false)
+ } else {
+ fmt.Printf(processorHeader)
+ pnames := []string{}
+ for pname := range processors.Processors {
+ pnames = append(pnames, pname)
+ }
+ sort.Strings(pnames)
+ printFilteredProcessors(pnames, true)
}
- sort.Strings(pnames)
- printFilteredProcessors(pnames, true)
}
- // pring aggregator plugins
- fmt.Printf(aggregatorHeader)
- if len(aggregatorFilters) != 0 {
- printFilteredAggregators(aggregatorFilters, false)
- } else {
- pnames := []string{}
- for pname := range aggregators.Aggregators {
- pnames = append(pnames, pname)
+ // print aggregator plugins
+ if sliceContains("aggregators", sectionFilters) {
+ if len(aggregatorFilters) != 0 {
+ if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" {
+ fmt.Printf(aggregatorHeader)
+ }
+ printFilteredAggregators(aggregatorFilters, false)
+ } else {
+ fmt.Printf(aggregatorHeader)
+ pnames := []string{}
+ for pname := range aggregators.Aggregators {
+ pnames = append(pnames, pname)
+ }
+ sort.Strings(pnames)
+ printFilteredAggregators(pnames, true)
}
- sort.Strings(pnames)
- printFilteredAggregators(pnames, true)
}
// print input plugins
- fmt.Printf(inputHeader)
- if len(inputFilters) != 0 {
- printFilteredInputs(inputFilters, false)
- } else {
- printFilteredInputs(inputDefaults, false)
- // Print non-default inputs, commented
- var pnames []string
- for pname := range inputs.Inputs {
- if !sliceContains(pname, inputDefaults) {
- pnames = append(pnames, pname)
+ if sliceContains("inputs", sectionFilters) {
+ if len(inputFilters) != 0 {
+ if len(inputFilters) >= 3 && inputFilters[1] != "none" {
+ fmt.Printf(inputHeader)
+ }
+ printFilteredInputs(inputFilters, false)
+ } else {
+ fmt.Printf(inputHeader)
+ printFilteredInputs(inputDefaults, false)
+ // Print non-default inputs, commented
+ var pnames []string
+ for pname := range inputs.Inputs {
+ if !sliceContains(pname, inputDefaults) {
+ pnames = append(pnames, pname)
+ }
}
+ sort.Strings(pnames)
+ printFilteredInputs(pnames, true)
}
- sort.Strings(pnames)
- printFilteredInputs(pnames, true)
}
}
@@ -432,6 +530,9 @@ func printFilteredInputs(inputFilters []string, commented bool) {
// Print Inputs
for _, pname := range pnames {
+ if pname == "cisco_telemetry_gnmi" {
+ continue
+ }
creator := inputs.Inputs[pname]
input := creator()
@@ -450,6 +551,7 @@ func printFilteredInputs(inputFilters []string, commented bool) {
return
}
sort.Strings(servInputNames)
+
fmt.Printf(serviceInputHeader)
for _, name := range servInputNames {
printConfig(name, servInputs[name], "inputs", commented)
@@ -474,12 +576,17 @@ func printFilteredOutputs(outputFilters []string, commented bool) {
}
}
-type printer interface {
- Description() string
- SampleConfig() string
+func printFilteredGlobalSections(sectionFilters []string) {
+ if sliceContains("global_tags", sectionFilters) {
+ fmt.Printf(globalTagsConfig)
+ }
+
+ if sliceContains("agent", sectionFilters) {
+ fmt.Printf(agentConfig)
+ }
}
-func printConfig(name string, p printer, op string, commented bool) {
+func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) {
comment := ""
if commented {
comment = "# "
@@ -569,7 +676,11 @@ func getDefaultConfigPath() (string, error) {
homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
etcfile := "/etc/telegraf/telegraf.conf"
if runtime.GOOS == "windows" {
- etcfile = `C:\Program Files\Telegraf\telegraf.conf`
+ programFiles := os.Getenv("ProgramFiles")
+ if programFiles == "" { // Should never happen
+ programFiles = `C:\Program Files`
+ }
+ etcfile = programFiles + `\Telegraf\telegraf.conf`
}
for _, path := range []string{envfile, homefile, etcfile} {
if _, err := os.Stat(path); err == nil {
@@ -593,12 +704,20 @@ func (c *Config) LoadConfig(path string) error {
}
data, err := loadConfig(path)
if err != nil {
- return fmt.Errorf("Error loading %s, %s", path, err)
+ return fmt.Errorf("Error loading config file %s: %w", path, err)
+ }
+
+ if err = c.LoadConfigData(data); err != nil {
+ return fmt.Errorf("Error loading config file %s: %w", path, err)
}
+ return nil
+}
+// LoadConfigData loads TOML-formatted config data
+func (c *Config) LoadConfigData(data []byte) error {
tbl, err := parseConfig(data)
if err != nil {
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("Error parsing data: %s", err)
}
// Parse tags tables first:
@@ -606,11 +725,10 @@ func (c *Config) LoadConfig(path string) error {
if val, ok := tbl.Fields[tableName]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
- return fmt.Errorf("%s: invalid configuration", path)
+ return fmt.Errorf("invalid configuration, bad table name %q", tableName)
}
if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
- log.Printf("E! Could not parse [global_tags] config\n")
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("error parsing table name %q: %w", tableName, err)
}
}
}
@@ -619,11 +737,10 @@ func (c *Config) LoadConfig(path string) error {
if val, ok := tbl.Fields["agent"]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
- return fmt.Errorf("%s: invalid configuration", path)
+ return fmt.Errorf("invalid configuration, error parsing agent table")
}
if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
- log.Printf("E! Could not parse [agent] config\n")
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("error parsing agent table: %w", err)
}
}
@@ -644,7 +761,7 @@ func (c *Config) LoadConfig(path string) error {
for name, val := range tbl.Fields {
subTable, ok := val.(*ast.Table)
if !ok {
- return fmt.Errorf("%s: invalid configuration", path)
+ return fmt.Errorf("invalid configuration, error parsing field %q as table", name)
}
switch name {
@@ -655,17 +772,17 @@ func (c *Config) LoadConfig(path string) error {
// legacy [outputs.influxdb] support
case *ast.Table:
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("Error parsing %s, %s", pluginName, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addOutput(pluginName, t); err != nil {
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("Error parsing %s array, %s", pluginName, err)
}
}
default:
- return fmt.Errorf("Unsupported config format: %s, file %s",
- pluginName, path)
+ return fmt.Errorf("Unsupported config format: %s",
+ pluginName)
}
}
case "inputs", "plugins":
@@ -674,17 +791,17 @@ func (c *Config) LoadConfig(path string) error {
// legacy [inputs.cpu] support
case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil {
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("Error parsing %s, %s", pluginName, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addInput(pluginName, t); err != nil {
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("Error parsing %s, %s", pluginName, err)
}
}
default:
- return fmt.Errorf("Unsupported config format: %s, file %s",
- pluginName, path)
+ return fmt.Errorf("Unsupported config format: %s",
+ pluginName)
}
}
case "processors":
@@ -693,12 +810,12 @@ func (c *Config) LoadConfig(path string) error {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addProcessor(pluginName, t); err != nil {
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("Error parsing %s, %s", pluginName, err)
}
}
default:
- return fmt.Errorf("Unsupported config format: %s, file %s",
- pluginName, path)
+ return fmt.Errorf("Unsupported config format: %s",
+ pluginName)
}
}
case "aggregators":
@@ -707,19 +824,19 @@ func (c *Config) LoadConfig(path string) error {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addAggregator(pluginName, t); err != nil {
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("Error parsing %s, %s", pluginName, err)
}
}
default:
- return fmt.Errorf("Unsupported config format: %s, file %s",
- pluginName, path)
+ return fmt.Errorf("Unsupported config format: %s",
+ pluginName)
}
}
// Assume it's an input input for legacy config file support if no other
// identifiers are present
default:
if err = c.addInput(name, subTable); err != nil {
- return fmt.Errorf("Error parsing %s, %s", path, err)
+ return fmt.Errorf("Error parsing %s, %s", name, err)
}
}
}
@@ -760,14 +877,16 @@ func loadConfig(config string) ([]byte, error) {
}
func fetchConfig(u *url.URL) ([]byte, error) {
- v := os.Getenv("INFLUX_TOKEN")
-
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
- req.Header.Add("Authorization", "Token "+v)
+
+ if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists {
+ req.Header.Add("Authorization", "Token "+v)
+ }
req.Header.Add("Accept", "application/toml")
+ req.Header.Set("User-Agent", internal.ProductToken())
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
@@ -837,27 +956,50 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
if !ok {
return fmt.Errorf("Undefined but requested processor: %s", name)
}
- processor := creator()
processorConfig, err := buildProcessor(name, table)
if err != nil {
return err
}
- if err := toml.UnmarshalTable(table, processor); err != nil {
+ rf, err := c.newRunningProcessor(creator, processorConfig, name, table)
+ if err != nil {
return err
}
+ c.Processors = append(c.Processors, rf)
- rf := &models.RunningProcessor{
- Name: name,
- Processor: processor,
- Config: processorConfig,
+ // save a copy for the aggregator
+ rf, err = c.newRunningProcessor(creator, processorConfig, name, table)
+ if err != nil {
+ return err
}
+ c.AggProcessors = append(c.AggProcessors, rf)
- c.Processors = append(c.Processors, rf)
return nil
}
+func (c *Config) newRunningProcessor(
+ creator processors.StreamingCreator,
+ processorConfig *models.ProcessorConfig,
+ name string,
+ table *ast.Table,
+) (*models.RunningProcessor, error) {
+ processor := creator()
+
+ if p, ok := processor.(unwrappable); ok {
+ if err := toml.UnmarshalTable(table, p.Unwrap()); err != nil {
+ return nil, err
+ }
+ } else {
+ if err := toml.UnmarshalTable(table, processor); err != nil {
+ return nil, err
+ }
+ }
+
+ rf := models.NewRunningProcessor(processor, processorConfig)
+ return rf, nil
+}
+
func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil
@@ -911,8 +1053,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
// If the input has a SetParser function, then this means it can accept
// arbitrary types of input, so build the parser and set it.
- switch t := input.(type) {
- case parsers.ParserInput:
+ if t, ok := input.(parsers.ParserInput); ok {
parser, err := buildParser(name, table)
if err != nil {
return err
@@ -920,8 +1061,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
t.SetParser(parser)
}
- switch t := input.(type) {
- case parsers.ParserFuncInput:
+ if t, ok := input.(parsers.ParserFuncInput); ok {
config, err := getParserConfig(name, table)
if err != nil {
return err
@@ -954,32 +1094,19 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err
Name: name,
Delay: time.Millisecond * 100,
Period: time.Second * 30,
+ Grace: time.Second * 0,
}
- if node, ok := tbl.Fields["period"]; ok {
- if kv, ok := node.(*ast.KeyValue); ok {
- if str, ok := kv.Value.(*ast.String); ok {
- dur, err := time.ParseDuration(str.Value)
- if err != nil {
- return nil, err
- }
-
- conf.Period = dur
- }
- }
+ if err := getConfigDuration(tbl, "period", &conf.Period); err != nil {
+ return nil, err
}
- if node, ok := tbl.Fields["delay"]; ok {
- if kv, ok := node.(*ast.KeyValue); ok {
- if str, ok := kv.Value.(*ast.String); ok {
- dur, err := time.ParseDuration(str.Value)
- if err != nil {
- return nil, err
- }
+ if err := getConfigDuration(tbl, "delay", &conf.Delay); err != nil {
+ return nil, err
+ }
- conf.Delay = dur
- }
- }
+ if err := getConfigDuration(tbl, "grace", &conf.Grace); err != nil {
+ return nil, err
}
if node, ok := tbl.Fields["drop_original"]; ok {
@@ -988,7 +1115,7 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err
var err error
conf.DropOriginal, err = strconv.ParseBool(b.Value)
if err != nil {
- log.Printf("Error parsing boolean value for %s: %s\n", name, err)
+ return nil, fmt.Errorf("error parsing boolean value for %s: %s", name, err)
}
}
}
@@ -1018,21 +1145,28 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err
}
}
+ if node, ok := tbl.Fields["alias"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ conf.Alias = str.Value
+ }
+ }
+ }
+
conf.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
- log.Printf("Could not parse tags for input %s\n", name)
+ return nil, fmt.Errorf("could not parse tags for input %s", name)
}
}
}
- delete(tbl.Fields, "period")
- delete(tbl.Fields, "delay")
delete(tbl.Fields, "drop_original")
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
+ delete(tbl.Fields, "alias")
delete(tbl.Fields, "tags")
var err error
conf.Filter, err = buildFilter(tbl)
@@ -1054,12 +1188,21 @@ func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error
var err error
conf.Order, err = strconv.ParseInt(b.Value, 10, 64)
if err != nil {
- log.Printf("Error parsing int value for %s: %s\n", name, err)
+ return nil, fmt.Errorf("error parsing int value for %s: %s", name, err)
}
}
}
}
+ if node, ok := tbl.Fields["alias"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ conf.Alias = str.Value
+ }
+ }
+ }
+
+ delete(tbl.Fields, "alias")
delete(tbl.Fields, "order")
var err error
conf.Filter, err = buildFilter(tbl)
@@ -1211,17 +1354,17 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
// models.InputConfig to be inserted into models.RunningInput
func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
cp := &models.InputConfig{Name: name}
- if node, ok := tbl.Fields["interval"]; ok {
- if kv, ok := node.(*ast.KeyValue); ok {
- if str, ok := kv.Value.(*ast.String); ok {
- dur, err := time.ParseDuration(str.Value)
- if err != nil {
- return nil, err
- }
- cp.Interval = dur
- }
- }
+ if err := getConfigDuration(tbl, "interval", &cp.Interval); err != nil {
+ return nil, err
+ }
+
+ if err := getConfigDuration(tbl, "precision", &cp.Precision); err != nil {
+ return nil, err
+ }
+
+ if err := getConfigDuration(tbl, "collection_jitter", &cp.CollectionJitter); err != nil {
+ return nil, err
}
if node, ok := tbl.Fields["name_prefix"]; ok {
@@ -1248,11 +1391,19 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
}
}
+ if node, ok := tbl.Fields["alias"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ cp.Alias = str.Value
+ }
+ }
+ }
+
cp.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
- log.Printf("E! Could not parse tags for input %s\n", name)
+ return nil, fmt.Errorf("could not parse tags for input %s\n", name)
}
}
}
@@ -1260,7 +1411,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
- delete(tbl.Fields, "interval")
+ delete(tbl.Fields, "alias")
delete(tbl.Fields, "tags")
var err error
cp.Filter, err = buildFilter(tbl)
@@ -1282,7 +1433,9 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
}
func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
- c := &parsers.Config{}
+ c := &parsers.Config{
+ JSONStrict: true,
+ }
if node, ok := tbl.Fields["data_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
@@ -1383,6 +1536,18 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
}
}
+ if node, ok := tbl.Fields["json_strict"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if b, ok := kv.Value.(*ast.Boolean); ok {
+ var err error
+ c.JSONStrict, err = b.Boolean()
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
if node, ok := tbl.Fields["data_type"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
@@ -1606,6 +1771,14 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
}
}
+ if node, ok := tbl.Fields["csv_timezone"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ c.CSVTimezone = str.Value
+ }
+ }
+ }
+
if node, ok := tbl.Fields["csv_header_row_count"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
@@ -1655,6 +1828,18 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
}
}
+ if node, ok := tbl.Fields["form_urlencoded_tag_keys"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if ary, ok := kv.Value.(*ast.Array); ok {
+ for _, elem := range ary.Value {
+ if str, ok := elem.(*ast.String); ok {
+ c.FormUrlencodedTagKeys = append(c.FormUrlencodedTagKeys, str.Value)
+ }
+ }
+ }
+ }
+ }
+
c.MetricName = name
delete(tbl.Fields, "data_format")
@@ -1667,6 +1852,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
delete(tbl.Fields, "json_time_format")
delete(tbl.Fields, "json_time_key")
delete(tbl.Fields, "json_timezone")
+ delete(tbl.Fields, "json_strict")
delete(tbl.Fields, "data_type")
delete(tbl.Fields, "collectd_auth_file")
delete(tbl.Fields, "collectd_security_level")
@@ -1695,7 +1881,9 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
delete(tbl.Fields, "csv_tag_columns")
delete(tbl.Fields, "csv_timestamp_column")
delete(tbl.Fields, "csv_timestamp_format")
+ delete(tbl.Fields, "csv_timezone")
delete(tbl.Fields, "csv_trim_space")
+ delete(tbl.Fields, "form_urlencoded_tag_keys")
return c, nil
}
@@ -1734,6 +1922,26 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
}
}
+ if node, ok := tbl.Fields["templates"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if ary, ok := kv.Value.(*ast.Array); ok {
+ for _, elem := range ary.Value {
+ if str, ok := elem.(*ast.String); ok {
+ c.Templates = append(c.Templates, str.Value)
+ }
+ }
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["carbon2_format"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ c.Carbon2Format = str.Value
+ }
+ }
+ }
+
if node, ok := tbl.Fields["influx_max_line_bytes"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
@@ -1782,6 +1990,14 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
}
}
+ if node, ok := tbl.Fields["graphite_separator"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ c.GraphiteSeparator = str.Value
+ }
+ }
+ }
+
if node, ok := tbl.Fields["json_timestamp_units"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
@@ -1810,6 +2026,18 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
}
}
+ if node, ok := tbl.Fields["splunkmetric_multimetric"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if b, ok := kv.Value.(*ast.Boolean); ok {
+ var err error
+ c.SplunkmetricMultiMetric, err = b.Boolean()
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
if node, ok := tbl.Fields["wavefront_source_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
@@ -1834,17 +2062,60 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
}
}
+ if node, ok := tbl.Fields["prometheus_export_timestamp"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if b, ok := kv.Value.(*ast.Boolean); ok {
+ var err error
+ c.PrometheusExportTimestamp, err = b.Boolean()
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["prometheus_sort_metrics"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if b, ok := kv.Value.(*ast.Boolean); ok {
+ var err error
+ c.PrometheusSortMetrics, err = b.Boolean()
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["prometheus_string_as_label"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if b, ok := kv.Value.(*ast.Boolean); ok {
+ var err error
+ c.PrometheusStringAsLabel, err = b.Boolean()
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ delete(tbl.Fields, "carbon2_format")
delete(tbl.Fields, "influx_max_line_bytes")
delete(tbl.Fields, "influx_sort_fields")
delete(tbl.Fields, "influx_uint_support")
delete(tbl.Fields, "graphite_tag_support")
+ delete(tbl.Fields, "graphite_separator")
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "prefix")
delete(tbl.Fields, "template")
+ delete(tbl.Fields, "templates")
delete(tbl.Fields, "json_timestamp_units")
delete(tbl.Fields, "splunkmetric_hec_routing")
+ delete(tbl.Fields, "splunkmetric_multimetric")
delete(tbl.Fields, "wavefront_source_override")
delete(tbl.Fields, "wavefront_use_strict")
+ delete(tbl.Fields, "prometheus_export_timestamp")
+ delete(tbl.Fields, "prometheus_sort_metrics")
+ delete(tbl.Fields, "prometheus_string_as_label")
return serializers.NewSerializer(c)
}
@@ -1871,17 +2142,12 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
oc.Filter.NamePass = oc.Filter.FieldPass
}
- if node, ok := tbl.Fields["flush_interval"]; ok {
- if kv, ok := node.(*ast.KeyValue); ok {
- if str, ok := kv.Value.(*ast.String); ok {
- dur, err := time.ParseDuration(str.Value)
- if err != nil {
- return nil, err
- }
+ if err := getConfigDuration(tbl, "flush_interval", &oc.FlushInterval); err != nil {
+ return nil, err
+ }
- oc.FlushInterval = dur
- }
- }
+ if err := getConfigDuration(tbl, "flush_jitter", &oc.FlushJitter); err != nil {
+ return nil, err
}
if node, ok := tbl.Fields["metric_buffer_limit"]; ok {
@@ -1908,9 +2174,67 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
}
}
- delete(tbl.Fields, "flush_interval")
+ if node, ok := tbl.Fields["alias"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ oc.Alias = str.Value
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["name_override"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ oc.NameOverride = str.Value
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["name_suffix"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ oc.NameSuffix = str.Value
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["name_prefix"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ oc.NamePrefix = str.Value
+ }
+ }
+ }
+
delete(tbl.Fields, "metric_buffer_limit")
delete(tbl.Fields, "metric_batch_size")
+ delete(tbl.Fields, "alias")
+ delete(tbl.Fields, "name_override")
+ delete(tbl.Fields, "name_suffix")
+ delete(tbl.Fields, "name_prefix")
return oc, nil
}
+
+// unwrappable lets you retrieve the original telegraf.Processor from the
+// StreamingProcessor. This is necessary because the toml Unmarshaller won't
+// look inside composed types.
+type unwrappable interface {
+ Unwrap() telegraf.Processor
+}
+
+func getConfigDuration(tbl *ast.Table, key string, target *time.Duration) error {
+ if node, ok := tbl.Fields[key]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ d, err := time.ParseDuration(str.Value)
+ if err != nil {
+ return err
+ }
+ delete(tbl.Fields, key)
+ *target = d
+ }
+ }
+ }
+ return nil
+}
diff --git a/internal/config/config_test.go b/config/config_test.go
similarity index 59%
rename from internal/config/config_test.go
rename to config/config_test.go
index 77b0dffd40992..6c5e3662a3151 100644
--- a/internal/config/config_test.go
+++ b/config/config_test.go
@@ -5,13 +5,17 @@ import (
"testing"
"time"
- "github.com/influxdata/telegraf/internal/models"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/inputs/exec"
+ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2"
"github.com/influxdata/telegraf/plugins/inputs/memcached"
"github.com/influxdata/telegraf/plugins/inputs/procstat"
+ httpOut "github.com/influxdata/telegraf/plugins/outputs/http"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
@@ -145,6 +149,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
p, err := parsers.NewParser(&parsers.Config{
MetricName: "exec",
DataFormat: "json",
+ JSONStrict: true,
})
assert.NoError(t, err)
ex.SetParser(p)
@@ -154,6 +159,11 @@ func TestConfig_LoadDirectory(t *testing.T) {
MeasurementSuffix: "_myothercollector",
}
eConfig.Tags = make(map[string]string)
+
+ exec := c.Inputs[1].Input.(*exec.Exec)
+ require.NotNil(t, exec.Log)
+ exec.Log = nil
+
assert.Equal(t, ex, c.Inputs[1].Input,
"Merged Testdata did not produce a correct exec struct.")
assert.Equal(t, eConfig, c.Inputs[1].Config,
@@ -176,3 +186,74 @@ func TestConfig_LoadDirectory(t *testing.T) {
assert.Equal(t, pConfig, c.Inputs[3].Config,
"Merged Testdata did not produce correct procstat metadata.")
}
+
+func TestConfig_LoadSpecialTypes(t *testing.T) {
+ c := NewConfig()
+ err := c.LoadConfig("./testdata/special_types.toml")
+ assert.NoError(t, err)
+ require.Equal(t, 1, len(c.Inputs))
+
+ inputHTTPListener, ok := c.Inputs[0].Input.(*http_listener_v2.HTTPListenerV2)
+ assert.Equal(t, true, ok)
+ // Tests telegraf duration parsing.
+ assert.Equal(t, internal.Duration{Duration: time.Second}, inputHTTPListener.WriteTimeout)
+ // Tests telegraf size parsing.
+ assert.Equal(t, internal.Size{Size: 1024 * 1024}, inputHTTPListener.MaxBodySize)
+ // Tests toml multiline basic strings.
+ assert.Equal(t, "/path/to/my/cert\n", inputHTTPListener.TLSCert)
+}
+
+func TestConfig_FieldNotDefined(t *testing.T) {
+ c := NewConfig()
+ err := c.LoadConfig("./testdata/invalid_field.toml")
+ require.Error(t, err, "invalid field name")
+ assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: Error parsing http_listener_v2, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error())
+
+}
+
+func TestConfig_WrongFieldType(t *testing.T) {
+ c := NewConfig()
+ err := c.LoadConfig("./testdata/wrong_field_type.toml")
+ require.Error(t, err, "invalid field type")
+ assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error())
+
+ c = NewConfig()
+ err = c.LoadConfig("./testdata/wrong_field_type2.toml")
+ require.Error(t, err, "invalid field type2")
+ assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error())
+}
+
+func TestConfig_InlineTables(t *testing.T) {
+ // #4098
+ c := NewConfig()
+ err := c.LoadConfig("./testdata/inline_table.toml")
+ assert.NoError(t, err)
+ require.Equal(t, 2, len(c.Outputs))
+
+ outputHTTP, ok := c.Outputs[1].Output.(*httpOut.HTTP)
+ assert.Equal(t, true, ok)
+ assert.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, outputHTTP.Headers)
+ assert.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude)
+}
+
+func TestConfig_SliceComment(t *testing.T) {
+ t.Skipf("Skipping until #3642 is resolved")
+
+ c := NewConfig()
+ err := c.LoadConfig("./testdata/slice_comment.toml")
+ assert.NoError(t, err)
+ require.Equal(t, 1, len(c.Outputs))
+
+ outputHTTP, ok := c.Outputs[0].Output.(*httpOut.HTTP)
+ assert.Equal(t, []string{"test"}, outputHTTP.Scopes)
+ assert.Equal(t, true, ok)
+}
+
+func TestConfig_BadOrdering(t *testing.T) {
+ // #3444: when not using inline tables, care has to be taken so subsequent configuration
+ // doesn't become part of the table. This is not a bug, but TOML syntax.
+ c := NewConfig()
+ err := c.LoadConfig("./testdata/non_slice_slice.toml")
+ require.Error(t, err, "bad ordering")
+ assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: Error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error())
+}
diff --git a/config/testdata/inline_table.toml b/config/testdata/inline_table.toml
new file mode 100644
index 0000000000000..525fdce17e389
--- /dev/null
+++ b/config/testdata/inline_table.toml
@@ -0,0 +1,7 @@
+[[outputs.http]]
+ headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
+ taginclude = ["org_id"]
+
+[[outputs.http]]
+ headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
+ taginclude = ["org_id"]
diff --git a/config/testdata/invalid_field.toml b/config/testdata/invalid_field.toml
new file mode 100644
index 0000000000000..4c718d7bbe998
--- /dev/null
+++ b/config/testdata/invalid_field.toml
@@ -0,0 +1,2 @@
+[[inputs.http_listener_v2]]
+ not_a_field = true
diff --git a/config/testdata/non_slice_slice.toml b/config/testdata/non_slice_slice.toml
new file mode 100644
index 0000000000000..f92edcc0b2383
--- /dev/null
+++ b/config/testdata/non_slice_slice.toml
@@ -0,0 +1,4 @@
+[[outputs.http]]
+ [outputs.http.headers]
+ Content-Type = "application/json"
+ taginclude = ["org_id"]
diff --git a/internal/config/testdata/single_plugin.toml b/config/testdata/single_plugin.toml
similarity index 100%
rename from internal/config/testdata/single_plugin.toml
rename to config/testdata/single_plugin.toml
diff --git a/internal/config/testdata/single_plugin_env_vars.toml b/config/testdata/single_plugin_env_vars.toml
similarity index 100%
rename from internal/config/testdata/single_plugin_env_vars.toml
rename to config/testdata/single_plugin_env_vars.toml
diff --git a/config/testdata/slice_comment.toml b/config/testdata/slice_comment.toml
new file mode 100644
index 0000000000000..1177e5f8901e2
--- /dev/null
+++ b/config/testdata/slice_comment.toml
@@ -0,0 +1,5 @@
+[[outputs.http]]
+ scopes = [
+ # comment
+ "test" # comment
+ ]
diff --git a/config/testdata/special_types.toml b/config/testdata/special_types.toml
new file mode 100644
index 0000000000000..24b73ae45f1d3
--- /dev/null
+++ b/config/testdata/special_types.toml
@@ -0,0 +1,9 @@
+[[inputs.http_listener_v2]]
+ write_timeout = "1s"
+ max_body_size = "1MiB"
+ tls_cert = """
+/path/to/my/cert
+"""
+ tls_key = '''
+/path/to/my/key
+'''
diff --git a/internal/config/testdata/subconfig/exec.conf b/config/testdata/subconfig/exec.conf
similarity index 100%
rename from internal/config/testdata/subconfig/exec.conf
rename to config/testdata/subconfig/exec.conf
diff --git a/internal/config/testdata/subconfig/memcached.conf b/config/testdata/subconfig/memcached.conf
similarity index 100%
rename from internal/config/testdata/subconfig/memcached.conf
rename to config/testdata/subconfig/memcached.conf
diff --git a/internal/config/testdata/subconfig/procstat.conf b/config/testdata/subconfig/procstat.conf
similarity index 100%
rename from internal/config/testdata/subconfig/procstat.conf
rename to config/testdata/subconfig/procstat.conf
diff --git a/internal/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml
similarity index 99%
rename from internal/config/testdata/telegraf-agent.toml
rename to config/testdata/telegraf-agent.toml
index 9da79605f3833..f71b98206e5e8 100644
--- a/internal/config/testdata/telegraf-agent.toml
+++ b/config/testdata/telegraf-agent.toml
@@ -256,7 +256,7 @@
# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
- # host=localhost user=pqotest password=... sslmode=... dbname=app_production
+ # host=localhost user=pqgotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
diff --git a/config/testdata/wrong_field_type.toml b/config/testdata/wrong_field_type.toml
new file mode 100644
index 0000000000000..237176e7e54b4
--- /dev/null
+++ b/config/testdata/wrong_field_type.toml
@@ -0,0 +1,2 @@
+[[inputs.http_listener_v2]]
+ port = "80"
diff --git a/config/testdata/wrong_field_type2.toml b/config/testdata/wrong_field_type2.toml
new file mode 100644
index 0000000000000..6f3def792e530
--- /dev/null
+++ b/config/testdata/wrong_field_type2.toml
@@ -0,0 +1,2 @@
+[[inputs.http_listener_v2]]
+ methods = "POST"
diff --git a/config/types.go b/config/types.go
new file mode 100644
index 0000000000000..7c1c50b9e3690
--- /dev/null
+++ b/config/types.go
@@ -0,0 +1,88 @@
+package config
+
+import (
+ "bytes"
+ "strconv"
+ "time"
+
+ "github.com/alecthomas/units"
+)
+
+// Duration is a time.Duration
+type Duration time.Duration
+
+// Size is an int64
+type Size int64
+
+// UnmarshalTOML parses the duration from the TOML config file
+func (d *Duration) UnmarshalTOML(b []byte) error {
+ var err error
+ b = bytes.Trim(b, `'`)
+
+ // see if we can directly convert it
+ dur, err := time.ParseDuration(string(b))
+ if err == nil {
+ *d = Duration(dur)
+ return nil
+ }
+
+ // Parse string duration, ie, "1s"
+ if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
+ dur, err := time.ParseDuration(uq)
+ if err == nil {
+ *d = Duration(dur)
+ return nil
+ }
+ }
+
+ // First try parsing as integer seconds
+ sI, err := strconv.ParseInt(string(b), 10, 64)
+ if err == nil {
+ dur := time.Second * time.Duration(sI)
+ *d = Duration(dur)
+ return nil
+ }
+ // Second try parsing as float seconds
+ sF, err := strconv.ParseFloat(string(b), 64)
+ if err == nil {
+ dur := time.Second * time.Duration(sF)
+ *d = Duration(dur)
+ return nil
+ }
+
+ return nil
+}
+
+func (d *Duration) UnmarshalText(text []byte) error {
+ return d.UnmarshalTOML(text)
+}
+
+func (s *Size) UnmarshalTOML(b []byte) error {
+ var err error
+ if len(b) == 0 {
+ return nil
+ }
+ str := string(b)
+ if b[0] == '"' || b[0] == '\'' {
+ str, err = strconv.Unquote(str)
+ if err != nil {
+ return err
+ }
+ }
+
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err == nil {
+ *s = Size(val)
+ return nil
+ }
+ val, err = units.ParseStrictBytes(str)
+ if err != nil {
+ return err
+ }
+ *s = Size(val)
+ return nil
+}
+
+func (s *Size) UnmarshalText(text []byte) error {
+ return s.UnmarshalTOML(text)
+}
diff --git a/config/types_test.go b/config/types_test.go
new file mode 100644
index 0000000000000..8e35de6111c82
--- /dev/null
+++ b/config/types_test.go
@@ -0,0 +1,31 @@
+package config_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/plugins/processors/reverse_dns"
+ "github.com/stretchr/testify/require"
+)
+
+func TestConfigDuration(t *testing.T) {
+ c := config.NewConfig()
+ err := c.LoadConfigData([]byte(`
+[[processors.reverse_dns]]
+ cache_ttl = "3h"
+ lookup_timeout = "17s"
+ max_parallel_lookups = 13
+ ordered = true
+ [[processors.reverse_dns.lookup]]
+ field = "source_ip"
+ dest = "source_name"
+`))
+ require.NoError(t, err)
+ require.Len(t, c.Processors, 1)
+ p := c.Processors[0].Processor.(*reverse_dns.ReverseDNS)
+ require.EqualValues(t, p.CacheTTL, 3*time.Hour)
+ require.EqualValues(t, p.LookupTimeout, 17*time.Second)
+ require.Equal(t, p.MaxParallelLookups, 13)
+ require.Equal(t, p.Ordered, true)
+}
diff --git a/docker-compose.yml b/docker-compose.yml
index a5991434bc16e..4e94b8f012eab 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -2,9 +2,12 @@ version: '3'
services:
aerospike:
- image: aerospike/aerospike-server:3.9.0
+ image: aerospike/aerospike-server:4.9.0.11
ports:
- "3000:3000"
+ - "3001:3001"
+ - "3002:3002"
+ - "3003:3003"
zookeeper:
image: wurstmeister/zookeeper
environment:
@@ -24,12 +27,13 @@ services:
depends_on:
- zookeeper
elasticsearch:
- image: elasticsearch:5
+ image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
environment:
- - JAVA_OPTS="-Xms256m -Xmx256m"
+ - "ES_JAVA_OPTS=-Xms256m -Xmx256m"
+ - discovery.type=single-node
+ - xpack.security.enabled=false
ports:
- "9200:9200"
- - "9300:9300"
mysql:
image: mysql
environment:
@@ -41,14 +45,16 @@ services:
ports:
- "11211:11211"
pgbouncer:
- image: mbed/pgbouncer
+ image: mbentley/ubuntu-pgbouncer
environment:
- PG_ENV_POSTGRESQL_USER: pgbouncer
- PG_ENV_POSTGRESQL_PASS: pgbouncer
+ - PG_ENV_POSTGRESQL_USER=pgbouncer
+ - PG_ENV_POSTGRESQL_PASS=pgbouncer
ports:
- "6432:6432"
postgres:
image: postgres:alpine
+ environment:
+ - POSTGRES_HOST_AUTH_METHOD=trust
ports:
- "5432:5432"
rabbitmq:
@@ -95,6 +101,5 @@ services:
- crate
- -Cnetwork.host=0.0.0.0
- -Ctransport.host=localhost
- - -Clicense.enterprise=false
environment:
- CRATE_HEAP_SIZE=128m
diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md
index eee5b1de5fa6c..a5930a3e0df6d 100644
--- a/docs/AGGREGATORS.md
+++ b/docs/AGGREGATORS.md
@@ -52,6 +52,10 @@ var sampleConfig = `
drop_original = false
`
+func (m *Min) Init() error {
+ return nil
+}
+
func (m *Min) SampleConfig() string {
return sampleConfig
}
diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md
index 9cbc39381bdac..7be34aed5cef4 100644
--- a/docs/AGGREGATORS_AND_PROCESSORS.md
+++ b/docs/AGGREGATORS_AND_PROCESSORS.md
@@ -52,7 +52,7 @@ all metrics or adding a tag to all metrics that pass through.
### Aggregator
Aggregator plugins, on the other hand, are a bit more complicated. Aggregators
are typically for emitting new _aggregate_ metrics, such as a running mean,
-minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
+minimum, maximum, or standard deviation. For this reason, all _aggregator_
plugins are configured with a `period`. The `period` is the size of the window
of metrics that each _aggregate_ represents. In other words, the emitted
_aggregate_ metric will be the aggregated value of the past `period` seconds.
@@ -64,7 +64,4 @@ Since aggregates are created for each measurement, field, and unique tag combina
the plugin receives, you can make use of `taginclude` to group
aggregates by specific tags only.
-**NOTE** That since aggregators only aggregate metrics within their period, that
-historical data is not supported. In other words, if your metric timestamp is more
-than `now() - period` in the past, it will not be aggregated. If this is a feature
-that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992)
+**Note:** Aggregator plugins only aggregate metrics within their periods (`now() - period`). Data with a timestamp earlier than `now() - period` cannot be included.
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 9e016af62ec1b..341accefd4eea 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -45,12 +45,31 @@ in the `/etc/default/telegraf` file.
**Example**:
`/etc/default/telegraf`:
+
+For InfluxDB 1.x:
```
USER="alice"
INFLUX_URL="http://localhost:8086"
INFLUX_SKIP_DATABASE_CREATION="true"
INFLUX_PASSWORD="monkey123"
```
+For InfluxDB OSS 2:
+```
+INFLUX_HOST="http://localhost:9999"
+INFLUX_TOKEN="replace_with_your_token"
+INFLUX_ORG="your_username"
+INFLUX_BUCKET="replace_with_your_bucket_name"
+```
+
+For InfluxDB Cloud 2:
+```
+# For AWS West (Oregon)
+INFLUX_HOST="https://us-west-2-1.aws.cloud2.influxdata.com"
+# Other Cloud URLs at https://v2.docs.influxdata.com/v2.0/reference/urls/#influxdb-cloud-urls
+INFLUX_TOKEN=”replace_with_your_token”
+INFLUX_ORG="yourname@yourcompany.com"
+INFLUX_BUCKET="replace_with_your_bucket_name"
+```
`/etc/telegraf.conf`:
```toml
@@ -59,10 +78,25 @@ INFLUX_PASSWORD="monkey123"
[[inputs.mem]]
+# For InfluxDB 1.x:
[[outputs.influxdb]]
urls = ["${INFLUX_URL}"]
skip_database_creation = ${INFLUX_SKIP_DATABASE_CREATION}
password = "${INFLUX_PASSWORD}"
+
+# For InfluxDB OSS 2:
+[[outputs.influxdb_v2]]
+ urls = ["${INFLUX_HOST}"]
+ token = ["${INFLUX_TOKEN}"]
+ org = ["${INFLUX_ORG}"]
+ bucket = ["${INFLUX_BUCKET}"]
+
+# For InfluxDB Cloud 2:
+[[outputs.influxdb_v2]]
+ urls = ["${INFLUX_HOST}"]
+ token = ["${INFLUX_TOKEN}"]
+ org = ["${INFLUX_ORG}"]
+ bucket = ["${INFLUX_BUCKET}"]
```
The above files will produce the following effective configuration file to be
@@ -71,10 +105,29 @@ parsed:
[global_tags]
user = "alice"
+[[inputs.mem]]
+
+# For InfluxDB 1.x:
[[outputs.influxdb]]
urls = "http://localhost:8086"
skip_database_creation = true
password = "monkey123"
+
+# For InfluxDB OSS 2:
+[[outputs.influxdb_v2]]
+ urls = ["http://127.0.0.1:9999"]
+ token = "replace_with_your_token"
+ org = "your_username"
+ bucket = "replace_with_your_bucket_name"
+
+# For InfluxDB Cloud 2:
+[[outputs.influxdb_v2]]
+ # For AWS West (Oregon)
+ INFLUX_HOST="https://us-west-2-1.aws.cloud2.influxdata.com"
+ # Other Cloud URLs at https://v2.docs.influxdata.com/v2.0/reference/urls/#influxdb-cloud-urls
+ token = "replace_with_your_token"
+ org = "yourname@yourcompany.com"
+ bucket = "replace_with_your_bucket_name"
```
### Intervals
@@ -112,10 +165,9 @@ The agent table configures Telegraf and the defaults used across all plugins.
This controls the size of writes that Telegraf sends to output plugins.
- **metric_buffer_limit**:
- For failed writes, telegraf will cache metric_buffer_limit metrics for each
- output, and will flush this buffer on a successful write. Oldest metrics
- are dropped first when this buffer fills.
- This buffer only fills when writes fail to output plugin(s).
+ Maximum number of unwritten metrics per output. Increasing this value
+ allows for longer periods of output downtime without dropping metrics at the
+ cost of higher maximum memory usage.
- **collection_jitter**:
Collection jitter is used to jitter the collection by a random [interval][].
@@ -125,12 +177,13 @@ The agent table configures Telegraf and the defaults used across all plugins.
- **flush_interval**:
Default flushing [interval][] for all outputs. Maximum flush_interval will be
- flush_interval + flush_jitter
+ flush_interval + flush_jitter.
- **flush_jitter**:
- Jitter the flush [interval][] by a random amount. This is primarily to avoid
- large write spikes for users running a large number of telegraf instances.
- ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+ Default flush jitter for all outputs. This jitters the flush [interval][]
+ by a random amount. This is primarily to avoid large write spikes for users
+ running a large number of telegraf instances. ie, a jitter of 5s and interval
+ 10s means flushes will happen every 10-15s.
- **precision**:
Collected metrics are rounded to the precision specified as an [interval][].
@@ -139,11 +192,32 @@ The agent table configures Telegraf and the defaults used across all plugins.
service input to set the timestamp at the appropriate precision.
- **debug**:
- Run telegraf with debug log messages.
+ Log at debug level.
+
- **quiet**:
- Run telegraf in quiet mode (error log messages only).
+ Log only error level messages.
+
+- **logtarget**:
+ Log target controls the destination for logs and can be one of "file",
+ "stderr" or, on Windows, "eventlog". When set to "file", the output file is
+ determined by the "logfile" setting.
+
- **logfile**:
- Specify the log file name. The empty string means to log to stderr.
+ Name of the file to be logged to when using the "file" logtarget. If set to
+ the empty string then logs are written to stderr.
+
+
+- **logfile_rotation_interval**:
+ The logfile will be rotated after the time interval specified. When set to
+ 0 no time based rotation is performed.
+
+- **logfile_rotation_max_size**:
+ The logfile will be rotated when it becomes larger than the specified size.
+ When set to 0 no size based rotation is performed.
+
+- **logfile_rotation_max_archives**:
+ Maximum number of rotated archives to keep, any older logs are deleted. If
+ set to -1, no archives are removed.
- **hostname**:
Override default hostname, if empty use os.Hostname()
@@ -156,7 +230,7 @@ Telegraf plugins are divided into 4 types: [inputs][], [outputs][],
[processors][], and [aggregators][].
Unlike the `global_tags` and `agent` tables, any plugin can be defined
-multiple times and each instance will run independantly. This allows you to
+multiple times and each instance will run independently. This allows you to
have plugins defined with differing configurations as needed within a single
Telegraf process.
@@ -171,13 +245,33 @@ driven operation.
Parameters that can be used with any input plugin:
-- **interval**: How often to gather this metric. Normal plugins use a single
- global interval, but if one particular input should be run less or more
- often, you can configure that here.
+- **alias**: Name an instance of a plugin.
+
+- **interval**:
+ Overrides the `interval` setting of the [agent][Agent] for the plugin. How
+ often to gather this metric. Normal plugins use a single global interval, but
+ if one particular input should be run less or more often, you can configure
+ that here.
+
+- **precision**:
+ Overrides the `precision` setting of the [agent][Agent] for the plugin.
+ Collected metrics are rounded to the precision specified as an [interval][].
+
+ When this value is set on a service input, multiple events occuring at the
+ same timestamp may be merged by the output database.
+
+- **collection_jitter**:
+ Overrides the `collection_jitter` setting of the [agent][Agent] for the
+ plugin. Collection jitter is used to jitter the collection by a random
+ [interval][].
+
- **name_override**: Override the base name of the measurement. (Default is
the name of the input).
+
- **name_prefix**: Specifies a prefix to attach to the measurement name.
+
- **name_suffix**: Specifies a suffix to attach to the measurement name.
+
- **tags**: A map of tags to apply to a specific input's measurements.
The [metric filtering][] parameters can be used to limit what metrics are
@@ -236,13 +330,19 @@ databases, network services, and messaging systems.
Parameters that can be used with any output plugin:
+- **alias**: Name an instance of a plugin.
- **flush_interval**: The maximum time between flushes. Use this setting to
override the agent `flush_interval` on a per plugin basis.
+- **flush_jitter**: The amount of time to jitter the flush interval. Use this
+ setting to override the agent `flush_jitter` on a per plugin basis.
- **metric_batch_size**: The maximum number of metrics to send at once. Use
this setting to override the agent `metric_batch_size` on a per plugin basis.
- **metric_buffer_limit**: The maximum number of unsent metrics to buffer.
Use this setting to override the agent `metric_buffer_limit` on a per plugin
basis.
+- **name_override**: Override the original name of the measurement.
+- **name_prefix**: Specifies a prefix to attach to the measurement name.
+- **name_suffix**: Specifies a suffix to attach to the measurement name.
The [metric filtering][] parameters can be used to limit what metrics are
emitted from the output plugin.
@@ -253,6 +353,7 @@ Override flush parameters for a single output:
```toml
[agent]
flush_interval = "10s"
+ flush_jitter = "5s"
metric_batch_size = 1000
[[outputs.influxdb]]
@@ -262,6 +363,7 @@ Override flush parameters for a single output:
[[outputs.file]]
files = [ "stdout" ]
flush_interval = "1s"
+ flush_jitter = "1s"
metric_batch_size = 10
```
@@ -273,6 +375,7 @@ input plugins and before any aggregator plugins.
Parameters that can be used with any processor plugin:
+- **alias**: Name an instance of a plugin.
- **order**: The order in which the processor(s) are executed. If this is not
specified then processor execution order will be random.
@@ -307,6 +410,7 @@ processors have been applied.
Parameters that can be used with any aggregator plugin:
+- **alias**: Name an instance of a plugin.
- **period**: The period on which to flush & clear each aggregator. All
metrics that are sent with timestamps outside of this period will be ignored
by the aggregator.
@@ -314,6 +418,10 @@ Parameters that can be used with any aggregator plugin:
how long for aggregators to wait before receiving metrics from input
plugins, in the case that aggregators are flushing and inputs are gathering
on the same interval.
+- **grace**: The duration when the metrics will still be aggregated
+ by the plugin, even though they're outside of the aggregation period. This
+ is needed in a situation when the agent is expected to receive late metrics
+ and it's acceptable to roll them up into next aggregation period.
- **drop_original**: If true, the original metric will be dropped by the
aggregator and will not get sent to the output plugins.
- **name_override**: Override the base name of the measurement. (Default is
@@ -375,7 +483,7 @@ excluded from a Processor or Aggregator plugin, it is skips the plugin and is
sent onwards to the next stage of processing.
- **namepass**:
-An array of glob pattern strings. Only metrics whose measurement name matches
+An array of [glob pattern][] strings. Only metrics whose measurement name matches
a pattern in this list are emitted.
- **namedrop**:
@@ -383,7 +491,7 @@ The inverse of `namepass`. If a match is found the metric is discarded. This
is tested on metrics after they have passed the `namepass` test.
- **tagpass**:
-A table mapping tag keys to arrays of glob pattern strings. Only metrics
+A table mapping tag keys to arrays of [glob pattern][] strings. Only metrics
that contain a tag key in the table and a tag value matching one of its
patterns is emitted.
@@ -391,13 +499,17 @@ patterns is emitted.
The inverse of `tagpass`. If a match is found the metric is discarded. This
is tested on metrics after they have passed the `tagpass` test.
+> NOTE: Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters must be
+defined at the *_end_* of the plugin definition, otherwise subsequent plugin config
+options will be interpreted as part of the tagpass/tagdrop tables.
+
#### Modifiers
Modifier filters remove tags and fields from a metric. If all fields are
removed the metric is removed.
- **fieldpass**:
-An array of glob pattern strings. Only fields whose field key matches a
+An array of [glob pattern][] strings. Only fields whose field key matches a
pattern in this list are emitted.
- **fielddrop**:
@@ -406,7 +518,7 @@ patterns will be discarded from the metric. This is tested on metrics after
they have passed the `fieldpass` test.
- **taginclude**:
-An array of glob pattern strings. Only tags with a tag key matching one of
+An array of [glob pattern][] strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
metric based on its tag, `taginclude` removes all non matching tags from the
metric. Any tag can be filtered including global tags and the agent `host`
@@ -417,9 +529,9 @@ The inverse of `taginclude`. Tags with a tag key matching one of the patterns
will be discarded from the metric. Any tag can be filtered including global
tags and the agent `host` tag.
-##### Filtering Examples
+#### Filtering Examples
-Using tagpass and tagdrop:
+##### Using tagpass and tagdrop:
```toml
[[inputs.cpu]]
percpu = true
@@ -452,7 +564,7 @@ Using tagpass and tagdrop:
instance = ["isatap*", "Local*"]
```
-Using fieldpass and fielddrop:
+##### Using fieldpass and fielddrop:
```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
@@ -465,7 +577,7 @@ Using fieldpass and fielddrop:
fieldpass = ["inodes*"]
```
-Using namepass and namedrop:
+##### Using namepass and namedrop:
```toml
# Drop all metrics about containers for kubelet
[[inputs.prometheus]]
@@ -478,7 +590,7 @@ Using namepass and namedrop:
namepass = ["rest_client_*"]
```
-Using taginclude and tagexclude:
+##### Using taginclude and tagexclude:
```toml
# Only include the "cpu" tag in the measurements for the cpu plugin.
[[inputs.cpu]]
@@ -491,7 +603,7 @@ Using taginclude and tagexclude:
tagexclude = ["fstype"]
```
-Metrics can be routed to different outputs using the metric name and tags:
+##### Metrics can be routed to different outputs using the metric name and tags:
```toml
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
@@ -513,9 +625,11 @@ Metrics can be routed to different outputs using the metric name and tags:
cpu = ["cpu0"]
```
-Routing metrics to different outputs based on the input. Metrics are tagged
-with `influxdb_database` in the input, which is then used to select the
-output. The tag is removed in the outputs before writing.
+##### Routing metrics to different outputs based on the input.
+
+Metrics are tagged with `influxdb_database` in the input, which is then used to
+select the output. The tag is removed in the outputs before writing.
+
```toml
[[outputs.influxdb]]
urls = ["http://influxdb.example.com"]
@@ -535,6 +649,10 @@ output. The tag is removed in the outputs before writing.
influxdb_database = "other"
```
+### Transport Layer Security (TLS)
+
+Reference the detailed [TLS][] documentation.
+
[TOML]: https://github.com/toml-lang/toml#toml
[global tags]: #global-tags
[interval]: #intervals
@@ -546,3 +664,5 @@ output. The tag is removed in the outputs before writing.
[aggregators]: #aggregator-plugins
[metric filtering]: #metric-filtering
[telegraf.conf]: /etc/telegraf.conf
+[TLS]: /docs/TLS.md
+[glob pattern]: https://github.com/gobwas/glob#syntax
diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md
index f3ac028b980d6..a8650b250f3fd 100644
--- a/docs/DATA_FORMATS_OUTPUT.md
+++ b/docs/DATA_FORMATS_OUTPUT.md
@@ -5,10 +5,11 @@ standard data formats that may be selected from when configuring many output
plugins.
1. [InfluxDB Line Protocol](/plugins/serializers/influx)
-1. [JSON](/plugins/serializers/json)
+1. [Carbon2](/plugins/serializers/carbon2)
1. [Graphite](/plugins/serializers/graphite)
+1. [JSON](/plugins/serializers/json)
+1. [Prometheus](/plugins/serializers/prometheus)
1. [SplunkMetric](/plugins/serializers/splunkmetric)
-1. [Carbon2](/plugins/serializers/carbon2)
1. [Wavefront](/plugins/serializers/wavefront)
You will be able to identify the plugins with support by the presence of a
diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md
new file mode 100644
index 0000000000000..aa3b5058aa8b4
--- /dev/null
+++ b/docs/EXTERNAL_PLUGINS.md
@@ -0,0 +1,68 @@
+### External Plugins
+
+[External plugins](/EXTERNAL_PLUGINS.md) are external programs that are built outside
+of Telegraf that can run through an `execd` plugin. These external plugins allow for
+more flexibility compared to internal Telegraf plugins.
+
+- External plugins can be written in any language (internal Telegraf plugins can only written in Go)
+- External plugins can access to libraries not written in Go
+- Utilize licensed software that isn't available to the open source community
+- Can include large dependencies that would otherwise bloat Telegraf
+
+### External Plugin Guidelines
+The guidelines of writing external plugins would follow those for our general [input](/docs/INPUTS.md),
+[output](/docs/OUTPUTS.md), [processor](/docs/PROCESSORS.md), and [aggregator](/docs/AGGREGATORS.md) plugins.
+Please reference the documentation on how to create these plugins written in Go.
+
+_For listed [external plugins](/EXTERNAL_PLUGINS.md), the author of the external plugin is also responsible for the maintenance
+and feature development of external plugins. Expect to have users open plugin issues on its respective GitHub repository._
+
+#### Execd Go Shim
+For Go plugins, there is a [Execd Go Shim](/plugins/common/shim/) that will make it trivial to extract an internal input, processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This shim allows anyone to build and run it as a separate app using one of the `execd`plugins:
+- [inputs.execd](/plugins/inputs/execd)
+- [processors.execd](/plugins/processors/execd)
+- [outputs.execd](/plugins/outputs/execd)
+
+Follow the [Steps to externalize a plugin](/plugins/common/shim#steps-to-externalize-a-plugin) and [Steps to build and run your plugin](/plugins/common/shim#steps-to-build-and-run-your-plugin) to properly with the Execd Go Shim
+
+#### Step-by-Step guidelines
+This is a guide to help you set up your plugin to use it with `execd`
+1. Write your Telegraf plugin. Depending on the plugin, follow the guidelines on how to create the plugin itself using InfluxData's best practices:
+ - [Input Plugins](/docs/INPUTS.md)
+ - [Processor Plugins](/docs/PROCESSORS.md)
+ - [Aggregator Plugins](/docs/AGGREGATORS.md)
+ - [Output Plugins](/docs/OUTPUTS.md)
+2. If your plugin is written in Go, include the steps for the [Execd Go Shim](/plugins/common/shim#steps-to-build-and-run-your-plugin)
+ 1. Move the project to an external repo, it's recommended to preserve the path
+ structure, (but not strictly necessary). eg if your plugin was at
+ `plugins/inputs/cpu`, it's recommended that it also be under `plugins/inputs/cpu`
+ in the new repo. For a further example of what this might look like, take a
+ look at [ssoroka/rand](https://github.com/ssoroka/rand) or
+ [danielnelson/telegraf-execd-openvpn](https://github.com/danielnelson//telegraf-execd-openvpn)
+ 1. Copy [main.go](/plugins/common/shim/example/cmd/main.go) into your project under the `cmd` folder.
+ This will be the entrypoint to the plugin when run as a stand-alone program, and
+ it will call the shim code for you to make that happen. It's recommended to
+ have only one plugin per repo, as the shim is not designed to run multiple
+ plugins at the same time (it would vastly complicate things).
+ 1. Edit the main.go file to import your plugin. Within Telegraf this would have
+ been done in an all.go file, but here we don't split the two apart, and the change
+ just goes in the top of main.go. If you skip this step, your plugin will do nothing.
+ eg: `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"`
+ 1. Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration
+ specific to your plugin. Note that this config file **must be separate from the
+ rest of the config for Telegraf, and must not be in a shared directory where
+ Telegraf is expecting to load all configs**. If Telegraf reads this config file
+ it will not know which plugin it relates to. Telegraf instead uses an execd config
+ block to look for this plugin.
+ 1. Add usage and development instructions in the homepage of your repository for running
+ your plugin with its respective `execd` plugin. Please refer to
+ [openvpn](/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](/vipinvkmenon/awsalarms#installation)
+ for examples. Include the following steps:
+ 1. How to download the release package for your platform or how to clone the binary for your external plugin
+ 1. The commands to unpack or build your binary
+ 1. Location to edit your `telegraf.conf`
+ 1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd),
+ [processors.execd](/plugins/processors/execd) or [outputs.execd](/plugins/outputs/execd)
+ 1. Note that restart or reload of Telegraf is required
+ 1. Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md)
+ list. Please include the plugin name, link to the plugin repository and a short description of the plugin.
diff --git a/docs/FAQ.md b/docs/FAQ.md
index 1d1c490aa2ffc..4fe28db8b9cbc 100644
--- a/docs/FAQ.md
+++ b/docs/FAQ.md
@@ -5,16 +5,14 @@
You will need to setup several volume mounts as well as some environment
variables:
```
-docker run --name telegraf
- -v /:/hostfs:ro
- -v /etc:/hostfs/etc:ro
- -v /proc:/hostfs/proc:ro
- -v /sys:/hostfs/sys:ro
- -v /var/run/utmp:/var/run/utmp:ro
- -e HOST_ETC=/hostfs/etc
- -e HOST_PROC=/hostfs/proc
- -e HOST_SYS=/hostfs/sys
- -e HOST_MOUNT_PREFIX=/hostfs
+docker run --name telegraf \
+ -v /:/hostfs:ro \
+ -e HOST_ETC=/hostfs/etc \
+ -e HOST_PROC=/hostfs/proc \
+ -e HOST_SYS=/hostfs/sys \
+ -e HOST_VAR=/hostfs/var \
+ -e HOST_RUN=/hostfs/run \
+ -e HOST_MOUNT_PREFIX=/hostfs \
telegraf
```
@@ -40,6 +38,33 @@ If running as a service add the environment variable to `/etc/default/telegraf`:
GODEBUG=netdns=cgo
```
+### Q: How can I manage series cardinality?
+
+High [series cardinality][], when not properly managed, can cause high load on
+your database. Telegraf attempts to avoid creating series with high
+cardinality, but some monitoring workloads such as tracking containers are are
+inherently high cardinality. These workloads can still be monitored, but care
+must be taken to manage cardinality growth.
+
+You can use the following techniques to avoid cardinality issues:
+
+- Use [metric filtering][] options to exclude unneeded measurements and tags.
+- Write to a database with an appropriate [retention policy][].
+- Limit series cardinality in your database using the
+ [max-series-per-database][] and [max-values-per-tag][] settings.
+- Consider using the [Time Series Index][tsi].
+- Monitor your databases using the [show cardinality][] commands.
+- Consult the [InfluxDB documentation][influx docs] for the most up-to-date techniques.
+
+[series cardinality]: https://docs.influxdata.com/influxdb/v1.7/concepts/glossary/#series-cardinality
+[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
+[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
+[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000
+[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000
+[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/
+[show cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
+[influx docs]: https://docs.influxdata.com/influxdb/latest/
+
### Q: When will the next version be released?
The latest release date estimate can be viewed on the
diff --git a/docs/INPUTS.md b/docs/INPUTS.md
index 32eb9b9f584c8..f8e906f318fee 100644
--- a/docs/INPUTS.md
+++ b/docs/INPUTS.md
@@ -38,7 +38,7 @@ import (
)
type Simple struct {
- Ok bool
+ Ok bool `toml:"ok"`
}
func (s *Simple) Description() string {
@@ -52,6 +52,10 @@ func (s *Simple) SampleConfig() string {
`
}
+func (s *Simple) Init() error {
+ return nil
+}
+
func (s *Simple) Gather(acc telegraf.Accumulator) error {
if s.Ok {
acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md
index 5b6faf4c9eb6f..6b800ee1743b9 100644
--- a/docs/LICENSE_OF_DEPENDENCIES.md
+++ b/docs/LICENSE_OF_DEPENDENCIES.md
@@ -3,24 +3,40 @@
When distributed in a binary form, Telegraf may contain portions of the
following works:
-- cloud.google.com/go [Apache License 2.0](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/LICENSE)
+- cloud.google.com/go [Apache License 2.0](https://github.com/googleapis/google-cloud-go/blob/master/LICENSE)
- code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE)
- collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD)
-- contrib.go.opencensus.io/exporter/stackdriver [Apache License 2.0](https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/LICENSE)
+- github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE)
+- github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE)
+- github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE)
+- github.com/Azure/azure-sdk-for-go [Apache License 2.0](https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE)
+- github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE)
+- github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE)
+- github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE)
+- github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE)
+- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE)
+- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
+- github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE)
+- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING)
- github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE)
+- github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE)
+- github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING)
- github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
-- github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE)
+- github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE)
- github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE)
-- github.com/bsm/sarama-cluster [MIT License](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
+- github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE)
+- github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
+- github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE)
- github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [COUCHBASE INC. COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md)
- github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt)
+- github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE)
- github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
- github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE)
- github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE)
@@ -33,57 +49,69 @@ following works:
- github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE)
-- github.com/go-ini/ini [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE)
+- github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE)
+- github.com/glinton/ping [MIT License](https://github.com/glinton/ping/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE)
- github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE)
- github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
+- github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE)
+- github.com/goburrow/serial [MIT License](https://github.com/goburrow/serial/LICENSE)
- github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE)
+- github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE)
+- github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE)
- github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE)
- github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE)
-- github.com/google/uuid [BSD 3-Clause "New" or "Revised" License](https://github.com/google/uuid/blob/master/LICENSE)
- github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE)
-- github.com/gorilla/context [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/context/blob/master/LICENSE)
+- github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE)
- github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE)
- github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE)
- github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE)
+- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/LICENSE)
- github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE)
- github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE)
- github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE)
+- github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
-- github.com/kardianos/osext [BSD 3-Clause "New" or "Revised" License](https://github.com/kardianos/osext/blob/master/LICENSE)
+- github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE)
- github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE)
+- github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE)
- github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
-- github.com/kr/logfmt [MIT License](https://github.com/kr/logfmt/blob/master/Readme)
+- github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE)
+- github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE)
- github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE)
- github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE)
- github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE)
- github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
-- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE)
-- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
+- github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md)
+- github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md)
+- github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md)
- github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE)
- github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
-- github.com/nats-io/gnatsd [Apache License 2.0](https://github.com/nats-io/gnatsd/blob/master/LICENSE)
-- github.com/nats-io/go-nats [Apache License 2.0](https://github.com/nats-io/go-nats/blob/master/LICENSE)
+- github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE)
+- github.com/nats-io/nats-server [Apache License 2.0](https://github.com/nats-io/nats-server/blob/master/LICENSE)
+- github.com/nats-io/nats.go [Apache License 2.0](https://github.com/nats-io/nats.go/blob/master/LICENSE)
+- github.com/nats-io/nkeys [Apache License 2.0](https://github.com/nats-io/nkeys/blob/master/LICENSE)
- github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE)
+- github.com/newrelic/newrelic-telemetry-sdk-go [Apache License 2.0](https://github.com/newrelic/newrelic-telemetry-sdk-go/blob/master/LICENSE.md)
- github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
+- github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE)
- github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE)
- github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE)
-- github.com/opentracing-contrib/go-observer [Apache License 2.0](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
-- github.com/opentracing/opentracing-go [MIT License](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE)
@@ -93,36 +121,38 @@ following works:
- github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
+- github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
-- github.com/satori/go.uuid [MIT License](https://github.com/satori/go.uuid/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE)
-- github.com/shirou/w32 [BSD 3-Clause Clear License](https://github.com/shirou/w32/blob/master/LICENSE)
-- github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE)
- github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE)
- github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE)
-- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE)
- github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE)
- github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE)
- github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE)
+- github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE)
- github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE)
- github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE)
- github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/vmware/govmomi [Apache License 2.0](https://github.com/vmware/govmomi/blob/master/LICENSE.txt)
-- github.com/wavefrontHQ/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE)
+- github.com/wavefronthq/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE)
+- go.starlark.net [BSD 3-Clause "New" or "Revised" License](https://github.com/google/starlark-go/blob/master/LICENSE)
- golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE)
- golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE)
- golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE)
- golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE)
- golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE)
+- golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE)
+- golang.org/x/xerrors [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE)
+- golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md)
+- golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md)
- google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE)
-- google.golang.org/appengine [Apache License 2.0](https://github.com/golang/appengine/blob/master/LICENSE)
- google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE)
- google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE)
@@ -130,8 +160,15 @@ following works:
- gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE)
- gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE)
- gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE)
-- gopkg.in/ldap.v2 [MIT License](https://github.com/go-ldap/ldap/blob/v2.5.1/LICENSE)
+- gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE)
+- gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE)
+- gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE)
+- gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE)
+- gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE)
- gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE)
- gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE)
+
+## telegraf used and modified code from these projects
+- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE)
diff --git a/docs/METRICS.md b/docs/METRICS.md
index 1c238e30a482e..f903dcad4b011 100644
--- a/docs/METRICS.md
+++ b/docs/METRICS.md
@@ -12,7 +12,7 @@ four main components:
- **Timestamp**: Date and time associated with the fields.
This metric type exists only in memory and must be converted to a concrete
-representation in order to be transmitted or viewed. To acheive this we
+representation in order to be transmitted or viewed. To achieve this we
provide several [output data formats][] sometimes referred to as
*serializers*. Our default serializer converts to [InfluxDB Line
Protocol][line protocol] which provides a high performance and one-to-one
diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md
index 306b9ea6f4557..c60cd96ba7539 100644
--- a/docs/OUTPUTS.md
+++ b/docs/OUTPUTS.md
@@ -30,7 +30,7 @@ import (
)
type Simple struct {
- Ok bool
+ Ok bool `toml:"ok"`
}
func (s *Simple) Description() string {
@@ -43,6 +43,10 @@ func (s *Simple) SampleConfig() string {
`
}
+func (s *Simple) Init() error {
+ return nil
+}
+
func (s *Simple) Connect() error {
// Make a connection to the URL here
return nil
@@ -90,6 +94,19 @@ You should also add the following to your `SampleConfig()`:
data_format = "influx"
```
+## Flushing Metrics to Outputs
+
+Metrics are flushed to outputs when any of the following events happen:
+- `flush_interval + rand(flush_jitter)` has elapsed since start or the last flush interval
+- At least `metric_batch_size` count of metrics are waiting in the buffer
+- The telegraf process has received a SIGUSR1 signal
+
+Note that if the flush takes longer than the `agent.interval` to write the metrics
+to the output, you'll see a message saying the output `did not complete within its
+flush interval`. This may mean your output is not keeping up with the flow of metrics,
+and you may want to look into enabling compression, reducing the size of your metrics,
+or investigate other reasons why the writes might be taking longer than expected.
+
[file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file
[output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md
index 4f18b2d5544fe..47f29a4160652 100644
--- a/docs/PROCESSORS.md
+++ b/docs/PROCESSORS.md
@@ -46,6 +46,10 @@ func (p *Printer) Description() string {
return "Print all metrics that pass through this filter."
}
+func (p *Printer) Init() error {
+ return nil
+}
+
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, metric := range in {
fmt.Println(metric.String())
@@ -60,6 +64,80 @@ func init() {
}
```
+### Streaming Processors
+
+Streaming processors are a new processor type available to you. They are
+particularly useful to implement processor types that use background processes
+or goroutines to process multiple metrics at the same time. Some examples of this
+are the execd processor, which pipes metrics out to an external process over stdin
+and reads them back over stdout, and the reverse_dns processor, which does reverse
+dns lookups on IP addresses in fields. While both of these come with a speed cost,
+it would be significantly worse if you had to process one metric completely from
+start to finish before handling the next metric, and thus they benefit
+significantly from a streaming-pipe approach.
+
+Some differences from classic Processors:
+
+* Streaming processors must conform to the [telegraf.StreamingProcessor][] interface.
+* Processors should call `processors.AddStreaming` in their `init` function to register
+ themselves. See below for a quick example.
+
+### Streaming Processor Example
+
+```go
+package printer
+
+// printer.go
+
+import (
+ "fmt"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+type Printer struct {
+}
+
+var sampleConfig = `
+`
+
+func (p *Printer) SampleConfig() string {
+ return sampleConfig
+}
+
+func (p *Printer) Description() string {
+ return "Print all metrics that pass through this filter."
+}
+
+func (p *Printer) Init() error {
+ return nil
+}
+
+func (p *Printer) Start(acc telegraf.Accumulator) error {
+}
+
+func (p *Printer) Add(metric telegraf.Metric, acc telegraf.Accumulator) error {
+ // print!
+ fmt.Println(metric.String())
+ // pass the metric downstream, or metric.Drop() it.
+ // Metric will be dropped if this function returns an error.
+ acc.AddMetric(metric)
+
+ return nil
+}
+
+func (p *Printer) Stop() error {
+}
+
+func init() {
+ processors.AddStreaming("printer", func() telegraf.StreamingProcessor {
+ return &Printer{}
+ })
+}
+```
+
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
[telegraf.Processor]: https://godoc.org/github.com/influxdata/telegraf#Processor
+[telegraf.StreamingProcessor]: https://godoc.org/github.com/influxdata/telegraf#StreamingProcessor
diff --git a/docs/TLS.md b/docs/TLS.md
new file mode 100644
index 0000000000000..3cd6a1025fc4b
--- /dev/null
+++ b/docs/TLS.md
@@ -0,0 +1,105 @@
+# Transport Layer Security
+
+There is an ongoing effort to standardize TLS options across plugins. When
+possible, plugins will provide the standard settings described below. With the
+exception of the advanced configuration available TLS settings will be
+documented in the sample configuration.
+
+### Client Configuration
+
+For client TLS support we have the following options:
+```toml
+## Root certificates for verifying server certificates encoded in PEM format.
+# tls_ca = "/etc/telegraf/ca.pem"
+
+## The public and private keypairs for the client encoded in PEM format. May
+## contain intermediate certificates.
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+## Skip TLS verification.
+# insecure_skip_verify = false
+```
+
+### Server Configuration
+
+The server TLS configuration provides support for TLS mutual authentication:
+
+```toml
+## Set one or more allowed client CA certificate file names to
+## enable mutually authenticated TLS connections.
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+## Add service certificate and key.
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+```
+
+#### Advanced Configuration
+
+For plugins using the standard server configuration you can also set several
+advanced settings. These options are not included in the sample configuration
+for the interest of brevity.
+
+```toml
+## Define list of allowed ciphers suites. If not defined the default ciphers
+## supported by Go will be used.
+## ex: tls_cipher_suites = [
+## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
+## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
+## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
+## "TLS_RSA_WITH_AES_128_GCM_SHA256",
+## "TLS_RSA_WITH_AES_256_GCM_SHA384",
+## "TLS_RSA_WITH_AES_128_CBC_SHA256",
+## "TLS_RSA_WITH_AES_128_CBC_SHA",
+## "TLS_RSA_WITH_AES_256_CBC_SHA"
+## ]
+# tls_cipher_suites = []
+
+## Minimum TLS version that is acceptable.
+# tls_min_version = "TLS10"
+
+## Maximum SSL/TLS version that is acceptable.
+# tls_max_version = "TLS13"
+```
+
+Cipher suites for use with `tls_cipher_suites`:
+- `TLS_RSA_WITH_RC4_128_SHA`
+- `TLS_RSA_WITH_3DES_EDE_CBC_SHA`
+- `TLS_RSA_WITH_AES_128_CBC_SHA`
+- `TLS_RSA_WITH_AES_256_CBC_SHA`
+- `TLS_RSA_WITH_AES_128_CBC_SHA256`
+- `TLS_RSA_WITH_AES_128_GCM_SHA256`
+- `TLS_RSA_WITH_AES_256_GCM_SHA384`
+- `TLS_ECDHE_ECDSA_WITH_RC4_128_SHA`
+- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`
+- `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`
+- `TLS_ECDHE_RSA_WITH_RC4_128_SHA`
+- `TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA`
+- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`
+- `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`
+- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`
+- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`
+- `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`
+- `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`
+- `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`
+- `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`
+- `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`
+- `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`
+- `TLS_AES_128_GCM_SHA256`
+- `TLS_AES_256_GCM_SHA384`
+- `TLS_CHACHA20_POLY1305_SHA256`
+
+TLS versions for use with `tls_min_version` or `tls_max_version`:
+- `TLS10`
+- `TLS11`
+- `TLS12`
+- `TLS13`
diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md
index 51ce6a7a649a0..b0b6ee5adf358 100644
--- a/docs/WINDOWS_SERVICE.md
+++ b/docs/WINDOWS_SERVICE.md
@@ -48,16 +48,24 @@ Telegraf can manage its own service through the --service flag:
## Install multiple services
-You can install multiple telegraf instances with --service-name flag:
+Running multiple instances of Telegraf is seldom needed, as you can run
+multiple instances of each plugin and route metric flow using the metric
+filtering options. However, if you do need to run multiple telegraf instances
+on a single system, you can install the service with the `--service-name` and
+`--service-display-name` flags to give the services unique names:
```
- > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1
- > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2
- > C:\"Program Files"\Telegraf\telegraf.exe --service uninstall --service-name telegraf-1
+> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 --service-display-name "Telegraf 1"
+> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 --service-display-name "Telegraf 2"
```
-Troubleshooting common error #1067
+## Troubleshooting
+
+When Telegraf runs as a Windows service, Telegraf logs messages to Windows events log before configuration file with logging settings is loaded.
+Check event log for an error reported by `telegraf` service in case of Telegraf service reports failure on its start: Event Viewer->Windows Logs->Application
+
+**Troubleshooting common error #1067**
When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start
- --config C:\"Program Files"\Telegraf\telegraf.conf
+ --config "C:\Program Files\Telegraf\telegraf.conf"
diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 4c3de469c1cda..b44f41addcf83 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -9,9 +9,9 @@
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
-# Environment variables can be used anywhere in this config file, simply prepend
-# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
-# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
+# Environment variables can be used anywhere in this config file, simply surround
+# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
+# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
# Global tags can be specified here in key="value" format.
@@ -35,10 +35,9 @@
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
- ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
- ## output, and will flush this buffer on a successful write. Oldest metrics
- ## are dropped first when this buffer fills.
- ## This buffer only fills when writes fail to output plugin(s).
+ ## Maximum number of unwritten metrics per output. Increasing this value
+ ## allows for longer periods of output downtime without dropping metrics at the
+ ## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
@@ -64,13 +63,32 @@
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
- ## Logging configuration:
- ## Run telegraf with debug log messages.
- debug = false
- ## Run telegraf in quiet mode (error log messages only).
- quiet = false
- ## Specify the log file name. The empty string means to log to stderr.
- logfile = ""
+ ## Log at debug level.
+ # debug = false
+ ## Log only error level messages.
+ # quiet = false
+
+ ## Log target controls the destination for logs and can be one of "file",
+ ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
+ ## is determined by the "logfile" setting.
+ # logtarget = "file"
+
+ ## Name of the file to be logged to when using the "file" logtarget. If set to
+ ## the empty string then logs are written to stderr.
+ # logfile = ""
+
+ ## The logfile will be rotated after the time interval specified. When set
+ ## to 0 no time based rotation is performed. Logs are rotated only when
+ ## written to, if there is no log activity rotation may be delayed.
+ # logfile_rotation_interval = "0d"
+
+ ## The logfile will be rotated when it becomes larger than the specified
+ ## size. When set to 0 no size based rotation is performed.
+ # logfile_rotation_max_size = "0MB"
+
+ ## Maximum number of rotated archives to keep, any older logs are deleted.
+ ## If set to -1, no archives are removed.
+ # logfile_rotation_max_archives = 5
## Override default hostname, if empty use os.Hostname()
hostname = ""
@@ -82,6 +100,7 @@
# OUTPUT PLUGINS #
###############################################################################
+
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
@@ -100,6 +119,9 @@
## tag is not set the 'database' option is used as the default.
# database_tag = ""
+ ## If true, the 'database_tag' will not be included in the written metric.
+ # exclude_database_tag = false
+
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
@@ -109,6 +131,13 @@
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
+ ## The value of this tag will be used to determine the retention policy. If this
+ ## tag is not set the 'retention_policy' option is used as the default.
+ # retention_policy_tag = ""
+
+ ## If true, the 'retention_policy_tag' will not be included in the written metric.
+ # exclude_retention_policy_tag = false
+
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
@@ -186,14 +215,14 @@
# # exchange_type = "topic"
#
# ## If true, exchange will be passively declared.
-# # exchange_declare_passive = false
+# # exchange_passive = false
#
# ## Exchange durability can be either "transient" or "durable".
# # exchange_durability = "durable"
#
# ## Additional exchange arguments.
# # exchange_arguments = { }
-# # exchange_arguments = {"hash_propery" = "timestamp"}
+# # exchange_arguments = {"hash_property" = "timestamp"}
#
# ## Authentication credentials for the PLAIN auth_method.
# # username = ""
@@ -245,6 +274,14 @@
# ## Recommended to set to true.
# # use_batch_format = false
#
+# ## Content encoding for message payloads, can be set to "gzip" to or
+# ## "identity" to apply no encoding.
+# ##
+# ## Please note that when use_batch_format = false each amqp message contains only
+# ## a single metric, it is recommended to use compression with batch format
+# ## for best results.
+# # content_encoding = "identity"
+#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -350,7 +387,7 @@
# # base64_data = false
#
# ## Optional. PubSub attributes to add to metrics.
-# # [[inputs.pubsub.attributes]]
+# # [outputs.cloud_pubsub.attributes]
# # my_attr = "tag_value"
@@ -390,6 +427,9 @@
# ## You could use basicstats aggregator to calculate those fields. If not all statistic
# ## fields are available, all fields would still be sent as raw metrics.
# # write_statistics = false
+#
+# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision)
+# # high_resolution_metrics = false
# # Configuration for CrateDB to send metrics to.
@@ -408,13 +448,13 @@
# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
# ## Datadog API key
-# apikey = "my-secret-key" # required.
-#
-# # The base endpoint URL can optionally be specified but it defaults to:
-# #url = "https://app.datadoghq.com/api/v1/series"
+# apikey = "my-secret-key"
#
# ## Connection timeout.
# # timeout = "5s"
+#
+# ## Write URL override; useful for debugging.
+# # url = "https://app.datadoghq.com/api/v1/series"
# # Send metrics to nowhere at all
@@ -436,7 +476,7 @@
# ## Set the interval to check if the Elasticsearch nodes are available
# ## Setting to "0s" will disable the health check (not recommended in production)
# health_check_interval = "10s"
-# ## HTTP basic authentication details (eg. when using Shield)
+# ## HTTP basic authentication details
# # username = "telegraf"
# # password = "mypassword"
#
@@ -472,6 +512,38 @@
# template_name = "telegraf"
# ## Set to true if you want telegraf to overwrite an existing template
# overwrite_template = false
+# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string
+# ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's
+# force_document_id = false
+
+# # Send metrics to command as input over stdin
+# [[outputs.exec]]
+# ## Command to ingest metrics via stdin.
+# command = ["tee", "-a", "/dev/null"]
+#
+# ## Timeout for command to complete.
+# # timeout = "5s"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Run executable as long-running output plugin
+# [[outputs.execd]]
+# ## Program to run as daemon
+# command = ["my-telegraf-output", "--some-flag", "value"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to export.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
# # Send telegraf metrics to file(s)
@@ -479,6 +551,23 @@
# ## Files to write to, "stdout" is a specially handled file.
# files = ["stdout", "/tmp/metrics.out"]
#
+# ## Use batch serialization format instead of line based delimiting. The
+# ## batch format allows for the production of non line based output formats and
+# ## may more efficiently encode metric groups.
+# # use_batch_format = false
+#
+# ## The file will be rotated after the time interval specified. When set
+# ## to 0 no time based rotation is performed.
+# # rotation_interval = "0d"
+#
+# ## The logfile will be rotated when it becomes larger than the specified
+# ## size. When set to 0 no size based rotation is performed.
+# # rotation_max_size = "0MB"
+#
+# ## Maximum number of rotated archives to keep, any older logs are deleted.
+# ## If set to -1, no archives are removed.
+# # rotation_max_archives = 5
+#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -501,6 +590,19 @@
# ## Enable Graphite tags support
# # graphite_tag_support = false
#
+# ## Character for separating metric name and field for Graphite tags
+# # graphite_separator = "."
+#
+# ## Graphite templates patterns
+# ## 1. Template for cpu
+# ## 2. Template for disk*
+# ## 3. Default template
+# # templates = [
+# # "cpu tags.measurement.host.field",
+# # "disk* measurement.field",
+# # "host.measurement.tags.field"
+# #]
+#
# ## timeout in seconds for the write connection to graphite
# timeout = 2
#
@@ -512,16 +614,61 @@
# # insecure_skip_verify = false
-# # Send telegraf metrics to graylog(s)
+# # Send telegraf metrics to graylog
# [[outputs.graylog]]
# ## UDP endpoint for your graylog instance.
-# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
+# servers = ["127.0.0.1:12201"]
+#
+# ## The field to use as the GELF short_message, if unset the static string
+# ## "telegraf" will be used.
+# ## example: short_message_field = "message"
+# # short_message_field = ""
+
+
+# # Configurable HTTP health check resource based on metrics
+# [[outputs.health]]
+# ## Address and port to listen on.
+# ## ex: service_address = "http://localhost:8080"
+# ## service_address = "unix:///var/run/telegraf-health.sock"
+# # service_address = "http://:8080"
+#
+# ## The maximum duration for reading the entire request.
+# # read_timeout = "5s"
+# ## The maximum duration for writing the entire response.
+# # write_timeout = "5s"
+#
+# ## Username and password to accept for HTTP basic authentication.
+# # basic_username = "user1"
+# # basic_password = "secret"
+#
+# ## Allowed CA certificates for client certificates.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## TLS server certificate and private key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## One or more check sub-tables should be defined, it is also recommended to
+# ## use metric filtering to limit the metrics that flow into this output.
+# ##
+# ## When using the default buffer sizes, this example will fail when the
+# ## metric buffer is half full.
+# ##
+# ## namepass = ["internal_write"]
+# ## tagpass = { output = ["influxdb"] }
+# ##
+# ## [[outputs.health.compares]]
+# ## field = "buffer_size"
+# ## lt = 5000.0
+# ##
+# ## [[outputs.health.contains]]
+# ## field = "buffer_size"
# # A plugin that can transmit metrics over HTTP
# [[outputs.http]]
# ## URL is the address to send metrics to
-# url = "http://127.0.0.1:8080/metric"
+# url = "http://127.0.0.1:8080/telegraf"
#
# ## Timeout for HTTP message
# # timeout = "5s"
@@ -552,14 +699,14 @@
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
# ## Additional HTTP headers
# # [outputs.http.headers]
# # # Should be set manually to "application/json" for json data_format
# # Content-Type = "text/plain; charset=utf-8"
-#
-# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
-# ## compress body or "identity" to apply no encoding.
-# # content_encoding = "identity"
# # Configuration for sending metrics to InfluxDB
@@ -568,6 +715,7 @@
# ##
# ## Multiple URLs can be specified for a single cluster, only ONE of the
# ## urls will be written to each interval.
+# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
# urls = ["http://127.0.0.1:9999"]
#
# ## Token for authentication.
@@ -583,6 +731,9 @@
# ## tag is not set the 'bucket' option is used as the default.
# # bucket_tag = ""
#
+# ## If true, the bucket tag will not be added to the metric.
+# # exclude_bucket_tag = false
+#
# ## Timeout for HTTP messages.
# # timeout = "5s"
#
@@ -622,7 +773,7 @@
# template = "host.tags.measurement.field"
# ## Timeout in seconds to connect
# timeout = "2s"
-# ## Display Communcation to Instrumental
+# ## Display Communication to Instrumental
# debug = false
@@ -633,6 +784,13 @@
# ## Kafka topic for producer messages
# topic = "telegraf"
#
+# ## The value of this tag will be used as the topic. If not set the 'topic'
+# ## option is used.
+# # topic_tag = ""
+#
+# ## If true, the 'topic_tag' will be removed from to the metric.
+# # exclude_topic_tag = false
+#
# ## Optional Client id
# # client_id = "Telegraf"
#
@@ -669,13 +827,21 @@
# # keys = ["foo", "bar"]
# # separator = "_"
#
-# ## Telegraf tag to use as a routing key
-# ## ie, if this tag exists, its value will be used as the routing key
+# ## The routing tag specifies a tagkey on the metric whose value is used as
+# ## the message key. The message key is used to determine which partition to
+# ## send the message to. This tag is prefered over the routing_key option.
# routing_tag = "host"
#
-# ## Static routing key. Used when no routing_tag is set or as a fallback
-# ## when the tag specified in routing tag is not found. If set to "random",
-# ## a random value will be generated for each message.
+# ## The routing key is set as the message key and used to determine which
+# ## partition to send the message to. This value is only used when no
+# ## routing_tag is set or as a fallback when the tag specified in routing tag
+# ## is not found.
+# ##
+# ## If set to "random", a random value will be generated for each message.
+# ##
+# ## When unset, no message key is added and each message is routed to a random
+# ## partition.
+# ##
# ## ex: routing_key = "random"
# ## routing_key = "telegraf"
# # routing_key = ""
@@ -713,6 +879,7 @@
# # max_message_bytes = 1000000
#
# ## Optional TLS Config
+# # enable_tls = true
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
@@ -723,6 +890,9 @@
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
+# ## SASL protocol version. When connecting to Azure EventHub set to 0.
+# # sasl_version = 1
+#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -760,7 +930,7 @@
# streamname = "StreamName"
# ## DEPRECATED: PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
-# ## DEPRECATED: If set the paritionKey will be a random UUID on every put.
+# ## DEPRECATED: If set the partitionKey will be a random UUID on every put.
# ## This allows for scaling across multiple shards in a stream.
# ## This will cause issues with ordering.
# use_random_partitionkey = false
@@ -799,7 +969,7 @@
# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
-# ## Librator API Docs
+# ## Librato API Docs
# ## http://dev.librato.com/v1/metrics-authentication
# ## Librato API user
# api_user = "telegraf@influxdb.com" # required.
@@ -867,12 +1037,20 @@
# [[outputs.nats]]
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
+#
# ## Optional credentials
# # username = ""
# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
# ## NATS subject for producer messages
# subject = "telegraf"
#
+# ## Use Transport Layer Security
+# # secure = false
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -887,6 +1065,18 @@
# data_format = "influx"
+# # Send metrics to New Relic metrics endpoint
+# [[outputs.newrelic]]
+# ## New Relic Insights API key
+# insights_key = "insights api key"
+#
+# ## Prefix to add to add to metric name for easy identification.
+# # metric_prefix = ""
+#
+# ## Timeout for writes to the New Relic API.
+# # timeout = "15s"
+
+
# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
# ## Location of nsqd instance listening on TCP
@@ -934,6 +1124,14 @@
# ## Address to listen on
# listen = ":9273"
#
+# ## Metric version controls the mapping from Telegraf metrics into
+# ## Prometheus format. When using the prometheus input, use the same value in
+# ## both plugins to ensure metrics are round-tripped without modification.
+# ##
+# ## example: metric_version = 1; deprecated in 1.13
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
# ## Use HTTP Basic Authentication.
# # basic_username = "Foo"
# # basic_password = "Bar"
@@ -1039,6 +1237,11 @@
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
+# ## Content encoding for packet-based connections (i.e. UDP, unixgram).
+# ## Can be set to "gzip" or to "identity" to apply no encoding.
+# ##
+# # content_encoding = "identity"
+#
# ## Data format to generate.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -1057,13 +1260,175 @@
# ## Custom resource type
# # resource_type = "generic_node"
#
-# ## Additonal resource labels
+# ## Additional resource labels
# # [outputs.stackdriver.resource_labels]
# # node_id = "$HOSTNAME"
# # namespace = "myapp"
# # location = "eu-north0"
+# # A plugin that can transmit metrics to Sumo Logic HTTP Source
+# [[outputs.sumologic]]
+# ## Unique URL generated for your HTTP Metrics Source.
+# ## This is the address to send metrics to.
+# # url = "https://events.sumologic.net/receiver/v1/http/"
+#
+# ## Data format to be used for sending metrics.
+# ## This will set the "Content-Type" header accordingly.
+# ## Currently supported formats:
+# ## * graphite - for Content-Type of application/vnd.sumologic.graphite
+# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2
+# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus
+# ##
+# ## More information can be found at:
+# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics
+# ##
+# ## NOTE:
+# ## When unset, telegraf will by default use the influx serializer which is currently unsupported
+# ## in HTTP Source.
+# data_format = "carbon2"
+#
+# ## Timeout used for HTTP request
+# # timeout = "5s"
+#
+# ## HTTP method, one of: "POST" or "PUT". "POST" is used by default if unset.
+# # method = "POST"
+#
+# ## Max HTTP request body size in bytes before compression (if applied).
+# ## By default 1MB is recommended.
+# ## NOTE:
+# ## Bear in mind that in some serializer a metric even though serialized to multiple
+# ## lines cannot be split any further so setting this very low might not work
+# ## as expected.
+# # max_request_body_size = 1_000_000
+#
+# ## Additional, Sumo specific options.
+# ## Full list can be found here:
+# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers
+#
+# ## Desired source name.
+# ## Useful if you want to override the source name configured for the source.
+# # source_name = ""
+#
+# ## Desired host name.
+# ## Useful if you want to override the source host configured for the source.
+# # source_host = ""
+#
+# ## Desired source category.
+# ## Useful if you want to override the source category configured for the source.
+# # source_category = ""
+#
+# ## Comma-separated key=value list of dimensions to apply to every metric.
+# ## Custom dimensions will allow you to query your metrics at a more granular level.
+# # dimensions = ""
+
+
+# # Configuration for Syslog server to send metrics to
+# [[outputs.syslog]]
+# ## URL to connect to
+# ## ex: address = "tcp://127.0.0.1:8094"
+# ## ex: address = "tcp4://127.0.0.1:8094"
+# ## ex: address = "tcp6://127.0.0.1:8094"
+# ## ex: address = "tcp6://[2001:db8::1]:8094"
+# ## ex: address = "udp://127.0.0.1:8094"
+# ## ex: address = "udp4://127.0.0.1:8094"
+# ## ex: address = "udp6://127.0.0.1:8094"
+# address = "tcp://127.0.0.1:8094"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## The framing technique with which it is expected that messages are
+# ## transported (default = "octet-counting"). Whether the messages come
+# ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
+# ## be one of "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## SD-PARAMs settings
+# ## Syslog messages can contain key/value pairs within zero or more
+# ## structured data sections. For each unrecognized metric tag/field a
+# ## SD-PARAMS is created.
+# ##
+# ## Example:
+# ## [[outputs.syslog]]
+# ## sdparam_separator = "_"
+# ## default_sdid = "default@32473"
+# ## sdids = ["foo@123", "bar@456"]
+# ##
+# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
+# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
+#
+# ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
+# # sdparam_separator = "_"
+#
+# ## Default sdid used for tags/fields that don't contain a prefix defined in
+# ## the explicit sdids setting below If no default is specified, no SD-PARAMs
+# ## will be used for unrecognized field.
+# # default_sdid = "default@32473"
+#
+# ## List of explicit prefixes to extract from tag/field keys and use as the
+# ## SDID, if they match (see above example for more details):
+# # sdids = ["foo@123", "bar@456"]
+#
+# ## Default severity value. Severity and Facility are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
+# ## with key "severity_code" is defined. If unset, 5 (notice) is the default
+# # default_severity_code = 5
+#
+# ## Default facility value. Facility and Severity are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
+# ## key "facility_code" is defined. If unset, 1 (user-level) is the default
+# # default_facility_code = 1
+#
+# ## Default APP-NAME value (RFC5424#section-6.2.5)
+# ## Used when no metric tag with key "appname" is defined.
+# ## If unset, "Telegraf" is the default
+# # default_appname = "Telegraf"
+
+
+# # Write metrics to Warp 10
+# [[outputs.warp10]]
+# # Prefix to add to the measurement.
+# prefix = "telegraf."
+#
+# # URL of the Warp 10 server
+# warp_url = "http://localhost:8080"
+#
+# # Write token to access your app on warp 10
+# token = "Token"
+#
+# # Warp 10 query timeout
+# # timeout = "15s"
+#
+# ## Print Warp 10 error body
+# # print_error_body = false
+#
+# ## Max string error size
+# # max_string_error_size = 511
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
@@ -1092,6 +1457,10 @@
# ## When true will convert all _ (underscore) characters in final metric name. default is true
# #convert_paths = true
#
+# ## Use Strict rules to sanitize metric and tag names from invalid characters
+# ## When enabled forward slash (/) and comma (,) will be accepted
+# #use_strict = false
+#
# ## Use Regex to sanitize metric and tag names from invalid characters
# ## Regex is more thorough, but significantly slower. default is false
# #use_regex = false
@@ -1102,6 +1471,10 @@
# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
# #convert_bool = true
#
+# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any
+# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility.
+# #truncate_tags = false
+#
# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
# ## deprecated in 1.9; use the enum processor plugin
# #[[outputs.wavefront.string_to_number.elasticsearch]]
@@ -1110,11 +1483,23 @@
# # red = 0.0
-
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
+
+# # Clone metrics and apply modifications.
+# [[processors.clone]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.clone.tags]
+# # additional_tag = "tag_value"
+
+
# # Convert values to another metric value type
# [[processors.converter]]
# ## Tags to convert
@@ -1123,6 +1508,7 @@
# ## select the keys to convert. The array may contain globs.
# ## = [...]
# [processors.converter.tags]
+# measurement = []
# string = []
# integer = []
# unsigned = []
@@ -1135,6 +1521,7 @@
# ## select the keys to convert. The array may contain globs.
# ## = [...]
# [processors.converter.fields]
+# measurement = []
# tag = []
# string = []
# integer = []
@@ -1143,15 +1530,67 @@
# float = []
+# # Dates measurements, tags, and fields that pass through this filter.
+# [[processors.date]]
+# ## New tag to create
+# tag_key = "month"
+#
+# ## New field to create (cannot set both field_key and tag_key)
+# # field_key = "month"
+#
+# ## Date format string, must be a representation of the Go "reference time"
+# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+# date_format = "Jan"
+#
+# ## If destination is a field, date format can also be one of
+# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
+# # date_format = "unix"
+#
+# ## Offset duration added to the date string when writing the new tag.
+# # date_offset = "0s"
+#
+# ## Timezone to use when creating the tag or field using a reference time
+# ## string. This can be set to one of "UTC", "Local", or to a location name
+# ## in the IANA Time Zone database.
+# ## example: timezone = "America/Los_Angeles"
+# # timezone = "UTC"
+
+
+# # Filter metrics with repeating field values
+# [[processors.dedup]]
+# ## Maximum time to suppress output
+# dedup_interval = "600s"
+
+
+# # Defaults sets default value(s) for specified fields that are not set on incoming metrics.
+# [[processors.defaults]]
+# ## Ensures a set of fields always exists on your metric(s) with their
+# ## respective default value.
+# ## For any given field pair (key = default), if it's not set, a field
+# ## is set on the metric with the specified default.
+# ##
+# ## A field is considered not set if it is nil on the incoming metric;
+# ## or it is not nil but its value is an empty string or is a string
+# ## of one or more spaces.
+# ## =
+# # [processors.defaults.fields]
+# # field_1 = "bar"
+# # time_idle = 0
+# # is_error = true
+
+
# # Map enum values according to given table.
# [[processors.enum]]
# [[processors.enum.mapping]]
# ## Name of the field to map
# field = "status"
#
-# ## Destination field to be used for the mapped value. By default the source
-# ## field is used, overwriting the original value.
-# # dest = "status_code"
+# ## Name of the tag to map
+# # tag = "status"
+#
+# ## Destination tag or field to be used for the mapped value. By default the
+# ## source tag or field is used, overwriting the original value.
+# dest = "status_code"
#
# ## Default value to be used for all values not contained in the mapping
# ## table. When unset, the unmodified value for the field will be used if no
@@ -1161,10 +1600,112 @@
# ## Table of mappings
# [processors.enum.mapping.value_mappings]
# green = 1
-# yellow = 2
+# amber = 2
# red = 3
+# # Run executable as long-running processor plugin
+# [[processors.execd]]
+# ## Program to run as daemon
+# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+# command = ["cat"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+
+
+# # Performs file path manipulations on tags and fields
+# [[processors.filepath]]
+# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+# # [[processors.filepath.basename]]
+# # tag = "path"
+# # dest = "basepath"
+#
+# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+# # [[processors.filepath.dirname]]
+# # field = "path"
+#
+# ## Treat the tag value as a path, converting it to its the last element without its suffix
+# # [[processors.filepath.stem]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to the shortest path name equivalent
+# ## to path by purely lexical processing
+# # [[processors.filepath.clean]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to a relative path that is lexically
+# ## equivalent to the source path when joined to 'base_path'
+# # [[processors.filepath.rel]]
+# # tag = "path"
+# # base_path = "/var/log"
+#
+# ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Has only
+# ## effect on Windows
+# # [[processors.filepath.toslash]]
+# # tag = "path"
+
+
+# # Add a tag of the network interface name looked up over SNMP by interface number
+# [[processors.ifname]]
+# ## Name of tag holding the interface number
+# # tag = "ifIndex"
+#
+# ## Name of output tag where service name will be added
+# # dest = "ifName"
+#
+# ## Name of tag of the SNMP agent to request the interface name from
+# # agent = "agent"
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## max_parallel_lookups is the maximum number of SNMP requests to
+# ## make at the same time.
+# # max_parallel_lookups = 100
+#
+# ## ordered controls whether or not the metrics need to stay in the
+# ## same order this plugin received them in. If false, this plugin
+# ## may change the order when data is cached. If you need metrics to
+# ## stay in order set this to true. keeping the metrics ordered may
+# ## be slightly slower
+# # ordered = false
+#
+# ## cache_ttl is the amount of time interface names are cached for a
+# ## given agent. After this period elapses if names are needed they
+# ## will be retrieved again.
+# # cache_ttl = "8h"
+
+
# # Apply metric modifications using override semantics.
# [[processors.override]]
# ## All modifications on inputs and aggregators can be overridden:
@@ -1196,6 +1737,27 @@
# data_format = "influx"
+# # Rotate a single valued metric into a multi field metric
+# [[processors.pivot]]
+# ## Tag to use for naming the new field.
+# tag_key = "name"
+# ## Field to use as the value of the new field.
+# value_key = "value"
+
+
+# # Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file
+# [[processors.port_name]]
+# [[processors.port_name]]
+# ## Name of tag holding the port number
+# # tag = "port"
+#
+# ## Name of output tag where service name will be added
+# # dest = "service"
+#
+# ## Default tcp or udp
+# # default_protocol = "tcp"
+
+
# # Print all metrics that pass through this filter.
# [[processors.printer]]
@@ -1208,10 +1770,12 @@
# # key = "resp_code"
# # ## Regular expression to match on a tag value
# # pattern = "^(\\d)\\d\\d$"
-# # ## Pattern for constructing a new value (${1} represents first subgroup)
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
# # replacement = "${1}xx"
#
# # [[processors.regex.fields]]
+# # ## Field to change
# # key = "request"
# # ## All the power of the Go regular expressions available here
# # ## For example, named subgroups
@@ -1234,6 +1798,84 @@
# [[processors.rename]]
+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. eg:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long should you wait for a single dns request to repsond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requesting hitting cached values do not count against this
+# ## total, and neither do mulptiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+# ## The name of the lat and lon fields containing WGS-84 latitude and
+# ## longitude in decimal degrees.
+# # lat_field = "lat"
+# # lon_field = "lon"
+#
+# ## New tag to create
+# # tag_key = "s2_cell_id"
+#
+# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+# # cell_level = 9
+
+
+# # Process metrics using a Starlark script
+# [[processors.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+# ##
+# ## Source of the Starlark script.
+# source = '''
+# def apply(metric):
+# return metric
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star"
+
+
# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
# ## Convert a tag value to uppercase
@@ -1245,6 +1887,10 @@
# # field = "uri_stem"
# # dest = "uri_stem_normalised"
#
+# ## Convert a field value to titlecase
+# # [[processors.strings.titlecase]]
+# # field = "status"
+#
# ## Trim leading and trailing whitespace using the default cutset
# # [[processors.strings.trim]]
# # field = "message"
@@ -1274,6 +1920,35 @@
# # measurement = "*"
# # old = ":"
# # new = "_"
+#
+# ## Trims strings based on width
+# # [[processors.strings.left]]
+# # field = "message"
+# # width = 10
+#
+# ## Decode a base64 encoded utf-8 string
+# # [[processors.strings.base64decode]]
+# # field = "message"
+
+
+# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]
+
+
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+# ## Tag to set with the output of the template.
+# tag = "topic"
+#
+# ## Go template used to create the tag value. In order to ease TOML
+# ## escaping requirements, you may wish to use single quotes around the
+# ## template string.
+# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
# # Print all metrics that pass through this filter.
@@ -1315,7 +1990,7 @@
# # add_rank_fields = []
#
# ## These settings provide a way to know what values the plugin is generating
-# ## when aggregating metrics. The 'add_agregate_field' setting allows to
+# ## when aggregating metrics. The 'add_aggregate_field' setting allows to
# ## specify for which fields the final aggregation value is required. If the
# ## list is non empty, then a field will be added to each every metric for
# ## each field present in this setting. This field will contain
@@ -1326,15 +2001,24 @@
# # add_aggregate_fields = []
+# # Rotate multi field metric into several single field metrics
+# [[processors.unpivot]]
+# ## Tag to use for the name.
+# tag_key = "name"
+# ## Field to use for the name of the value.
+# value_key = "value"
+
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
+
# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
# ## The period on which to flush & clear the aggregator.
# period = "30s"
+#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
@@ -1343,6 +2027,18 @@
# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
+# # Report the final metric of a series
+# [[aggregators.final]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## The time that a series is not updated until considering it final.
+# series_timeout = "5m"
+
+
# # Create aggregate histograms.
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
@@ -1352,16 +2048,24 @@
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
+# ## If true, the histogram will be reset on flush instead
+# ## of accumulating the results.
+# reset = false
+#
+# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added.
+# ## Defaults to true.
+# cumulative = true
+#
# ## Example config that aggregates all fields of the metric.
# # [[aggregators.histogram.config]]
-# # ## The set of buckets.
+# # ## Right borders of buckets (with +Inf implicitly added).
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# # ## The name of metric.
# # measurement_name = "cpu"
#
# ## Example config that aggregates only specific fields of the metric.
# # [[aggregators.histogram.config]]
-# # ## The set of buckets.
+# # ## Right borders of buckets (with +Inf implicitly added).
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# # ## The name of metric.
# # measurement_name = "diskio"
@@ -1369,6 +2073,13 @@
# # fields = ["io_time", "read_time", "write_time"]
+# # Merge metrics into multifield metrics by series key
+# [[aggregators.merge]]
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = true
+
+
# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
@@ -1391,11 +2102,11 @@
# fields = []
-
###############################################################################
# INPUT PLUGINS #
###############################################################################
+
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
@@ -1415,7 +2126,7 @@
# mount_points = ["/"]
## Ignore mount points by filesystem type.
- ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
+ ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
# Read metrics about disk IO by device
@@ -1469,15 +2180,18 @@
# Read metrics about system load & uptime
[[inputs.system]]
- # no configuration
+ ## Uncomment to remove deprecated metrics.
+ # fielddrop = ["uptime_format"]
# # Gather ActiveMQ metrics
# [[inputs.activemq]]
-# ## Required ActiveMQ Endpoint
-# # server = "192.168.50.10"
+# ## ActiveMQ WebConsole URL
+# url = "http://127.0.0.1:8161"
#
-# ## Required ActiveMQ port
+# ## Required ActiveMQ Endpoint
+# ## deprecated in 1.11; use the url option
+# # server = "127.0.0.1"
# # port = 8161
#
# ## Credentials for basic HTTP authentication
@@ -1495,6 +2209,7 @@
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
# # Read stats from aerospike server(s)
@@ -1538,6 +2253,16 @@
# # insecure_skip_verify = false
+# # Monitor APC UPSes connected to apcupsd
+# [[inputs.apcupsd]]
+# # A list of running apcupsd server to connect to.
+# # If not provided will default to tcp://127.0.0.1:3551
+# servers = ["tcp://127.0.0.1:3551"]
+#
+# ## Timeout for dialing server.
+# timeout = "5s"
+
+
# # Gather metrics from Apache Aurora schedulers
# [[inputs.aurora]]
# ## Schedulers are the base addresses of your Aurora Schedulers
@@ -1564,6 +2289,18 @@
# # insecure_skip_verify = false
+# # Gather Azure Storage Queue metrics
+# [[inputs.azure_storage_queue]]
+# ## Required Azure Storage Account name
+# account_name = "mystorageaccount"
+#
+# ## Required Azure Storage Account access key
+# account_key = "storageaccountaccesskey"
+#
+# ## Set to false to disable peeking age of oldest message (executes faster)
+# # peek_oldest_message_age = true
+
+
# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
# ## Bcache sets path
@@ -1586,6 +2323,15 @@
# tubes = ["notifications"]
+# # Read BIND nameserver XML statistics
+# [[inputs.bind]]
+# ## An array of BIND XML statistics URI to gather stats.
+# ## Default is "http://localhost:8053/xml/v3".
+# # urls = ["http://localhost:8053/xml/v3"]
+# # gather_memory_contexts = false
+# # gather_views = false
+
+
# # Collect bond interface status, slaves statuses and failures count
# [[inputs.bond]]
# ## Sets 'proc' directory path
@@ -1641,7 +2387,7 @@
# # insecure_skip_verify = false
-# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
+# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster.
# [[inputs.ceph]]
# ## This is the recommended interval to poll. Too frequent and you will lose
# ## data points due to timeouts during rebalancing and recovery
@@ -1658,6 +2404,8 @@
# ## prefix of MON and OSD socket files, used to determine socket type
# mon_prefix = "ceph-mon"
# osd_prefix = "ceph-osd"
+# mds_prefix = "ceph-mds"
+# rgw_prefix = "ceph-client"
#
# ## suffix used to identify socket files
# socket_suffix = "asok"
@@ -1710,12 +2458,12 @@
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
-# #access_key = ""
-# #secret_key = ""
-# #token = ""
-# #role_arn = ""
-# #profile = ""
-# #shared_credential_file = ""
+# # access_key = ""
+# # secret_key = ""
+# # token = ""
+# # role_arn = ""
+# # profile = ""
+# # shared_credential_file = ""
#
# ## Endpoint to make request against, the correct endpoint is automatically
# ## determined and this option should only be set if you wish to override the
@@ -1741,27 +2489,38 @@
# interval = "5m"
#
# ## Configure the TTL for the internal cache of metrics.
-# ## Defaults to 1 hr if not specified
-# #cache_ttl = "10m"
+# # cache_ttl = "1h"
#
# ## Metric Statistic Namespace (required)
# namespace = "AWS/ELB"
#
# ## Maximum requests per second. Note that the global default AWS rate limit is
-# ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
-# ## maximum of 400. Optional - default value is 200.
+# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
+# ## maximum of 50.
# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
-# ratelimit = 200
+# # ratelimit = 25
+#
+# ## Timeout for http requests made by the cloudwatch client.
+# # timeout = "5s"
+#
+# ## Namespace-wide statistic filters. These allow fewer queries to be made to
+# ## cloudwatch.
+# # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
+# # statistic_exclude = []
#
-# ## Metrics to Pull (optional)
+# ## Metrics to Pull
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
# #[[inputs.cloudwatch.metrics]]
# # names = ["Latency", "RequestCount"]
# #
-# # ## Dimension filters for Metric. These are optional however all dimensions
-# # ## defined for the metric names must be specified in order to retrieve
-# # ## the metric statistics.
+# # ## Statistic filters for Metric. These allow for retrieving specific
+# # ## statistics for an individual metric.
+# # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
+# # # statistic_exclude = []
+# #
+# # ## Dimension filters for Metric. All dimensions defined for the metric names
+# # ## must be specified in order to retrieve the metric statistics.
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = "LoadBalancerName"
# # value = "p-example"
@@ -1779,14 +2538,14 @@
# "nf_conntrack_count","nf_conntrack_max"]
#
# ## Directories to search within for the conntrack files above.
-# ## Missing directrories will be ignored.
+# ## Missing directories will be ignored.
# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
# ## Consul server address
-# # address = "localhost"
+# # address = "localhost:8500"
#
# ## URI scheme for the Consul server, one of "http", "https"
# # scheme = "http"
@@ -1910,7 +2669,7 @@
# # domains = ["."]
#
# ## Query record type.
-# ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
+# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# # record_type = "A"
#
# ## Dns server port.
@@ -1933,6 +2692,9 @@
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
+# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+# source_tag = false
+#
# ## Containers to include and exclude. Globs accepted.
# ## Note that an empty array for both will include all containers
# container_name_include = []
@@ -1940,6 +2702,8 @@
#
# ## Container states to include and exclude. Globs accepted.
# ## When empty only containers in the "running" state will be captured.
+# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# # container_state_include = []
# # container_state_exclude = []
#
@@ -1949,8 +2713,10 @@
# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
# ## network (eth0, eth1, ...) stats or not
# perdevice = true
+#
# ## Whether to report for each container total blkio and network stats or not
# total = false
+#
# ## Which environment variables should we use as a tag
# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
@@ -1975,13 +2741,43 @@
# ##
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost:24242"]
+#
# ## Type is one of "user", "domain", "ip", or "global"
# type = "global"
+#
# ## Wildcard matches like "*.com". An empty string "" is same as "*"
# ## If type = "ip" filters should be
# filters = [""]
+# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints.
+# [[inputs.ecs]]
+# ## ECS metadata url.
+# ## Metadata v2 API is used if set explicitly. Otherwise,
+# ## v3 metadata endpoint API is used if available.
+# # endpoint_url = ""
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "RUNNING" state will be captured.
+# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+# ## "RESOURCES_PROVISIONED", "STOPPED".
+# # container_status_include = []
+# # container_status_exclude = []
+#
+# ## ecs labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# ecs_label_include = [ "com.amazonaws.ecs.*" ]
+# ecs_label_exclude = []
+#
+# ## Timeout for queries.
+# # timeout = "5s"
+
+
# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
@@ -2012,11 +2808,21 @@
# ## Only gather cluster_stats from the master node. To work this require local = true
# cluster_stats_only_from_master = true
#
+# ## Indices to collect; can be one or more indices names or _all
+# indices_include = ["_all"]
+#
+# ## One of "shards", "cluster", "indices"
+# indices_level = "shards"
+#
# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
# ## "breaker". Per default, all stats are gathered.
# # node_stats = ["jvm", "http"]
#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -2025,6 +2831,15 @@
# # insecure_skip_verify = false
+# # Returns ethtool statistics for given interfaces
+# [[inputs.ethtool]]
+# ## List of interfaces to pull metrics for
+# # interface_include = ["eth0"]
+#
+# ## List of interfaces to ignore when pulling metrics.
+# # interface_exclude = ["eth1"]
+
+
# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
# ## Commands array
@@ -2067,15 +2882,24 @@
# # timeout = "5s"
-# # Reload and gather from file[s] on telegraf's interval.
+# # Parse a complete file each interval
# [[inputs.file]]
-# ## Files to parse each interval.
-# ## These accept standard unix glob matching rules, but with the addition of
-# ## ** as a "super asterisk". ie:
-# ## /var/log/**.log -> recursively find all .log files in /var/log
-# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
-# ## /var/log/apache.log -> only read the apache log file
-# files = ["/var/log/apache/access.log"]
+# ## Files to parse each interval. Accept standard unix glob matching rules,
+# ## as well as ** to match recursive files and directories.
+# files = ["/tmp/metrics.out"]
+#
+# ## Name a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable.
+# # file_tag = ""
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
#
# ## The dataformat to be read from files
# ## Each data format has its own unique set of configuration options, read
@@ -2107,6 +2931,9 @@
# ## Only count regular files. Defaults to true.
# regular_only = true
#
+# ## Follow all symlinks while walking the directory tree. Defaults to false.
+# follow_symlinks = false
+#
# ## Only count files that are at least this size. If size is
# ## a negative number, only count files that are smaller than the
# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
@@ -2131,10 +2958,23 @@
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/log/**.log"]
+#
# ## If true, read the entire file and calculate an md5 checksum.
# md5 = false
+# # Read real time temps from fireboard.io servers
+# [[inputs.fireboard]]
+# ## Specify auth token for your account
+# auth_token = "invalidAuthToken"
+# ## You can override the fireboard server URL if necessary
+# # url = https://fireboard.io/api/v1/devices.json
+# ## You can set a different http_timeout if you need to
+# ## You should set a string using an number and time indicator
+# ## for example "12s" for 12 seconds.
+# # http_timeout = "4s"
+
+
# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
@@ -2151,6 +2991,24 @@
# ]
+# # Gather repository information from GitHub hosted repositories.
+# [[inputs.github]]
+# ## List of repositories to monitor.
+# repositories = [
+# "influxdata/telegraf",
+# "influxdata/influxdb"
+# ]
+#
+# ## Github API access token. Unauthenticated requests are limited to 60 per hour.
+# # access_token = ""
+#
+# ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+# # enterprise_base_url = ""
+#
+# ## Timeout for HTTP requests.
+# # http_timeout = "5s"
+
+
# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
# ## API endpoint, currently supported API:
@@ -2246,6 +3104,10 @@
# ## Optional HTTP headers
# # headers = {"X-Special-Header" = "Special-Value"}
#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
# ## Optional HTTP Basic Auth Credentials
# # username = "username"
# # password = "pa$$word"
@@ -2267,6 +3129,9 @@
# ## Amount of time allowed to complete the HTTP request
# # timeout = "5s"
#
+# ## List of success status codes
+# # success_status_codes = [200]
+#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -2276,9 +3141,13 @@
# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
+# ## Deprecated in 1.12, use 'urls'
# ## Server address (default http://localhost)
# # address = "http://localhost"
#
+# ## List of urls to query.
+# # urls = ["http://localhost"]
+#
# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set)
# # http_proxy = "http://localhost:8888"
#
@@ -2291,11 +3160,28 @@
# ## Whether to follow redirects from the server (defaults to false)
# # follow_redirects = false
#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
#
+# ## Optional name of the field that will contain the body of the response.
+# ## By default it is set to an empty String indicating that the body's content won't be added
+# # response_body_field = ''
+#
+# ## Maximum allowed HTTP response body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# ## If the response body size exceeds this limit a "body_read_error" will be raised
+# # response_body_max_size = "32MiB"
+#
# ## Optional substring or regex match in body of the response
# # response_string_match = "\"service_status\": \"up\""
# # response_string_match = "ok"
@@ -2311,6 +3197,14 @@
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
+#
+# ## Optional setting to map response http headers into tags
+# ## If the http header is not present on the request, no corresponding tag will be added
+# ## If multiple instances of the http header are present, only the first value will be used
+# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+#
+# ## Interface to use when dialing an address
+# # interface = "eth0"
# # Read flattened metrics from one or more JSON HTTP endpoints
@@ -2363,10 +3257,10 @@
# # Gather Icinga2 status
# [[inputs.icinga2]]
-# ## Required Icinga2 server address (default: "https://localhost:5665")
+# ## Required Icinga2 server address
# # server = "https://localhost:5665"
#
-# ## Required Icinga2 object type ("services" or "hosts, default "services")
+# ## Required Icinga2 object type ("services" or "hosts")
# # object_type = "services"
#
# ## Credentials for basic HTTP authentication
@@ -2384,6 +3278,11 @@
# # insecure_skip_verify = true
+# # Gets counters from all InfiniBand cards and ports installed
+# [[inputs.infiniband]]
+# # no configuration
+
+
# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
# ## Works with InfluxDB debug endpoints out of the box,
@@ -2396,6 +3295,10 @@
# "http://localhost:8086/debug/vars"
# ]
#
+# ## Username and password to send using HTTP Basic Authentication.
+# # username = ""
+# # password = ""
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -2433,6 +3336,11 @@
# ## optionally specify the path to the ipmitool executable
# # path = "/usr/bin/ipmitool"
# ##
+# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
+# ## Sudo must be configured to allow the telegraf user to run ipmitool
+# ## without a password.
+# # use_sudo = false
+# ##
# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
# # privilege = "ADMINISTRATOR"
# ##
@@ -2475,7 +3383,7 @@
# ## iptables can be restricted to only list command "iptables -nvL".
# use_sudo = false
# ## Setting 'use_lock' to true runs iptables with the "-w" option.
-# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
+# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl")
# use_lock = false
# ## Define an alternate executable, such as "ip6tables". Default is "iptables".
# # binary = "ip6tables"
@@ -2494,7 +3402,7 @@
# # Read jobs and cluster metrics from Jenkins instances
# [[inputs.jenkins]]
-# ## The Jenkins URL
+# ## The Jenkins URL in the format "schema://host:port"
# url = "http://my-jenkins-instance:8080"
# # username = "admin"
# # password = "admin"
@@ -2688,7 +3596,7 @@
# # Read status information from one or more Kibana servers
# [[inputs.kibana]]
-# ## specify a list of one or more Kibana servers
+# ## Specify a list of one or more Kibana servers
# servers = ["http://localhost:5601"]
#
# ## Timeout for HTTP requests
@@ -2715,6 +3623,8 @@
# # namespace = "default"
#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"
@@ -2724,14 +3634,20 @@
#
# ## Optional Resources to exclude from gathering
# ## Leave them with blank with try to gather everything available.
-# ## Values can be - "daemonsets", deployments", "nodes", "persistentvolumes",
-# ## "persistentvolumeclaims", "pods", "statefulsets"
+# ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes",
+# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
#
# ## Optional Resources to include when gathering
# ## Overrides resource_exclude if both set.
# # resource_include = [ "deployments", "nodes", "statefulsets" ]
#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
+# # selector_include = []
+# # selector_exclude = ["*"]
+#
# ## Optional TLS Config
# # tls_ca = "/path/to/cafile"
# # tls_cert = "/path/to/certfile"
@@ -2746,10 +3662,17 @@
# url = "http://127.0.0.1:10255"
#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"
#
+# ## Pod labels to be added as tags. An empty array for both include and
+# ## exclude will include all labels.
+# # label_include = []
+# # label_exclude = ["*"]
+#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
@@ -2773,6 +3696,39 @@
# # no configuration
+# # Read metrics exposed by Logstash
+# [[inputs.logstash]]
+# ## The URL of the exposed Logstash API endpoint.
+# url = "http://127.0.0.1:9600"
+#
+# ## Use Logstash 5 single pipeline API, set to true when monitoring
+# ## Logstash 5.
+# # single_pipeline = false
+#
+# ## Enable optional collection components. Can contain
+# ## "pipelines", "process", and "jvm".
+# # collect = ["pipelines", "process", "jvm"]
+#
+# ## Timeout for HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional HTTP Basic Auth credentials.
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Use TLS but skip chain & host verification.
+# # insecure_skip_verify = false
+#
+# ## Optional HTTP headers.
+# # [inputs.logstash.headers]
+# # "X-Special-Header" = "Special-Value"
+
+
# # Read metrics from local Lustre service on OST, MDS
# [[inputs.lustre2]]
# ## An array of /proc globs to search for Lustre stats
@@ -2801,6 +3757,26 @@
# # campaign_id = ""
+# # Retrieves information on a specific host in a MarkLogic Cluster
+# [[inputs.marklogic]]
+# ## Base URL of the MarkLogic HTTP Server.
+# url = "http://localhost:8002"
+#
+# ## List of specific hostnames to retrieve information. At least (1) required.
+# # hosts = ["hostname1", "hostname2"]
+#
+# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
# # Read metrics from one or many mcrouter servers
# [[inputs.mcrouter]]
# ## An array of address to gather stats about. Specify an ip or hostname
@@ -2823,8 +3799,10 @@
# [[inputs.mesos]]
# ## Timeout, in ms.
# timeout = 100
+#
# ## A list of Mesos masters.
# masters = ["http://localhost:5050"]
+#
# ## Master metrics groups to be collected, by default, all enabled.
# master_collections = [
# "resources",
@@ -2832,13 +3810,17 @@
# "system",
# "agents",
# "frameworks",
+# "framework_offers",
# "tasks",
# "messages",
# "evqueue",
# "registrar",
+# "allocator",
# ]
+#
# ## A list of Mesos slaves, default is []
# # slaves = []
+#
# ## Slave metrics groups to be collected, by default, all enabled.
# # slave_collections = [
# # "resources",
@@ -2857,14 +3839,100 @@
# # insecure_skip_verify = false
-# # Collects scores from a minecraft server's scoreboard using the RCON protocol
+# # Collects scores from a Minecraft server's scoreboard using the RCON protocol
# [[inputs.minecraft]]
-# ## server address for minecraft
+# ## Address of the Minecraft server.
# # server = "localhost"
-# ## port for RCON
+#
+# ## Server RCON Port.
# # port = "25575"
-# ## password RCON for mincraft server
-# # password = ""
+#
+# ## Server RCON Password.
+# password = ""
+#
+# ## Uncomment to remove deprecated metric components.
+# # tagdrop = ["server"]
+
+
+# # Retrieve data from MODBUS slave devices
+# [[inputs.modbus]]
+# ## Connection Configuration
+# ##
+# ## The plugin supports connections to PLCs via MODBUS/TCP or
+# ## via serial line communication in binary (RTU) or readable (ASCII) encoding
+# ##
+# ## Device name
+# name = "Device"
+#
+# ## Slave ID - addresses a MODBUS device on the bus
+# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
+# slave_id = 1
+#
+# ## Timeout for each request
+# timeout = "1s"
+#
+# ## Maximum number of retries and the time to wait between retries
+# ## when a slave-device is busy.
+# # busy_retries = 0
+# # busy_retries_wait = "100ms"
+#
+# # TCP - connect via Modbus/TCP
+# controller = "tcp://localhost:502"
+#
+# ## Serial (RS485; RS232)
+# # controller = "file:///dev/ttyUSB0"
+# # baud_rate = 9600
+# # data_bits = 8
+# # parity = "N"
+# # stop_bits = 1
+# # transmission_mode = "RTU"
+#
+#
+# ## Measurements
+# ##
+#
+# ## Digital Variables, Discrete Inputs and Coils
+# ## measurement - the (optional) measurement name, defaults to "modbus"
+# ## name - the variable name
+# ## address - variable address
+#
+# discrete_inputs = [
+# { name = "start", address = [0]},
+# { name = "stop", address = [1]},
+# { name = "reset", address = [2]},
+# { name = "emergency_stop", address = [3]},
+# ]
+# coils = [
+# { name = "motor1_run", address = [0]},
+# { name = "motor1_jog", address = [1]},
+# { name = "motor1_stop", address = [2]},
+# ]
+#
+# ## Analog Variables, Input Registers and Holding Registers
+# ## measurement - the (optional) measurement name, defaults to "modbus"
+# ## name - the variable name
+# ## byte_order - the ordering of bytes
+# ## |---AB, ABCD - Big Endian
+# ## |---BA, DCBA - Little Endian
+# ## |---BADC - Mid-Big Endian
+# ## |---CDAB - Mid-Little Endian
+# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation)
+# ## scale - the final numeric variable representation
+# ## address - variable address
+#
+# holding_registers = [
+# { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]},
+# { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]},
+# { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]},
+# { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]},
+# { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]},
+# { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]},
+# ]
+# input_registers = [
+# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
+# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
+# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+# ]
# # Read metrics from one or many MongoDB servers
@@ -2876,9 +3944,41 @@
# ## mongodb://10.10.3.33:18832,
# servers = ["mongodb://127.0.0.1:27017"]
#
+# ## When true, collect cluster status
+# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
+# ## may have an impact on performance.
+# # gather_cluster_status = true
+#
# ## When true, collect per database stats
# # gather_perdb_stats = false
#
+# ## When true, collect per collection stats
+# # gather_col_stats = false
+#
+# ## List of db where collections stats are collected
+# ## If empty, all db are concerned
+# # col_stats_dbs = ["local"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics and status information about processes managed by Monit
+# [[inputs.monit]]
+# ## Monit HTTPD address
+# address = "http://127.0.0.1:2812"
+#
+# ## Username and Password for Monit
+# # username = ""
+# # password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -2937,55 +4037,59 @@
# ## <1.6: metric_version = 1 (or unset)
# metric_version = 2
#
-# ## the limits for metrics form perf_events_statements
-# perf_events_statements_digest_text_limit = 120
-# perf_events_statements_limit = 250
-# perf_events_statements_time_limit = 86400
-# #
-# ## if the list is empty, then metrics are gathered from all databasee tables
-# table_schema_databases = []
-# #
+# ## if the list is empty, then metrics are gathered from all database tables
+# # table_schema_databases = []
+#
# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list
-# gather_table_schema = false
-# #
+# # gather_table_schema = false
+#
# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
-# gather_process_list = true
-# #
+# # gather_process_list = false
+#
# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
-# gather_user_statistics = true
-# #
+# # gather_user_statistics = false
+#
# ## gather auto_increment columns and max values from information schema
-# gather_info_schema_auto_inc = true
-# #
+# # gather_info_schema_auto_inc = false
+#
# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
-# gather_innodb_metrics = true
-# #
+# # gather_innodb_metrics = false
+#
# ## gather metrics from SHOW SLAVE STATUS command output
-# gather_slave_status = true
-# #
+# # gather_slave_status = false
+#
# ## gather metrics from SHOW BINARY LOGS command output
-# gather_binary_logs = false
-# #
+# # gather_binary_logs = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES
+# # gather_global_variables = true
+#
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
-# gather_table_io_waits = false
-# #
+# # gather_table_io_waits = false
+#
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
-# gather_table_lock_waits = false
-# #
+# # gather_table_lock_waits = false
+#
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
-# gather_index_io_waits = false
-# #
+# # gather_index_io_waits = false
+#
# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
-# gather_event_waits = false
-# #
+# # gather_event_waits = false
+#
# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
-# gather_file_events_stats = false
-# #
+# # gather_file_events_stats = false
+#
# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
-# gather_perf_events_statements = false
-# #
+# # gather_perf_events_statements = false
+#
+# ## the limits for metrics form perf_events_statements
+# # perf_events_statements_digest_text_limit = 120
+# # perf_events_statements_limit = 250
+# # perf_events_statements_time_limit = 86400
+#
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
-# interval_slow = "30m"
+# ## example: interval_slow = "30m"
+# # interval_slow = ""
#
# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
# # tls_ca = "/etc/telegraf/ca.pem"
@@ -3089,6 +4193,13 @@
#
# # HTTP response timeout (default: 5s)
# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
# # Read Nginx Plus Api documentation
@@ -3101,6 +4212,29 @@
#
# # HTTP response timeout (default: 5s)
# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx virtual host traffic status module information (nginx-module-sts)
+# [[inputs.nginx_sts]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# ## HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
@@ -3140,6 +4274,13 @@
#
# ## HTTP response timeout (default: 5s)
# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
# # Read NSQ topic and channel statistics.
@@ -3207,6 +4348,18 @@
# reverse_metric_names = true
+# # Get standard NTP query metrics from OpenNTPD.
+# [[inputs.openntpd]]
+# ## Run ntpctl binary with sudo.
+# # use_sudo = false
+#
+# ## Location of the ntpctl binary.
+# # binary = "/usr/sbin/ntpctl"
+#
+# ## Maximum time the ntpctl binary is allowed to run.
+# # timeout = "5ms"
+
+
# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver
# [[inputs.opensmtpd]]
# ## If running as a restricted user you can prepend sudo for additional access:
@@ -3215,10 +4368,42 @@
# ## The default location of the smtpctl binary can be overridden with:
# binary = "/usr/sbin/smtpctl"
#
-# ## The default timeout of 1000ms can be overriden with (in milliseconds):
+# ## The default timeout of 1000ms can be overridden with (in milliseconds):
# timeout = 1000
+# # Read current weather and forecasts data from openweathermap.org
+# [[inputs.openweathermap]]
+# ## OpenWeatherMap API key.
+# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+#
+# ## City ID's to collect weather data from.
+# city_id = ["5391959"]
+#
+# ## Language of the description field. Can be one of "ar", "bg",
+# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
+# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
+# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
+# # lang = "en"
+#
+# ## APIs to fetch; can contain "weather" or "forecast".
+# fetch = ["weather", "forecast"]
+#
+# ## OpenWeatherMap base URL
+# # base_url = "https://api.openweathermap.org/"
+#
+# ## Timeout for HTTP response.
+# # response_timeout = "5s"
+#
+# ## Preferred unit system for temperature and wind speed. Can be one of
+# ## "metric", "imperial", or "standard".
+# # units = "metric"
+#
+# ## Query interval; OpenWeatherMap updates their weather data every 10
+# ## minutes.
+# interval = "10m"
+
+
# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
# ## Path of passenger-status.
@@ -3260,39 +4445,65 @@
# ## "fcgi://10.0.0.12:9000/status"
# ## "cgi://10.0.10.12:9001/status"
# ##
-# ## Example of multiple gathering from local socket and remove host
+# ## Example of multiple gathering from local socket and remote host
# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
# urls = ["http://localhost/status"]
+#
+# ## Duration allowed to complete HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
# # Ping given url(s) and return statistics
# [[inputs.ping]]
-# ## List of urls to ping
+# ## Hosts to send ping packets to.
# urls = ["example.org"]
#
-# ## Number of pings to send per collection (ping -c )
+# ## Method used for sending pings, can be either "exec" or "native". When set
+# ## to "exec" the systems ping command will be executed. When set to "native"
+# ## the plugin will send pings directly.
+# ##
+# ## While the default is "exec" for backwards compatibility, new deployments
+# ## are encouraged to use the "native" method for improved compatibility and
+# ## performance.
+# # method = "exec"
+#
+# ## Number of ping packets to send per interval. Corresponds to the "-c"
+# ## option of the ping command.
# # count = 1
#
-# ## Interval, in s, at which to ping. 0 == default (ping -i )
-# ## Not available in Windows.
+# ## Time to wait between sending ping packets in seconds. Operates like the
+# ## "-i" option of the ping command.
# # ping_interval = 1.0
#
-# ## Per-ping timeout, in s. 0 == no timeout (ping -W )
+# ## If set, the time to wait for a ping response in seconds. Operates like
+# ## the "-W" option of the ping command.
# # timeout = 1.0
#
-# ## Total-ping deadline, in s. 0 == no deadline (ping -w )
+# ## If set, the total ping deadline, in seconds. Operates like the -w option
+# ## of the ping command.
# # deadline = 10
#
-# ## Interface or source address to send ping from (ping -I )
-# ## on Darwin and Freebsd only source address possible: (ping -S )
+# ## Interface or source address to send ping from. Operates like the -I or -S
+# ## option of the ping command.
# # interface = ""
#
-# ## Specify the ping executable binary, default is "ping"
+# ## Specify the ping executable binary.
# # binary = "ping"
#
-# ## Arguments for ping command
-# ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored
+# ## Arguments for ping command. When arguments is not empty, the command from
+# ## the binary option will be used and other options (ping_interval, timeout,
+# ## etc) will be ignored.
# # arguments = ["-c", "3"]
+#
+# ## Use only IPv6 addresses when resolving a hostname.
+# # ipv6 = false
# # Measure postfix queue statistics
@@ -3309,6 +4520,18 @@
# unix_sockets = ["/var/run/pdns.controlsocket"]
+# # Read metrics from one or many PowerDNS Recursor servers
+# [[inputs.powerdns_recursor]]
+# ## Path to the Recursor control socket.
+# unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+#
+# ## Directory to create receive socket. This default is likely not writable,
+# ## please reference the full plugin documentation for a recommended setup.
+# # socket_dir = "/var/run/"
+# ## Socket permissions for the receive socket.
+# # socket_mode = "0666"
+
+
# # Monitor process cpu and memory usage
# [[inputs.procstat]]
# ## PID file to monitor process
@@ -3334,9 +4557,15 @@
# ## Field name prefix
# # prefix = ""
#
-# ## Add PID as a tag instead of a field; useful to differentiate between
-# ## processes whose tags are otherwise the same. Can create a large number
-# ## of series, use judiciously.
+# ## When true add the full cmdline as a tag.
+# # cmdline_tag = false
+#
+# ## Add the PID as a tag instead of as a field. When collecting multiple
+# ## processes with otherwise matching tags this setting should be enabled to
+# ## ensure each process has a unique identity.
+# ##
+# ## Enabling this option may result in a large number of series, especially
+# ## when processes have a short lifetime.
# # pid_tag = false
#
# ## Method to use when finding process IDs. Can be one of 'pgrep', or
@@ -3346,7 +4575,7 @@
# # pid_finder = "pgrep"
-# # Reads last_run_summary.yaml file and converts to measurments
+# # Reads last_run_summary.yaml file and converts to measurements
# [[inputs.puppetagent]]
# ## Location of puppet last run summary file
# location = "/var/lib/puppet/state/last_run_summary.yaml"
@@ -3395,6 +4624,15 @@
# ## Note that an empty array for both will include all queues
# queue_name_include = []
# queue_name_exclude = []
+#
+# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement.
+# ## If neither are specified, metrics for all federation upstreams are gathered.
+# ## Federation link metrics will only be gathered for queues and exchanges
+# ## whose non-federation metrics will be collected (e.g a queue excluded
+# ## by the 'queue_name_exclude' option will also be excluded from federation).
+# ## Globs accepted.
+# # federation_upstream_include = ["dataCentre-*"]
+# # federation_upstream_exclude = []
# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
@@ -3403,9 +4641,32 @@
# urls = ["http://localhost:8080/_raindrops"]
-# # Read metrics from one or many redis servers
-# [[inputs.redis]]
-# ## specify servers via a url matching:
+# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs
+# [[inputs.redfish]]
+# ## Server url
+# address = "https://127.0.0.1:5000"
+#
+# ## Username, Password for hardware server
+# username = "root"
+# password = "password123456"
+#
+# ## ComputerSystemId
+# computer_system_id="2M220100SL"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many redis servers
+# [[inputs.redis]]
+# ## specify servers via a url matching:
# ## [protocol://][:password]@address[:port]
# ## e.g.
# ## tcp://localhost:6379
@@ -3485,13 +4746,13 @@
# [[inputs.smart]]
# ## Optionally specify the path to the smartctl executable
# # path = "/usr/bin/smartctl"
-# #
+#
# ## On most platforms smartctl requires root access.
# ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
# ## Sudo must be configured to to allow the telegraf user to run smartctl
-# ## with out password.
+# ## without a password.
# # use_sudo = false
-# #
+#
# ## Skip checking disks in this power mode. Defaults to
# ## "standby" to not wake up disks that have stoped rotating.
# ## See --nocheck in the man pages for smartctl.
@@ -3499,79 +4760,66 @@
# ## power mode and might require changing this value to
# ## "never" depending on your disks.
# # nocheck = "standby"
-# #
-# ## Gather detailed metrics for each SMART Attribute.
-# ## Defaults to "false"
-# ##
+#
+# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
+# ## information from each drive into the 'smart_attribute' measurement.
# # attributes = false
-# #
+#
# ## Optionally specify devices to exclude from reporting.
# # excludes = [ "/dev/pass6" ]
-# #
+#
# ## Optionally specify devices and device type, if unset
# ## a scan (smartctl --scan) for S.M.A.R.T. devices will
# ## done and all found will be included except for the
# ## excluded in excludes.
# # devices = [ "/dev/ada0 -d atacam" ]
+#
+# ## Timeout for the smartctl command to complete.
+# # timeout = "30s"
# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
-# agents = [ "127.0.0.1:161" ]
-# ## Timeout for each SNMP query.
-# timeout = "5s"
-# ## Number of retries to attempt within timeout.
-# retries = 3
-# ## SNMP version, values can be 1, 2, or 3
-# version = 2
+# ## Agent addresses to retrieve values from.
+# ## example: agents = ["udp://127.0.0.1:161"]
+# ## agents = ["tcp://127.0.0.1:161"]
+# agents = ["udp://127.0.0.1:161"]
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
#
# ## SNMP community string.
-# community = "public"
-#
-# ## The GETBULK max-repetitions parameter
-# max_repetitions = 10
-#
-# ## SNMPv3 auth parameters
-# #sec_name = "myuser"
-# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
-# #auth_password = "pass"
-# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
-# #context_name = ""
-# #priv_protocol = "" # Values: "DES", "AES", ""
-# #priv_password = ""
-#
-# ## measurement name
-# name = "system"
-# [[inputs.snmp.field]]
-# name = "hostname"
-# oid = ".1.0.0.1.1"
-# [[inputs.snmp.field]]
-# name = "uptime"
-# oid = ".1.0.0.1.2"
-# [[inputs.snmp.field]]
-# name = "load"
-# oid = ".1.0.0.1.3"
-# [[inputs.snmp.field]]
-# oid = "HOST-RESOURCES-MIB::hrMemorySize"
+# # community = "public"
#
-# [[inputs.snmp.table]]
-# ## measurement name
-# name = "remote_servers"
-# inherit_tags = [ "hostname" ]
-# [[inputs.snmp.table.field]]
-# name = "server"
-# oid = ".1.0.0.0.1.0"
-# is_tag = true
-# [[inputs.snmp.table.field]]
-# name = "connections"
-# oid = ".1.0.0.0.1.1"
-# [[inputs.snmp.table.field]]
-# name = "latency"
-# oid = ".1.0.0.0.1.2"
+# ## Number of retries to attempt.
+# # retries = 3
#
-# [[inputs.snmp.table]]
-# ## auto populate table's fields using the MIB
-# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## Add fields and tables defining the variables you wish to collect. This
+# ## example collects the system uptime and interface variables. Reference the
+# ## full plugin documentation for configuration details.
# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
@@ -3685,41 +4933,62 @@
#
# ## specify a list of one or more Solr cores (default - all)
# # cores = ["main"]
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
# # Read metrics from Microsoft SQL Server
# [[inputs.sqlserver]]
-# ## Specify instances to monitor with a list of connection strings.
-# ## All connection parameters are optional.
-# ## By default, the host is localhost, listening on default port, TCP 1433.
-# ## for Windows, the user is the currently running AD user (SSO).
-# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
-# ## parameters.
-# # servers = [
-# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
-# # ]
-#
-# ## Optional parameter, setting this to 2 will use a new version
-# ## of the collection queries that break compatibility with the original
-# ## dashboards.
-# query_version = 2
-#
-# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
-# # azuredb = false
-#
-# ## If you would like to exclude some of the metrics queries, list them here
-# ## Possible choices:
-# ## - PerformanceCounters
-# ## - WaitStatsCategorized
-# ## - DatabaseIO
-# ## - DatabaseProperties
-# ## - CPUHistory
-# ## - DatabaseSize
-# ## - DatabaseStats
-# ## - MemoryClerk
-# ## - VolumeSpace
-# ## - PerformanceMetrics
-# # exclude_query = [ 'DatabaseIO' ]
+# ## Specify instances to monitor with a list of connection strings.
+# ## All connection parameters are optional.
+# ## By default, the host is localhost, listening on default port, TCP 1433.
+# ## for Windows, the user is the currently running AD user (SSO).
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
+# ## parameters, in particular, tls connections can be created like so:
+# ## "encrypt=true;certificate=;hostNameInCertificate="
+# # servers = [
+# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
+# # ]
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
+# query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false
+#
+# ## Possible queries
+# ## Version 2:
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - DatabaseIO
+# ## - ServerProperties
+# ## - MemoryClerk
+# ## - Schedulers
+# ## - SqlRequests
+# ## - VolumeSpace
+# ## - Cpu
+# ## Version 1:
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - CPUHistory
+# ## - DatabaseIO
+# ## - DatabaseSize
+# ## - DatabaseStats
+# ## - DatabaseProperties
+# ## - MemoryClerk
+# ## - VolumeSpace
+# ## - PerformanceMetrics
+#
+# ## A list of queries to include. If not specified, all the above listed queries are used.
+# # include_query = []
+#
+# ## A list of queries to explicitly ignore.
+# exclude_query = [ 'Schedulers' , 'SqlRequests']
# # Gather timeseries from Google Cloud Platform v3 monitoring API
@@ -3802,6 +5071,11 @@
# # value = 'one_of("sda", "sdb")'
+# # Get synproxy counter statistics from procfs
+# [[inputs.synproxy]]
+# # no configuration
+
+
# # Sysstat metrics collector
# [[inputs.sysstat]]
# ## Path to the sadc command.
@@ -3811,18 +5085,15 @@
# ## Arch: /usr/lib/sa/sadc
# ## RHEL/CentOS: /usr/lib64/sa/sadc
# sadc_path = "/usr/lib/sa/sadc" # required
-# #
-# #
+#
# ## Path to the sadf command, if it is not in PATH
# # sadf_path = "/usr/bin/sadf"
-# #
-# #
+#
# ## Activities is a list of activities, that are passed as argument to the
# ## sadc collector utility (e.g: DISK, SNMP etc...)
# ## The more activities that are added, the more data is collected.
# # activities = ["DISK"]
-# #
-# #
+#
# ## Group metrics to measurements.
# ##
# ## If group is false each metric will be prefixed with a description
@@ -3830,8 +5101,7 @@
# ##
# ## If Group is true, corresponding metrics are grouped to a single measurement.
# # group = true
-# #
-# #
+#
# ## Options for the sadf command. The values on the left represent the sadf
# ## options and the values on the right their description (which are used for
# ## grouping and prefixing metrics).
@@ -3855,8 +5125,7 @@
# -w = "task"
# # -H = "hugepages" # only available for newer linux distributions
# # "-I ALL" = "interrupts" # requires INT activity
-# #
-# #
+#
# ## Device tags can be used to add additional tags for devices.
# ## For example the configuration below adds a tag vg with value rootvg for
# ## all metrics with sda devices.
@@ -3864,6 +5133,17 @@
# # vg = "rootvg"
+# # Gather systemd units state
+# [[inputs.systemd_units]]
+# ## Set timeout for systemctl execution
+# # timeout = "1s"
+# #
+# ## Filter for a specific unit type, default is "service", other possible
+# ## values are "socket", "target", "device", "mount", "automount", "swap",
+# ## "timer", "path", "slice" and "scope ":
+# # unittype = "service"
+
+
# # Reads metrics from a Teamspeak 3 Server via ServerQuery
# [[inputs.teamspeak]]
# ## Server address for Teamspeak 3 ServerQuery
@@ -3943,7 +5223,10 @@
# ## The default location of the unbound-control binary can be overridden with:
# # binary = "/usr/sbin/unbound-control"
#
-# ## The default timeout of 1s can be overriden with:
+# ## The default location of the unbound config file can be overridden with:
+# # config_file = "/etc/unbound/unbound.conf"
+#
+# ## The default timeout of 1s can be overridden with:
# # timeout = "1s"
#
# ## When set to true, thread metrics are tagged with the thread id.
@@ -3954,6 +5237,19 @@
# thread_as_tag = false
+# # Read uWSGI metrics.
+# [[inputs.uwsgi]]
+# ## List with urls of uWSGI Stats servers. URL must match pattern:
+# ## scheme://address[:port]
+# ##
+# ## For example:
+# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
+# servers = ["tcp://127.0.0.1:1717"]
+#
+# ## General connection timeout
+# # timeout = "5s"
+
+
# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## If running as a restricted user you can prepend sudo for additional access:
@@ -3969,13 +5265,20 @@
# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
#
# ## Optional name for the varnish instance (or working directory) to query
-# ## Usually appened after -n in varnish cli
+# ## Usually append after -n in varnish cli
# # instance_name = instanceName
#
# ## Timeout for varnishstat command
# # timeout = "1s"
+# # Collect Wireguard server interface and peer statistics
+# [[inputs.wireguard]]
+# ## Optional list of Wireguard device/interface names to query.
+# ## If omitted, all Wireguard interfaces are queried.
+# # devices = ["wg0"]
+
+
# # Monitor wifi signal strength and quality
# [[inputs.wireless]]
# ## Sets 'proc' directory path
@@ -3991,13 +5294,14 @@
# ## Timeout for SSL connection
# # timeout = "5s"
#
+# ## Pass a different name into the TLS request (Server Name Indication)
+# ## example: server_name = "myhost.example.org"
+# # server_name = ""
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
-#
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
@@ -4037,11 +5341,11 @@
# # insecure_skip_verify = true
-
###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################
+
# # AMQP consumer plugin
# [[inputs.amqp_consumer]]
# ## Broker to consume from.
@@ -4057,7 +5361,7 @@
# # username = ""
# # password = ""
#
-# ## Exchange to declare and consume from.
+# ## Name of the exchange to declare. If unset, no exchange will be declared.
# exchange = "telegraf"
#
# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
@@ -4071,7 +5375,7 @@
#
# ## Additional exchange arguments.
# # exchange_arguments = { }
-# # exchange_arguments = {"hash_propery" = "timestamp"}
+# # exchange_arguments = {"hash_property" = "timestamp"}
#
# ## AMQP queue name.
# queue = "telegraf"
@@ -4079,7 +5383,11 @@
# ## AMQP queue durability can be "transient" or "durable".
# queue_durability = "durable"
#
-# ## Binding Key.
+# ## If true, queue will be passively declared.
+# # queue_passive = false
+#
+# ## A binding between the exchange and queue using this binding key is
+# ## created. If unset, no binding is created.
# binding_key = "#"
#
# ## Maximum number of messages server should give to the worker.
@@ -4107,6 +5415,10 @@
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
+# ## Content encoding for message payloads, can be set to "gzip" to or
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -4136,6 +5448,98 @@
# ]
+# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
+# [[inputs.cisco_telemetry_mdt]]
+# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
+# ## using the grpc transport.
+# transport = "grpc"
+#
+# ## Address and port to host telemetry listener
+# service_address = ":57000"
+#
+# ## Enable TLS; grpc transport only.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Enable TLS client authentication and define allowed CA certificates; grpc
+# ## transport only.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
+# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
+#
+# ## Define aliases to map telemetry encoding paths to simple measurement names
+# [inputs.cisco_telemetry_mdt.aliases]
+# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+
+
+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+# ## Username for authorization on ClickHouse server
+# ## example: user = "default""
+# username = "default"
+#
+# ## Password for authorization on ClickHouse server
+# ## example: password = "super_secret"
+#
+# ## HTTP(s) timeout while getting metrics values
+# ## The timeout includes connection time, any redirects, and reading the response body.
+# ## example: timeout = 1s
+# # timeout = 5s
+#
+# ## List of servers for metrics scraping
+# ## metrics scrape via HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster
+# ## with using same "user:password" described in "user" and "password" parameters
+# ## and get this server hostname list from "system.clusters" table
+# ## see
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# ## example: auto_discovery = false
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter present then "WHERE cluster IN (...)" filter will apply
+# ## please use only full cluster names here, regexp and glob filters is not allowed
+# ## for "/etc/clickhouse-server/config.d/remote.xml"
+# ##
+# ##
+# ##
+# ##
+# ## clickhouse-ru-1.local 9000
+# ## clickhouse-ru-2.local 9000
+# ##
+# ##
+# ## clickhouse-eu-1.local 9000
+# ## clickhouse-eu-2.local 9000
+# ##
+# ##
+# ##
+# ##
+# ##
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
# # Read metrics from Google PubSub
# [[inputs.cloud_pubsub]]
# ## Required. Name of Google Cloud Platform (GCP) Project that owns
@@ -4262,9 +5666,211 @@
# data_format = "influx"
-# # Influx HTTP write listener
+# # Read logging output from the Docker engine
+# [[inputs.docker_log]]
+# ## Docker Endpoint
+# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
+# # endpoint = "unix:///var/run/docker.sock"
+#
+# ## When true, container logs are read from the beginning; otherwise
+# ## reading begins at the end of the log.
+# # from_beginning = false
+#
+# ## Timeout for Docker API calls.
+# # timeout = "5s"
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "running" state will be captured.
+# # container_state_include = []
+# # container_state_exclude = []
+#
+# ## docker labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# # docker_label_include = []
+# # docker_label_exclude = []
+#
+# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+# source_tag = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Azure Event Hubs service input plugin
+# [[inputs.eventhub_consumer]]
+# ## The default behavior is to create a new Event Hub client from environment variables.
+# ## This requires one of the following sets of environment variables to be set:
+# ##
+# ## 1) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "EVENTHUB_CONNECTION_STRING"
+# ##
+# ## 2) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "EVENTHUB_KEY_NAME"
+# ## - "EVENTHUB_KEY_VALUE"
+#
+# ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
+# ## This can either be the associated environment variable or hard coded directly.
+# # connection_string = ""
+#
+# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+# # persistence_dir = ""
+#
+# ## Change the default consumer group
+# # consumer_group = ""
+#
+# ## By default the event hub receives all messages present on the broker, alternative modes can be set below.
+# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
+# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run).
+# # from_timestamp =
+# # latest = true
+#
+# ## Set a custom prefetch count for the receiver(s)
+# # prefetch_count = 1000
+#
+# ## Add an epoch to the receiver(s)
+# # epoch = 0
+#
+# ## Change to set a custom user agent, "telegraf" is used by default
+# # user_agent = "telegraf"
+#
+# ## To consume from a specific partition, set the partition_ids option.
+# ## An empty array will result in receiving from all partitions.
+# # partition_ids = ["0","1"]
+#
+# ## Max undelivered messages
+# # max_undelivered_messages = 1000
+#
+# ## Set either option below to true to use a system property as timestamp.
+# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime.
+# ## It is recommended to use this setting when the data itself has no timestamp.
+# # enqueued_time_as_ts = true
+# # iot_hub_enqueued_time_as_ts = true
+#
+# ## Tags or fields to create from keys present in the application property bag.
+# ## These could for example be set by message enrichments in Azure IoT Hub.
+# # application_property_tags = []
+# # application_property_fields = []
+#
+# ## Tag or field name to use for metadata
+# ## By default all metadata is disabled
+# # sequence_number_field = "SequenceNumber"
+# # enqueued_time_field = "EnqueuedTime"
+# # offset_field = "Offset"
+# # partition_id_tag = "PartitionID"
+# # partition_key_tag = "PartitionKey"
+# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID"
+# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID"
+# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod"
+# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID"
+# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Run executable as long-running input plugin
+# [[inputs.execd]]
+# ## Program to run as daemon
+# command = ["telegraf-smartctl", "-d", "/dev/sda"]
+#
+# ## Define how the process is signaled on each collection interval.
+# ## Valid values are:
+# ## "none" : Do not signal anything.
+# ## The process must output metrics by itself.
+# ## "STDIN" : Send a newline on STDIN.
+# ## "SIGHUP" : Send a HUP signal. Not available on Windows.
+# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows.
+# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows.
+# signal = "none"
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # gNMI telemetry input plugin
+# [[inputs.gnmi]]
+# ## Address and port of the gNMI GRPC server
+# addresses = ["10.49.234.114:57777"]
+#
+# ## define credentials
+# username = "cisco"
+# password = "cisco"
+#
+# ## gNMI encoding requested (one of: "proto", "json", "json_ietf")
+# # encoding = "proto"
+#
+# ## redial in case of failures after
+# redial = "10s"
+#
+# ## enable client-side TLS and define CA to authenticate the device
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # insecure_skip_verify = true
+#
+# ## define client-side TLS certificate & key to authenticate to the device
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## gNMI subscription prefix (optional, can usually be left empty)
+# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+# # origin = ""
+# # prefix = ""
+# # target = ""
+#
+# ## Define additional aliases to map telemetry encoding paths to simple measurement names
+# #[inputs.gnmi.aliases]
+# # ifcounters = "openconfig:/interfaces/interface/state/counters"
+#
+# [[inputs.gnmi.subscription]]
+# ## Name of the measurement that will be emitted
+# name = "ifcounters"
+#
+# ## Origin and path of the subscription
+# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+# ##
+# ## origin usually refers to a (YANG) data model implemented by the device
+# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
+# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
+# origin = "openconfig-interfaces"
+# path = "/interfaces/interface/state/counters"
+#
+# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
+# subscription_mode = "sample"
+# sample_interval = "10s"
+#
+# ## Suppress redundant transmissions when measured values are unchanged
+# # suppress_redundant = false
+#
+# ## If suppression is enabled, send updates at least every X seconds anyway
+# # heartbeat_interval = "60s"
+
+
+# # Accept metrics over InfluxDB 1.x HTTP API
# [[inputs.http_listener]]
-# ## Address and port to host HTTP listener on
+# ## Address and port to host InfluxDB listener on
# service_address = ":8186"
#
# ## maximum duration before timing out read of the request
@@ -4272,13 +5878,19 @@
# ## maximum duration before timing out write of the response
# write_timeout = "10s"
#
-# ## Maximum allowed http request body size in bytes.
-# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
-# max_body_size = "500MiB"
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# max_body_size = "32MiB"
+#
+# ## Optional tag name used to store the database.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # database_tag = ""
#
-# ## Maximum line size allowed to be sent in bytes.
-# ## 0 means to use the default of 65536 bytes (64 kibibytes)
-# max_line_size = "64KiB"
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
@@ -4314,6 +5926,10 @@
# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes)
# # max_body_size = "500MB"
#
+# ## Part of the request to consume. Available options are "body" and
+# ## "query".
+# # data_source = "body"
+#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
@@ -4327,6 +5943,11 @@
# # basic_username = "foobar"
# # basic_password = "barfoo"
#
+# ## Optional setting to map http headers into tags
+# ## If the http header is not present on the request, no corresponding tag will be added
+# ## If multiple instances of the http header are present, only the first value will be used
+# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -4334,9 +5955,9 @@
# data_format = "influx"
-# # Influx HTTP write listener
+# # Accept metrics over InfluxDB 1.x HTTP API
# [[inputs.influxdb_listener]]
-# ## Address and port to host HTTP listener on
+# ## Address and port to host InfluxDB listener on
# service_address = ":8186"
#
# ## maximum duration before timing out read of the request
@@ -4344,13 +5965,19 @@
# ## maximum duration before timing out write of the response
# write_timeout = "10s"
#
-# ## Maximum allowed http request body size in bytes.
-# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
-# max_body_size = "500MiB"
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# max_body_size = "32MiB"
+#
+# ## Optional tag name used to store the database.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # database_tag = ""
#
-# ## Maximum line size allowed to be sent in bytes.
-# ## 0 means to use the default of 65536 bytes (64 kibibytes)
-# max_line_size = "64KiB"
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
@@ -4401,9 +6028,13 @@
# "/interfaces",
# ]
#
-# ## x509 Certificate to use with TLS connection. If it is not provided, an insecure
-# ## channel will be opened with server
-# ssl_cert = "/etc/telegraf/cert.pem"
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
#
# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
# ## Failed streams/calls will not be retried if 0 is provided
@@ -4413,39 +6044,50 @@
# str_as_tags = false
-# # Read metrics from Kafka topic(s)
+# # Read metrics from Kafka topics
# [[inputs.kafka_consumer]]
-# ## kafka servers
+# ## Kafka brokers.
# brokers = ["localhost:9092"]
-# ## topic(s) to consume
+#
+# ## Topics to consume.
# topics = ["telegraf"]
-# ## Add topic as tag if topic_tag is not empty
+#
+# ## When set this tag will be added to all metrics with the topic as the value.
# # topic_tag = ""
#
# ## Optional Client id
# # client_id = "Telegraf"
#
# ## Set the minimal supported Kafka version. Setting this enables the use of new
-# ## Kafka features and APIs. Of particular interest, lz4 compression
-# ## requires at least version 0.10.0.0.
+# ## Kafka features and APIs. Must be 0.10.2.0 or greater.
# ## ex: version = "1.1.0"
# # version = ""
#
# ## Optional TLS Config
+# # enable_tls = true
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
-# ## Optional SASL Config
+# ## SASL authentication credentials. These settings should typically be used
+# ## with TLS encryption enabled using the "enable_tls" option.
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
-# ## the name of the consumer group
-# consumer_group = "telegraf_metrics_consumers"
-# ## Offset (must be either "oldest" or "newest")
-# offset = "oldest"
+# ## SASL protocol version. When connecting to Azure EventHub set to 0.
+# # sasl_version = 1
+#
+# ## Name of the consumer group.
+# # consumer_group = "telegraf_metrics_consumers"
+#
+# ## Initial offset position; one of "oldest" or "newest".
+# # offset = "oldest"
+#
+# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
+# # balance_strategy = "range"
+#
# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
# ## larger messages are dropped
# max_message_len = 1000000
@@ -4471,12 +6113,16 @@
# [[inputs.kafka_consumer_legacy]]
# ## topic(s) to consume
# topics = ["telegraf"]
+#
# ## an array of Zookeeper connection strings
# zookeeper_peers = ["localhost:2181"]
+#
# ## Zookeeper Chroot
# zookeeper_chroot = ""
+#
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
+#
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
@@ -4547,6 +6193,14 @@
# table_name = "default"
+# # Read metrics off Arista LANZ, via socket
+# [[inputs.lanz]]
+# ## URL to Arista LANZ endpoint
+# servers = [
+# "tcp://127.0.0.1:50001"
+# ]
+
+
# # Stream and parse log file(s).
# [[inputs.logparser]]
# ## Log files to parse.
@@ -4603,9 +6257,23 @@
# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
-# ## MQTT broker URLs to be used. The format should be scheme://host:port,
-# ## schema can be tcp, ssl, or ws.
-# servers = ["tcp://localhost:1883"]
+# ## Broker URLs for the MQTT server or cluster. To connect to multiple
+# ## clusters or standalone servers, use a seperate plugin instance.
+# ## example: servers = ["tcp://localhost:1883"]
+# ## servers = ["ssl://localhost:1883"]
+# ## servers = ["ws://localhost:1883"]
+# servers = ["tcp://127.0.0.1:1883"]
+#
+# ## Topics that will be subscribed to.
+# topics = [
+# "telegraf/host01/cpu",
+# "telegraf/+/mem",
+# "sensors/#",
+# ]
+#
+# ## The message topic will be stored in a tag specified by this value. If set
+# ## to the empty string no topic tag will be created.
+# # topic_tag = "topic"
#
# ## QoS policy for messages
# ## 0 = at most once
@@ -4614,10 +6282,10 @@
# ##
# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
# ## resuming unacknowledged messages.
-# qos = 0
+# # qos = 0
#
# ## Connection timeout for initial connection in seconds
-# connection_timeout = "30s"
+# # connection_timeout = "30s"
#
# ## Maximum messages to read from the broker that have not been written by an
# ## output. For best throughput set based on the number of metrics within
@@ -4629,21 +6297,17 @@
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
#
-# ## Topics to subscribe to
-# topics = [
-# "telegraf/host01/cpu",
-# "telegraf/+/mem",
-# "sensors/#",
-# ]
+# ## Persistent session disables clearing of the client session on connection.
+# ## In order for this option to work you must also set client_id to identify
+# ## the client. To receive messages that arrived while the client is offline,
+# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+# ## publishing.
+# # persistent_session = false
#
-# # if true, messages that can't be delivered while the subscriber is offline
-# # will be delivered when it comes back (such as on service restart).
-# # NOTE: if true, client_id MUST be set
-# persistent_session = false
-# # If empty, a random client ID will be generated.
-# client_id = ""
+# ## If unset, a random client ID will be generated.
+# # client_id = ""
#
-# ## username and password to connect MQTT server.
+# ## Username and password to connect MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
@@ -4665,13 +6329,30 @@
# [[inputs.nats_consumer]]
# ## urls of NATS servers
# servers = ["nats://localhost:4222"]
-# ## Use Transport Layer Security
-# secure = false
+#
# ## subject(s) to consume
# subjects = ["telegraf"]
+#
# ## name a queue group
# queue_group = "telegraf_consumers"
#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
# ## Sets the limits for pending msgs and bytes for each subscription
# ## These shouldn't need to be adjusted except in very high throughput scenarios
# # pending_message_limit = 65536
@@ -4698,8 +6379,10 @@
# [[inputs.nsq_consumer]]
# ## Server option still works but is deprecated, we just prepend it to the nsqd array.
# # server = "localhost:4150"
+#
# ## An array representing the NSQD TCP HTTP Endpoints
# nsqd = ["localhost:4150"]
+#
# ## An array representing the NSQLookupd HTTP Endpoints
# nsqlookupd = ["localhost:4161"]
# topic = "telegraf"
@@ -4729,7 +6412,7 @@
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
-# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
@@ -4742,7 +6425,7 @@
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
-# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
@@ -4777,7 +6460,7 @@
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
-# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
# #
# ## All connection parameters are optional. #
# ## Without the dbname parameter, the driver will default to a database
@@ -4814,7 +6497,10 @@
# ## field is used to define custom tags (separated by commas)
# ## The optional "measurement" value can be used to override the default
# ## output measurement name ("postgresql").
-# #
+# ##
+# ## The script option can be used to specify the .sql file path.
+# ## If script and sqlquery options specified at same time, sqlquery will be used
+# ##
# ## Structure :
# ## [[inputs.postgresql_extensible.query]]
# ## sqlquery string
@@ -4840,6 +6526,18 @@
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
+# ## Metric version controls the mapping from Prometheus metrics into
+# ## Telegraf metrics. When using the prometheus_client output, use the same
+# ## value in both plugins to ensure metrics are round-tripped without
+# ## modification.
+# ##
+# ## example: metric_version = 1; deprecated in 1.13
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## Url tag name (tag containing scrapped url. optional, default is "url")
+# # url_tag = "scrapeUrl"
+#
# ## An array of Kubernetes services to scrape metrics from.
# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
#
@@ -4853,12 +6551,25 @@
# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
# ## - prometheus.io/port: If port is not 9102 use this annotation
# # monitor_kubernetes_pods = true
+# ## Restricts Kubernetes monitoring to a single namespace
+# ## ex: monitor_kubernetes_pods_namespace = "default"
+# # monitor_kubernetes_pods_namespace = ""
+# # label selector to target pods which have the label
+# # kubernetes_label_selector = "env=dev,app=nginx"
+# # field selector to target pods
+# # eg. To scrape pods on a specific node
+# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"
#
+# ## HTTP Basic Authentication username and password. ('bearer_token' and
+# ## 'bearer_token_string' take priority)
+# # username = ""
+# # password = ""
+#
# ## Specify timeout duration for slower prometheus clients (default is 3s)
# # response_timeout = "3s"
#
@@ -4870,6 +6581,49 @@
# # insecure_skip_verify = false
+# # SFlow V5 Protocol Listener
+# [[inputs.sflow]]
+# ## Address to listen for sFlow packets.
+# ## example: service_address = "udp://:6343"
+# ## service_address = "udp4://:6343"
+# ## service_address = "udp6://:6343"
+# service_address = "udp://:6343"
+#
+# ## Set the size of the operating system's receive buffer.
+# ## example: read_buffer_size = "64KiB"
+# # read_buffer_size = ""
+
+
+# # Receive SNMP traps
+# [[inputs.snmp_trap]]
+# ## Transport, local address, and port to listen on. Transport must
+# ## be "udp://". Omit local address to listen on all interfaces.
+# ## example: "udp://127.0.0.1:1234"
+# ##
+# ## Special permissions may be required to listen on a port less than
+# ## 1024. See README.md for details
+# ##
+# # service_address = "udp://:162"
+# ## Timeout running snmptranslate command
+# # timeout = "5s"
+# ## Snmp version, defaults to 2c
+# # version = "2c"
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA" or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+
+
# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
# ## URL to listen on
@@ -4884,6 +6638,13 @@
# # service_address = "unix:///tmp/telegraf.sock"
# # service_address = "unixgram:///tmp/telegraf.sock"
#
+# ## Change the file mode bits on unix sockets. These permissions may not be
+# ## respected by some platforms, to safely restrict write permissions it is best
+# ## to place the socket into a directory that has previously been created
+# ## with the desired permissions.
+# ## ex: socket_mode = "777"
+# # socket_mode = ""
+#
# ## Maximum number of concurrent connections.
# ## Only applies to stream sockets (e.g. TCP).
# ## 0 (default) is unlimited.
@@ -4918,6 +6679,10 @@
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# # data_format = "influx"
+#
+# ## Content encoding for message payloads, can be set to "gzip" to or
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
# # Statsd UDP/TCP Server
@@ -4952,7 +6717,7 @@
# delete_timings = true
#
# ## Percentiles to calculate for timing & histogram stats
-# percentiles = [90]
+# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
#
# ## separator to use between elements of a statsd metric
# metric_separator = "_"
@@ -4961,6 +6726,9 @@
# ## http://docs.datadoghq.com/guides/dogstatsd/
# parse_data_dog_tags = false
#
+# ## Parses datadog extensions to the statsd format
+# datadog_extensions = false
+#
# ## Statsd data translation templates, more info can be read here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
# # templates = [
@@ -4977,6 +6745,18 @@
# percentile_limit = 1000
+# # Suricata stats plugin
+# [[inputs.suricata]]
+# ## Data sink for Suricata stats log
+# # This is expected to be a filename of a
+# # unix socket to be created for listening.
+# source = "/var/run/suricata-stats.sock"
+#
+# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
+# # becomes "detect_alert" when delimiter is "_".
+# delimiter = "_"
+
+
# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
# [[inputs.syslog]]
# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
@@ -5008,10 +6788,10 @@
# ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
# ## or the non-transparent framing technique (RFC6587#section-3.4.2).
-# ## Must be one of "octect-counting", "non-transparent".
+# ## Must be one of "octet-counting", "non-transparent".
# # framing = "octet-counting"
#
-# ## The trailer to be expected in case of non-trasparent framing (default = "LF").
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
# ## Must be one of "LF", or "NUL".
# # trailer = "LF"
#
@@ -5027,9 +6807,9 @@
# # sdparam_separator = "_"
-# # Stream a log file, like the tail -f command
+# # Parse the new lines appended to a file
# [[inputs.tail]]
-# ## files to tail.
+# ## File names or a pattern to tail.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
@@ -5039,14 +6819,30 @@
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/mymetrics.out"]
+#
# ## Read file from beginning.
-# from_beginning = false
+# # from_beginning = false
+#
# ## Whether file is a named pipe
-# pipe = false
+# # pipe = false
#
# ## Method used to watch for file updates. Can be either "inotify" or "poll".
# # watch_method = "inotify"
#
+# ## Maximum lines of the file to process that have not yet be written by the
+# ## output. For best throughput set based on the number of metrics on each
+# ## line and the size of the output's metric_batch_size.
+# # max_undelivered_lines = 1000
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -5078,6 +6874,8 @@
#
# ## VMs
# ## Typical VM metrics (if omitted or empty, all metrics are collected)
+# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+# # vm_exclude = [] # Inventory paths to exclude
# vm_metric_include = [
# "cpu.demand.average",
# "cpu.idle.summation",
@@ -5119,6 +6917,8 @@
#
# ## Hosts
# ## Typical host metrics (if omitted or empty, all metrics are collected)
+# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+# # host_exclude [] # Inventory paths to exclude
# host_metric_include = [
# "cpu.coreUtilization.average",
# "cpu.costop.summation",
@@ -5167,33 +6967,43 @@
# "storageAdapter.write.average",
# "sys.uptime.latest",
# ]
+# ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
+# # ip_addresses = ["ipv6", "ipv4" ]
+#
# # host_metric_exclude = [] ## Nothing excluded by default
# # host_instances = true ## true by default
#
+#
# ## Clusters
+# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+# # cluster_exclude = [] # Inventory paths to exclude
# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
# # cluster_metric_exclude = [] ## Nothing excluded by default
# # cluster_instances = false ## false by default
#
# ## Datastores
+# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+# # datastore_exclude = [] # Inventory paths to exclude
# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
# # datastore_metric_exclude = [] ## Nothing excluded by default
-# # datastore_instances = false ## false by default for Datastores only
+# # datastore_instances = false ## false by default
#
# ## Datacenters
+# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+# # datacenter_exclude = [] # Inventory paths to exclude
# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
-# # datacenter_instances = false ## false by default for Datastores only
+# # datacenter_instances = false ## false by default
#
# ## Plugin Settings
# ## separator character to use for measurement and field names (default: "_")
# # separator = "_"
#
-# ## number of objects to retreive per query for realtime resources (vms and hosts)
+# ## number of objects to retrieve per query for realtime resources (vms and hosts)
# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
# # max_query_objects = 256
#
-# ## number of metrics to retreive per query for non-realtime resources (clusters and datastores)
+# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
# # max_query_metrics = 256
#
@@ -5201,11 +7011,6 @@
# # collect_concurrency = 1
# # discover_concurrency = 1
#
-# ## whether or not to force discovery of new objects on initial gather call before collecting metrics
-# ## when true for large environments this may cause errors for time elapsed while collecting metrics
-# ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
-# # force_discover_on_init = false
-#
# ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
# # object_discovery_interval = "300s"
#
@@ -5220,6 +7025,17 @@
# ## preserve the full precision when averaging takes place.
# # use_int_samples = true
#
+# ## Custom attributes from vCenter can be very useful for queries in order to slice the
+# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled
+# ## by default, since they can add a considerable amount of tags to the resulting metrics. To
+# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+# ## to select the attributes you want to include.
+# ## By default, since they can add a considerable amount of tags to the resulting metrics. To
+# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+# ## to select the attributes you want to include.
+# # custom_attribute_include = []
+# # custom_attribute_exclude = ["*"]
+#
# ## Optional SSL Config
# # ssl_ca = "/path/to/cafile"
# # ssl_cert = "/path/to/certfile"
diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf
index f0bfbdba0bff3..5b70928994158 100644
--- a/etc/telegraf_windows.conf
+++ b/etc/telegraf_windows.conf
@@ -9,9 +9,9 @@
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
-# Environment variables can be used anywhere in this config file, simply prepend
-# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
-# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
+# Environment variables can be used anywhere in this config file, simply surround
+# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
+# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
# Global tags can be specified here in key="value" format.
@@ -35,10 +35,9 @@
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
- ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
- ## output, and will flush this buffer on a successful write. Oldest metrics
- ## are dropped first when this buffer fills.
- ## This buffer only fills when writes fail to output plugin(s).
+ ## Maximum number of unwritten metrics per output. Increasing this value
+ ## allows for longer periods of output downtime without dropping metrics at the
+ ## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
@@ -64,13 +63,32 @@
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
- ## Logging configuration:
- ## Run telegraf with debug log messages.
- debug = false
- ## Run telegraf in quiet mode (error log messages only).
- quiet = false
- ## Specify the log file name. The empty string means to log to stderr.
- logfile = "/Program Files/Telegraf/telegraf.log"
+ ## Log at debug level.
+ # debug = false
+ ## Log only error level messages.
+ # quiet = false
+
+ ## Log target controls the destination for logs and can be one of "file",
+ ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
+ ## is determined by the "logfile" setting.
+ # logtarget = "file"
+
+ ## Name of the file to be logged to when using the "file" logtarget. If set to
+ ## the empty string then logs are written to stderr.
+ # logfile = ""
+
+ ## The logfile will be rotated after the time interval specified. When set
+ ## to 0 no time based rotation is performed. Logs are rotated only when
+ ## written to, if there is no log activity rotation may be delayed.
+ # logfile_rotation_interval = "0d"
+
+ ## The logfile will be rotated when it becomes larger than the specified
+ ## size. When set to 0 no size based rotation is performed.
+ # logfile_rotation_max_size = "0MB"
+
+ ## Maximum number of rotated archives to keep, any older logs are deleted.
+ ## If set to -1, no archives are removed.
+ # logfile_rotation_max_archives = 5
## Override default hostname, if empty use os.Hostname()
hostname = ""
@@ -79,9 +97,10 @@
###############################################################################
-# OUTPUTS #
+# OUTPUT PLUGINS #
###############################################################################
+
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
@@ -93,8 +112,16 @@
# urls = ["http://127.0.0.1:8086"]
## The target database for metrics; will be created as needed.
+ ## For UDP url endpoint database needs to be configured on server side.
# database = "telegraf"
+ ## The value of this tag will be used to determine the database. If this
+ ## tag is not set the 'database' option is used as the default.
+ # database_tag = ""
+
+ ## If true, the 'database_tag' will not be included in the written metric.
+ # exclude_database_tag = false
+
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
@@ -104,6 +131,13 @@
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
+ ## The value of this tag will be used to determine the retention policy. If this
+ ## tag is not set the 'retention_policy' option is used as the default.
+ # retention_policy_tag = ""
+
+ ## If true, the 'retention_policy_tag' will not be included in the written metric.
+ # exclude_retention_policy_tag = false
+
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
@@ -145,11 +179,64 @@
## existing data has been written.
# influx_uint_support = false
+# # Configuration for sending metrics to InfluxDB
+# [[outputs.influxdb_v2]]
+# ## The URLs of the InfluxDB cluster nodes.
+# ##
+# ## Multiple URLs can be specified for a single cluster, only ONE of the
+# ## urls will be written to each interval.
+# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
+# urls = ["http://127.0.0.1:9999"]
+#
+# ## Token for authentication.
+# token = ""
+#
+# ## Organization is the name of the organization you wish to write to; must exist.
+# organization = ""
+#
+# ## Destination bucket to write into.
+# bucket = ""
+#
+# ## The value of this tag will be used to determine the bucket. If this
+# ## tag is not set the 'bucket' option is used as the default.
+# # bucket_tag = ""
+#
+# ## If true, the bucket tag will not be added to the metric.
+# # exclude_bucket_tag = false
+#
+# ## Timeout for HTTP messages.
+# # timeout = "5s"
+#
+# ## Additional HTTP headers
+# # http_headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## HTTP Proxy override, if unset values the standard proxy environment
+# ## variables are consulted to determine which proxy, if any, should be used.
+# # http_proxy = "http://corporate.proxy:3128"
+#
+# ## HTTP User-Agent
+# # user_agent = "telegraf"
+#
+# ## Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "gzip"
+#
+# ## Enable or disable uint support for writing uints influxdb 2.0.
+# # influx_uint_support = false
+#
+# ## Optional TLS Config for use on HTTP connections.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
###############################################################################
-# INPUTS #
+# INPUT PLUGINS #
###############################################################################
+
# Windows Performance Counters plugin.
# These are the recommended method of monitoring system metrics on windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
@@ -184,8 +271,8 @@
"% Disk Time",
"% Disk Read Time",
"% Disk Write Time",
- "Current Disk Queue Length",
"% Free Space",
+ "Current Disk Queue Length",
"Free Megabytes",
]
Measurement = "win_disk"
@@ -251,7 +338,6 @@
"Standby Cache Reserve Bytes",
"Standby Cache Normal Priority Bytes",
"Standby Cache Core Bytes",
-
]
# Use 6 x - to remove the Instance bit from the query.
Instances = ["------"]
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000000000..b91c39b98c650
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,158 @@
+module github.com/influxdata/telegraf
+
+go 1.13
+
+require (
+ cloud.google.com/go v0.53.0
+ cloud.google.com/go/datastore v1.1.0 // indirect
+ cloud.google.com/go/pubsub v1.2.0
+ code.cloudfoundry.org/clock v1.0.0 // indirect
+ collectd.org v0.3.0
+ github.com/Azure/azure-event-hubs-go/v3 v3.2.0
+ github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687
+ github.com/Azure/go-autorest/autorest v0.9.3
+ github.com/Azure/go-autorest/autorest/azure/auth v0.4.2
+ github.com/BurntSushi/toml v0.3.1
+ github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee
+ github.com/Microsoft/ApplicationInsights-Go v0.4.2
+ github.com/Microsoft/go-winio v0.4.9 // indirect
+ github.com/Shopify/sarama v1.24.1
+ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
+ github.com/aerospike/aerospike-client-go v1.27.0
+ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4
+ github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9
+ github.com/apache/thrift v0.12.0
+ github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect
+ github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740
+ github.com/armon/go-metrics v0.3.0 // indirect
+ github.com/aws/aws-sdk-go v1.30.9
+ github.com/benbjohnson/clock v1.0.3
+ github.com/bitly/go-hostpool v0.1.0 // indirect
+ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869
+ github.com/caio/go-tdigest v2.3.0+incompatible // indirect
+ github.com/cenkalti/backoff v2.0.0+incompatible // indirect
+ github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6
+ github.com/cockroachdb/apd v1.1.0 // indirect
+ github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037
+ github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect
+ github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect
+ github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible
+ github.com/dimchansky/utfbom v1.1.0
+ github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible // indirect
+ github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133
+ github.com/docker/go-connections v0.3.0 // indirect
+ github.com/docker/go-units v0.3.3 // indirect
+ github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166
+ github.com/eclipse/paho.mqtt.golang v1.2.0
+ github.com/ericchiang/k8s v1.2.0
+ github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
+ github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96
+ github.com/go-logfmt/logfmt v0.4.0
+ github.com/go-ole/go-ole v1.2.1 // indirect
+ github.com/go-redis/redis v6.12.0+incompatible
+ github.com/go-sql-driver/mysql v1.5.0
+ github.com/goburrow/modbus v0.1.0
+ github.com/goburrow/serial v0.1.0 // indirect
+ github.com/gobwas/glob v0.2.3
+ github.com/gofrs/uuid v2.1.0+incompatible
+ github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
+ github.com/golang/geo v0.0.0-20190916061304-5b978397cfec
+ github.com/golang/protobuf v1.3.5
+ github.com/google/go-cmp v0.4.0
+ github.com/google/go-github v17.0.0+incompatible
+ github.com/google/go-querystring v1.0.0 // indirect
+ github.com/gopcua/opcua v0.1.12
+ github.com/gorilla/mux v1.6.2
+ github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
+ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
+ github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0
+ github.com/hashicorp/consul v1.2.1
+ github.com/hashicorp/go-msgpack v0.5.5 // indirect
+ github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect
+ github.com/hashicorp/memberlist v0.1.5 // indirect
+ github.com/hashicorp/serf v0.8.1 // indirect
+ github.com/influxdata/go-syslog/v2 v2.0.1
+ github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4
+ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65
+ github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8
+ github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
+ github.com/jackc/pgx v3.6.0+incompatible
+ github.com/jcmturner/gofork v1.0.0 // indirect
+ github.com/kardianos/service v1.0.0
+ github.com/karrick/godirwalk v1.12.0
+ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
+ github.com/klauspost/compress v1.9.2 // indirect
+ github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee
+ github.com/kylelemons/godebug v1.1.0 // indirect
+ github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect
+ github.com/lib/pq v1.3.0 // indirect
+ github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.1
+ github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe
+ github.com/miekg/dns v1.0.14
+ github.com/mitchellh/go-testing-interface v1.0.0 // indirect
+ github.com/multiplay/go-ts3 v1.0.0
+ github.com/naoina/go-stringutil v0.1.0 // indirect
+ github.com/nats-io/nats-server/v2 v2.1.4
+ github.com/nats-io/nats.go v1.9.1
+ github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0
+ github.com/nsqio/go-nsq v1.0.7
+ github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029
+ github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
+ github.com/opencontainers/image-spec v1.0.1 // indirect
+ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
+ github.com/opentracing/opentracing-go v1.0.2 // indirect
+ github.com/openzipkin/zipkin-go-opentracing v0.3.4
+ github.com/pkg/errors v0.9.1
+ github.com/prometheus/client_golang v1.5.1
+ github.com/prometheus/client_model v0.2.0
+ github.com/prometheus/common v0.9.1
+ github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664
+ github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect
+ github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
+ github.com/shirou/gopsutil v2.20.7+incompatible
+ github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect
+ github.com/sirupsen/logrus v1.4.2
+ github.com/soniah/gosnmp v1.25.0
+ github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8
+ github.com/stretchr/testify v1.5.1
+ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62
+ github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect
+ github.com/tidwall/gjson v1.6.0
+ github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect
+ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
+ github.com/vjeantet/grok v1.0.0
+ github.com/vmware/govmomi v0.19.0
+ github.com/wavefronthq/wavefront-sdk-go v0.9.2
+ github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf
+ github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect
+ github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect
+ go.starlark.net v0.0.0-20191227232015-caa3e9aa5008
+ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect
+ golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
+ golang.org/x/net v0.0.0-20200301022130-244492dfa37a
+ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
+ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
+ golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6
+ golang.org/x/text v0.3.3
+ golang.org/x/tools v0.0.0-20200317043434-63da46f3035e // indirect
+ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4
+ gonum.org/v1/gonum v0.6.2 // indirect
+ google.golang.org/api v0.20.0
+ google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24
+ google.golang.org/grpc v1.28.0
+ gopkg.in/fatih/pool.v2 v2.0.0 // indirect
+ gopkg.in/gorethink/gorethink.v3 v3.0.5
+ gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
+ gopkg.in/ldap.v3 v3.1.0
+ gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
+ gopkg.in/olivere/elastic.v5 v5.0.70
+ gopkg.in/yaml.v2 v2.2.5
+ gotest.tools v2.2.0+incompatible // indirect
+ honnef.co/go/tools v0.0.1-2020.1.3 // indirect
+ k8s.io/apimachinery v0.17.1 // indirect
+)
+
+// replaced due to https://github.com/satori/go.uuid/issues/73
+replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000000000..ed84b5f2556b4
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,906 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
+cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o=
+code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
+collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00=
+collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc=
+github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg=
+github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs=
+github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc=
+github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
+github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg=
+github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
+github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68=
+github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
+github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ=
+github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8=
+github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY=
+github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4=
+github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk=
+github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
+github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
+github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=
+github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w=
+github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM=
+github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg=
+github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg=
+github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ=
+github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/sarama v1.24.1 h1:svn9vfN3R1Hz21WR2Gj0VW9ehaDGkiOS+VqlIcZOkMI=
+github.com/Shopify/sarama v1.24.1/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU=
+github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE=
+github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ=
+github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc=
+github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos=
+github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA=
+github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY=
+github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
+github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
+github.com/aws/aws-sdk-go v1.30.9 h1:DntpBUKkchINPDbhEzDRin1eEn1TG9TZFlzWPf0i8to=
+github.com/aws/aws-sdk-go v1.30.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
+github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/caio/go-tdigest v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/fZeGmgmwj2cxxY=
+github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
+github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY=
+github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo=
+github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ=
+github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
+github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4=
+github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
+github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8=
+github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o=
+github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
+github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=
+github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible h1:357nGVUC8gSpeSc2Axup8HfrfTLLUfWfCsCUhiQSKIg=
+github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133 h1:Kus8nU6ctI/u/l86ljUJl6GpUtmO7gtD/krn4u5dr0M=
+github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
+github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ=
+github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
+github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI=
+github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg=
+github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
+github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
+github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8=
+github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
+github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-redis/redis v6.12.0+incompatible h1:s+64XI+z/RXqGHz2fQSgRJOEwqqSXeX3dliF7iVkMbE=
+github.com/go-redis/redis v6.12.0+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro=
+github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg=
+github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA=
+github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA=
+github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/gopcua/opcua v0.1.12 h1:TenluCr1CPB1NHjb9tX6yprc0eUmthznXxSc5mnJPBo=
+github.com/gopcua/opcua v0.1.12/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8=
+github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
+github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ=
+github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ=
+github.com/hashicorp/consul v1.2.1 h1:66MuuTfV4aOXTQM7cjAIKUWFOITSk4XZlMhE09ymVbg=
+github.com/hashicorp/consul v1.2.1/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E=
+github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg=
+github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM=
+github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4=
+github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s=
+github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo=
+github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 h1:K3A5vHPs/p8OjI4SL3l1+hs/98mhxTVDcV1Ap0c265E=
+github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM=
+github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY=
+github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8=
+github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q=
+github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
+github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q=
+github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME=
+github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
+github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
+github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw=
+github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0=
+github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
+github.com/karrick/godirwalk v1.12.0 h1:nkS4xxsjiZMvVlazd0mFyiwD4BR9f3m6LXGhM2TUx3Y=
+github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
+github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee h1:MB75LRhfeLER2RF7neSVpYuX/lL8aPi3yPtv5vdOJmk=
+github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4=
+github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U=
+github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg=
+github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
+github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA=
+github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk=
+github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw=
+github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0=
+github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc=
+github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
+github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
+github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg=
+github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY=
+github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws=
+github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw=
+github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
+github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g=
+github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg=
+github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY=
+github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ=
+github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY=
+github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w=
+github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc=
+github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g=
+github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
+github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
+github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk=
+github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY=
+github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shirou/gopsutil v2.20.7+incompatible h1:Ymv4OD12d6zm+2yONe39VSmp2XooJe8za7ngOLW/o/w=
+github.com/shirou/gopsutil v2.20.7+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A=
+github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ=
+github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ=
+github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o=
+github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw=
+github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg=
+github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
+github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc=
+github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
+github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
+github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
+github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0=
+github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ=
+github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo=
+github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY=
+github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
+github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk=
+github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU=
+github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q=
+github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg=
+github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk=
+github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk=
+github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
+go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.starlark.net v0.0.0-20191227232015-caa3e9aa5008 h1:PUpdYMZifLwPlUnFfT/2Hkqr7p0SSpOR7xrDiPaD52k=
+go.starlark.net v0.0.0-20191227232015-caa3e9aa5008/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w=
+golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c h1:Vco5b+cuG5NNfORVxZy6bYZQ7rsigisU1WQFkvQ0L5E=
+golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 h1:DvY3Zkh7KabQE/kfzMvYvKirSiguP9Q/veMtkYyf0o8=
+golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200317043434-63da46f3035e h1:8ogAbHWoJTPepnVbNRqXLOpzMkl0rtRsM7crbflc4XM=
+golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8=
+golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4=
+golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc=
+golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q=
+gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE=
+google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
+gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg=
+gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY=
+gopkg.in/fsnotify.v1 v1.2.1/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU=
+gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
+gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
+gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
+gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
+gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
+gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4=
+gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
+gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
+gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
+gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE=
+gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ=
+gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
+gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
+gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE=
+gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk=
+gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM=
+k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/input.go b/input.go
index 071ab7d9df603..08cfd75b9d562 100644
--- a/input.go
+++ b/input.go
@@ -1,11 +1,7 @@
package telegraf
type Input interface {
- // SampleConfig returns the default configuration of the Input
- SampleConfig() string
-
- // Description returns a one-sentence description on the Input
- Description() string
+ PluginDescriber
// Gather takes in an accumulator and adds the metrics that the Input
// gathers. This is called every "interval"
diff --git a/internal/choice/choice.go b/internal/choice/choice.go
new file mode 100644
index 0000000000000..33c26096ddfc1
--- /dev/null
+++ b/internal/choice/choice.go
@@ -0,0 +1,36 @@
+// Package choice provides basic functions for working with
+// plugin options that must be one of several values.
+package choice
+
+import "fmt"
+
+// Contains return true if the choice in the list of choices.
+func Contains(choice string, choices []string) bool {
+ for _, item := range choices {
+ if item == choice {
+ return true
+ }
+ }
+ return false
+}
+
+// CheckSContains returns an error if a choice is not one of
+// the available choices.
+func Check(choice string, available []string) error {
+ if !Contains(choice, available) {
+ return fmt.Errorf("unknown choice %s", choice)
+ }
+ return nil
+}
+
+// CheckSliceContains returns an error if the choices is not a subset of
+// available.
+func CheckSlice(choices, available []string) error {
+ for _, choice := range choices {
+ err := Check(choice, available)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/internal/content_coding.go b/internal/content_coding.go
new file mode 100644
index 0000000000000..daefa20eea633
--- /dev/null
+++ b/internal/content_coding.go
@@ -0,0 +1,182 @@
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "io"
+)
+
+// NewStreamContentDecoder returns a reader that will decode the stream
+// according to the encoding type.
+func NewStreamContentDecoder(encoding string, r io.Reader) (io.Reader, error) {
+ switch encoding {
+ case "gzip":
+ return NewGzipReader(r)
+ case "identity", "":
+ return r, nil
+ default:
+ return nil, errors.New("invalid value for content_encoding")
+ }
+}
+
+// GzipReader is similar to gzip.Reader but reads only a single gzip stream per read.
+type GzipReader struct {
+ r io.Reader
+ z *gzip.Reader
+ endOfStream bool
+}
+
+func NewGzipReader(r io.Reader) (io.Reader, error) {
+ // We need a read that implements ByteReader in order to line up the next
+ // stream.
+ br := bufio.NewReader(r)
+
+ // Reads the first gzip stream header.
+ z, err := gzip.NewReader(br)
+ if err != nil {
+ return nil, err
+ }
+
+ // Prevent future calls to Read from reading the following gzip header.
+ z.Multistream(false)
+
+ return &GzipReader{r: br, z: z}, nil
+}
+
+func (r *GzipReader) Read(b []byte) (int, error) {
+ if r.endOfStream {
+ // Reads the next gzip header and prepares for the next stream.
+ err := r.z.Reset(r.r)
+ if err != nil {
+ return 0, err
+ }
+ r.z.Multistream(false)
+ r.endOfStream = false
+ }
+
+ n, err := r.z.Read(b)
+
+ // Since multistream is disabled, io.EOF indicates the end of the gzip
+ // sequence. On the next read we must read the next gzip header.
+ if err == io.EOF {
+ r.endOfStream = true
+ return n, nil
+ }
+ return n, err
+
+}
+
+// NewContentEncoder returns a ContentEncoder for the encoding type.
+func NewContentEncoder(encoding string) (ContentEncoder, error) {
+ switch encoding {
+ case "gzip":
+ return NewGzipEncoder()
+ case "identity", "":
+ return NewIdentityEncoder(), nil
+ default:
+ return nil, errors.New("invalid value for content_encoding")
+ }
+}
+
+// NewContentDecoder returns a ContentDecoder for the encoding type.
+func NewContentDecoder(encoding string) (ContentDecoder, error) {
+ switch encoding {
+ case "gzip":
+ return NewGzipDecoder()
+ case "identity", "":
+ return NewIdentityDecoder(), nil
+ default:
+ return nil, errors.New("invalid value for content_encoding")
+ }
+}
+
+// ContentEncoder applies a wrapper encoding to byte buffers.
+type ContentEncoder interface {
+ Encode([]byte) ([]byte, error)
+}
+
+// GzipEncoder compresses the buffer using gzip at the default level.
+type GzipEncoder struct {
+ writer *gzip.Writer
+ buf *bytes.Buffer
+}
+
+func NewGzipEncoder() (*GzipEncoder, error) {
+ var buf bytes.Buffer
+ return &GzipEncoder{
+ writer: gzip.NewWriter(&buf),
+ buf: &buf,
+ }, nil
+}
+
+func (e *GzipEncoder) Encode(data []byte) ([]byte, error) {
+ e.buf.Reset()
+ e.writer.Reset(e.buf)
+
+ _, err := e.writer.Write(data)
+ if err != nil {
+ return nil, err
+ }
+ err = e.writer.Close()
+ if err != nil {
+ return nil, err
+ }
+ return e.buf.Bytes(), nil
+}
+
+// IdentityEncoder is a null encoder that applies no transformation.
+type IdentityEncoder struct{}
+
+func NewIdentityEncoder() *IdentityEncoder {
+ return &IdentityEncoder{}
+}
+
+func (*IdentityEncoder) Encode(data []byte) ([]byte, error) {
+ return data, nil
+}
+
+// ContentDecoder removes a wrapper encoding from byte buffers.
+type ContentDecoder interface {
+ Decode([]byte) ([]byte, error)
+}
+
+// GzipDecoder decompresses buffers with gzip compression.
+type GzipDecoder struct {
+ reader *gzip.Reader
+ buf *bytes.Buffer
+}
+
+func NewGzipDecoder() (*GzipDecoder, error) {
+ return &GzipDecoder{
+ reader: new(gzip.Reader),
+ buf: new(bytes.Buffer),
+ }, nil
+}
+
+func (d *GzipDecoder) Decode(data []byte) ([]byte, error) {
+ d.reader.Reset(bytes.NewBuffer(data))
+ d.buf.Reset()
+
+ _, err := d.buf.ReadFrom(d.reader)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+ err = d.reader.Close()
+ if err != nil {
+ return nil, err
+ }
+ return d.buf.Bytes(), nil
+}
+
+// IdentityDecoder is a null decoder that returns the input.
+type IdentityDecoder struct{}
+
+func NewIdentityDecoder() *IdentityDecoder {
+ return &IdentityDecoder{}
+}
+
+func (*IdentityDecoder) Decode(data []byte) ([]byte, error) {
+ return data, nil
+}
diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go
new file mode 100644
index 0000000000000..85496df59c5b6
--- /dev/null
+++ b/internal/content_coding_test.go
@@ -0,0 +1,94 @@
+package internal
+
+import (
+ "bytes"
+ "io/ioutil"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestGzipEncodeDecode(t *testing.T) {
+ enc, err := NewGzipEncoder()
+ require.NoError(t, err)
+ dec, err := NewGzipDecoder()
+ require.NoError(t, err)
+
+ payload, err := enc.Encode([]byte("howdy"))
+ require.NoError(t, err)
+
+ actual, err := dec.Decode(payload)
+ require.NoError(t, err)
+
+ require.Equal(t, "howdy", string(actual))
+}
+
+func TestGzipReuse(t *testing.T) {
+ enc, err := NewGzipEncoder()
+ require.NoError(t, err)
+ dec, err := NewGzipDecoder()
+ require.NoError(t, err)
+
+ payload, err := enc.Encode([]byte("howdy"))
+ require.NoError(t, err)
+
+ actual, err := dec.Decode(payload)
+ require.NoError(t, err)
+
+ require.Equal(t, "howdy", string(actual))
+
+ payload, err = enc.Encode([]byte("doody"))
+ require.NoError(t, err)
+
+ actual, err = dec.Decode(payload)
+ require.NoError(t, err)
+
+ require.Equal(t, "doody", string(actual))
+}
+
+func TestIdentityEncodeDecode(t *testing.T) {
+ enc := NewIdentityEncoder()
+ dec := NewIdentityDecoder()
+
+ payload, err := enc.Encode([]byte("howdy"))
+ require.NoError(t, err)
+
+ actual, err := dec.Decode(payload)
+ require.NoError(t, err)
+
+ require.Equal(t, "howdy", string(actual))
+}
+
+func TestStreamIdentityDecode(t *testing.T) {
+ var r bytes.Buffer
+ n, err := r.Write([]byte("howdy"))
+ require.NoError(t, err)
+ require.Equal(t, 5, n)
+
+ dec, err := NewStreamContentDecoder("identity", &r)
+ require.NoError(t, err)
+
+ data, err := ioutil.ReadAll(dec)
+ require.NoError(t, err)
+
+ require.Equal(t, []byte("howdy"), data)
+}
+
+func TestStreamGzipDecode(t *testing.T) {
+ enc, err := NewGzipEncoder()
+ require.NoError(t, err)
+ written, err := enc.Encode([]byte("howdy"))
+ require.NoError(t, err)
+
+ w := bytes.NewBuffer(written)
+
+ dec, err := NewStreamContentDecoder("gzip", w)
+ require.NoError(t, err)
+
+ b := make([]byte, 10)
+ n, err := dec.Read(b)
+ require.NoError(t, err)
+ require.Equal(t, 5, n)
+
+ require.Equal(t, []byte("howdy"), b[:n])
+}
diff --git a/internal/docker/docker.go b/internal/docker/docker.go
new file mode 100644
index 0000000000000..1808944ae620b
--- /dev/null
+++ b/internal/docker/docker.go
@@ -0,0 +1,36 @@
+package docker
+
+import "strings"
+
+// Adapts some of the logic from the actual Docker library's image parsing
+// routines:
+// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go
+func ParseImage(image string) (string, string) {
+ domain := ""
+ remainder := ""
+
+ i := strings.IndexRune(image, '/')
+
+ if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") {
+ remainder = image
+ } else {
+ domain, remainder = image[:i], image[i+1:]
+ }
+
+ imageName := ""
+ imageVersion := "unknown"
+
+ i = strings.LastIndex(remainder, ":")
+ if i > -1 {
+ imageVersion = remainder[i+1:]
+ imageName = remainder[:i]
+ } else {
+ imageName = remainder
+ }
+
+ if domain != "" {
+ imageName = domain + "/" + imageName
+ }
+
+ return imageName, imageVersion
+}
diff --git a/internal/docker/docker_test.go b/internal/docker/docker_test.go
new file mode 100644
index 0000000000000..14591ab87a1f9
--- /dev/null
+++ b/internal/docker/docker_test.go
@@ -0,0 +1,59 @@
+package docker_test
+
+import (
+ "testing"
+
+ "github.com/influxdata/telegraf/internal/docker"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseImage(t *testing.T) {
+ tests := []struct {
+ image string
+ parsedName string
+ parsedVersion string
+ }{
+ {
+ image: "postgres",
+ parsedName: "postgres",
+ parsedVersion: "unknown",
+ },
+ {
+ image: "postgres:latest",
+ parsedName: "postgres",
+ parsedVersion: "latest",
+ },
+ {
+ image: "coreos/etcd",
+ parsedName: "coreos/etcd",
+ parsedVersion: "unknown",
+ },
+ {
+ image: "coreos/etcd:latest",
+ parsedName: "coreos/etcd",
+ parsedVersion: "latest",
+ },
+ {
+ image: "quay.io/postgres",
+ parsedName: "quay.io/postgres",
+ parsedVersion: "unknown",
+ },
+ {
+ image: "quay.io:4443/coreos/etcd",
+ parsedName: "quay.io:4443/coreos/etcd",
+ parsedVersion: "unknown",
+ },
+ {
+ image: "quay.io:4443/coreos/etcd:latest",
+ parsedName: "quay.io:4443/coreos/etcd",
+ parsedVersion: "latest",
+ },
+ }
+ for _, tt := range tests {
+ t.Run("parse name "+tt.image, func(t *testing.T) {
+ imageName, imageVersion := docker.ParseImage(tt.image)
+ require.Equal(t, tt.parsedName, imageName)
+ require.Equal(t, tt.parsedVersion, imageVersion)
+ })
+ }
+}
diff --git a/internal/exec.go b/internal/exec.go
new file mode 100644
index 0000000000000..7fe95c0b94932
--- /dev/null
+++ b/internal/exec.go
@@ -0,0 +1,44 @@
+package internal
+
+import (
+ "bytes"
+ "os/exec"
+ "time"
+)
+
+// CombinedOutputTimeout runs the given command with the given timeout and
+// returns the combined output of stdout and stderr.
+// If the command times out, it attempts to kill the process.
+func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
+ var b bytes.Buffer
+ c.Stdout = &b
+ c.Stderr = &b
+ if err := c.Start(); err != nil {
+ return nil, err
+ }
+ err := WaitTimeout(c, timeout)
+ return b.Bytes(), err
+}
+
+// StdOutputTimeout runs the given command with the given timeout and
+// returns the output of stdout.
+// If the command times out, it attempts to kill the process.
+func StdOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
+ var b bytes.Buffer
+ c.Stdout = &b
+ c.Stderr = nil
+ if err := c.Start(); err != nil {
+ return nil, err
+ }
+ err := WaitTimeout(c, timeout)
+ return b.Bytes(), err
+}
+
+// RunTimeout runs the given command with the given timeout.
+// If the command times out, it attempts to kill the process.
+func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
+ if err := c.Start(); err != nil {
+ return err
+ }
+ return WaitTimeout(c, timeout)
+}
diff --git a/internal/exec_unix.go b/internal/exec_unix.go
new file mode 100644
index 0000000000000..d41aae825d6d5
--- /dev/null
+++ b/internal/exec_unix.go
@@ -0,0 +1,58 @@
+// +build !windows
+
+package internal
+
+import (
+ "log"
+ "os/exec"
+ "syscall"
+ "time"
+)
+
+// KillGrace is the amount of time we allow a process to shutdown before
+// sending a SIGKILL.
+const KillGrace = 5 * time.Second
+
+// WaitTimeout waits for the given command to finish with a timeout.
+// It assumes the command has already been started.
+// If the command times out, it attempts to kill the process.
+func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
+ var kill *time.Timer
+ term := time.AfterFunc(timeout, func() {
+ err := c.Process.Signal(syscall.SIGTERM)
+ if err != nil {
+ log.Printf("E! [agent] Error terminating process: %s", err)
+ return
+ }
+
+ kill = time.AfterFunc(KillGrace, func() {
+ err := c.Process.Kill()
+ if err != nil {
+ log.Printf("E! [agent] Error killing process: %s", err)
+ return
+ }
+ })
+ })
+
+ err := c.Wait()
+
+ // Shutdown all timers
+ if kill != nil {
+ kill.Stop()
+ }
+ termSent := !term.Stop()
+
+ // If the process exited without error treat it as success. This allows a
+ // process to do a clean shutdown on signal.
+ if err == nil {
+ return nil
+ }
+
+ // If SIGTERM was sent then treat any process error as a timeout.
+ if termSent {
+ return TimeoutErr
+ }
+
+ // Otherwise there was an error unrelated to termination.
+ return err
+}
diff --git a/internal/exec_windows.go b/internal/exec_windows.go
new file mode 100644
index 0000000000000..f010bdd96756b
--- /dev/null
+++ b/internal/exec_windows.go
@@ -0,0 +1,41 @@
+// +build windows
+
+package internal
+
+import (
+ "log"
+ "os/exec"
+ "time"
+)
+
+// WaitTimeout waits for the given command to finish with a timeout.
+// It assumes the command has already been started.
+// If the command times out, it attempts to kill the process.
+func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
+ timer := time.AfterFunc(timeout, func() {
+ err := c.Process.Kill()
+ if err != nil {
+ log.Printf("E! [agent] Error killing process: %s", err)
+ return
+ }
+ })
+
+ err := c.Wait()
+
+ // Shutdown all timers
+ termSent := !timer.Stop()
+
+ // If the process exited without error treat it as success. This allows a
+ // process to do a clean shutdown on signal.
+ if err == nil {
+ return nil
+ }
+
+ // If SIGTERM was sent then treat any process error as a timeout.
+ if termSent {
+ return TimeoutErr
+ }
+
+ // Otherwise there was an error unrelated to termination.
+ return err
+}
diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go
index b21d93520841a..d4e7ffd8743bd 100644
--- a/internal/globpath/globpath.go
+++ b/internal/globpath/globpath.go
@@ -21,7 +21,7 @@ func Compile(path string) (*GlobPath, error) {
out := GlobPath{
hasMeta: hasMeta(path),
HasSuperMeta: hasSuperMeta(path),
- path: path,
+ path: filepath.FromSlash(path),
}
// if there are no glob meta characters in the path, don't bother compiling
@@ -41,8 +41,9 @@ func Compile(path string) (*GlobPath, error) {
return &out, nil
}
-// Match returns all files matching the expression
-// If it's a static path, returns path
+// Match returns all files matching the expression.
+// If it's a static path, returns path.
+// All returned path will have the host platform separator.
func (g *GlobPath) Match() []string {
if !g.hasMeta {
return []string{g.path}
@@ -82,7 +83,8 @@ func (g *GlobPath) Match() []string {
return out
}
-// MatchString test a string against the glob
+// MatchString tests the path string against the glob. The path should contain
+// the host platform separator.
func (g *GlobPath) MatchString(path string) bool {
if !g.HasSuperMeta {
res, _ := filepath.Match(g.path, path)
@@ -96,6 +98,7 @@ func (g *GlobPath) MatchString(path string) bool {
// - any directory under these roots may contain a matching file
// - no file outside of these roots can match the pattern
// Note that it returns both files and directories.
+// All returned path will have the host platform separator.
func (g *GlobPath) GetRoots() []string {
if !g.hasMeta {
return []string{g.path}
diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go
index 476ba924346e0..60562d8f8f1ae 100644
--- a/internal/globpath/globpath_test.go
+++ b/internal/globpath/globpath_test.go
@@ -87,3 +87,14 @@ func TestMatch_ErrPermission(t *testing.T) {
require.Equal(t, test.expected, actual)
}
}
+
+func TestWindowsSeparator(t *testing.T) {
+ if runtime.GOOS != "windows" {
+ t.Skip("Skipping Windows only test")
+ }
+
+ glob, err := Compile("testdata/nested1")
+ require.NoError(t, err)
+ ok := glob.MatchString("testdata\\nested1")
+ require.True(t, ok)
+}
diff --git a/internal/goplugin/noplugin.go b/internal/goplugin/noplugin.go
new file mode 100644
index 0000000000000..23d8634c46520
--- /dev/null
+++ b/internal/goplugin/noplugin.go
@@ -0,0 +1,9 @@
+// +build !goplugin
+
+package goplugin
+
+import "errors"
+
+func LoadExternalPlugins(rootDir string) error {
+ return errors.New("go plugin support is not enabled")
+}
diff --git a/internal/goplugin/plugin.go b/internal/goplugin/plugin.go
new file mode 100644
index 0000000000000..7e58ec32e92c2
--- /dev/null
+++ b/internal/goplugin/plugin.go
@@ -0,0 +1,42 @@
+// +build goplugin
+
+package goplugin
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "plugin"
+ "strings"
+)
+
+// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.)
+// in the specified directory.
+func LoadExternalPlugins(rootDir string) error {
+ return filepath.Walk(rootDir, func(pth string, info os.FileInfo, err error) error {
+ // Stop if there was an error.
+ if err != nil {
+ return err
+ }
+
+ // Ignore directories.
+ if info.IsDir() {
+ return nil
+ }
+
+ // Ignore files that aren't shared libraries.
+ ext := strings.ToLower(path.Ext(pth))
+ if ext != ".so" && ext != ".dll" {
+ return nil
+ }
+
+ // Load plugin.
+ _, err = plugin.Open(pth)
+ if err != nil {
+ return fmt.Errorf("error loading %s: %s", pth, err)
+ }
+
+ return nil
+ })
+}
diff --git a/internal/http.go b/internal/http.go
new file mode 100644
index 0000000000000..1c3dd49577557
--- /dev/null
+++ b/internal/http.go
@@ -0,0 +1,143 @@
+package internal
+
+import (
+ "crypto/subtle"
+ "net"
+ "net/http"
+ "net/url"
+)
+
+type BasicAuthErrorFunc func(rw http.ResponseWriter)
+
+// AuthHandler returns a http handler that requires HTTP basic auth
+// credentials to match the given username and password.
+func AuthHandler(username, password, realm string, onError BasicAuthErrorFunc) func(h http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ return &basicAuthHandler{
+ username: username,
+ password: password,
+ realm: realm,
+ onError: onError,
+ next: h,
+ }
+ }
+}
+
+type basicAuthHandler struct {
+ username string
+ password string
+ realm string
+ onError BasicAuthErrorFunc
+ next http.Handler
+}
+
+func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if h.username != "" || h.password != "" {
+ reqUsername, reqPassword, ok := req.BasicAuth()
+ if !ok ||
+ subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 ||
+ subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 {
+
+ rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"")
+ h.onError(rw)
+ http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+ return
+ }
+ }
+
+ h.next.ServeHTTP(rw, req)
+}
+
+type GenericAuthErrorFunc func(rw http.ResponseWriter)
+
+// GenericAuthHandler returns a http handler that requires `Authorization: `
+func GenericAuthHandler(credentials string, onError GenericAuthErrorFunc) func(h http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ return &genericAuthHandler{
+ credentials: credentials,
+ onError: onError,
+ next: h,
+ }
+ }
+}
+
+// Generic auth scheme handler - exact match on `Authorization: `
+type genericAuthHandler struct {
+ credentials string
+ onError GenericAuthErrorFunc
+ next http.Handler
+}
+
+func (h *genericAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if h.credentials != "" {
+ // Scheme checking
+ authorization := req.Header.Get("Authorization")
+ if subtle.ConstantTimeCompare([]byte(authorization), []byte(h.credentials)) != 1 {
+
+ h.onError(rw)
+ http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+ return
+ }
+ }
+
+ h.next.ServeHTTP(rw, req)
+}
+
+// ErrorFunc is a callback for writing an error response.
+type ErrorFunc func(rw http.ResponseWriter, code int)
+
+// IPRangeHandler returns a http handler that requires the remote address to be
+// in the specified network.
+func IPRangeHandler(network []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ return &ipRangeHandler{
+ network: network,
+ onError: onError,
+ next: h,
+ }
+ }
+}
+
+type ipRangeHandler struct {
+ network []*net.IPNet
+ onError ErrorFunc
+ next http.Handler
+}
+
+func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if len(h.network) == 0 {
+ h.next.ServeHTTP(rw, req)
+ return
+ }
+
+ remoteIPString, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err != nil {
+ h.onError(rw, http.StatusForbidden)
+ return
+ }
+
+ remoteIP := net.ParseIP(remoteIPString)
+ if remoteIP == nil {
+ h.onError(rw, http.StatusForbidden)
+ return
+ }
+
+ for _, net := range h.network {
+ if net.Contains(remoteIP) {
+ h.next.ServeHTTP(rw, req)
+ return
+ }
+ }
+
+ h.onError(rw, http.StatusForbidden)
+}
+
+func OnClientError(client *http.Client, err error) {
+ // Close connection after a timeout error. If this is a HTTP2
+ // connection this ensures that next interval a new connection will be
+ // used and name lookup will be performed.
+ // https://github.com/golang/go/issues/36026
+ if err, ok := err.(*url.Error); ok && err.Timeout() {
+ client.CloseIdleConnections()
+ }
+}
diff --git a/internal/internal.go b/internal/internal.go
index 133b19e9bd20b..777128f667bf6 100644
--- a/internal/internal.go
+++ b/internal/internal.go
@@ -5,19 +5,17 @@ import (
"bytes"
"compress/gzip"
"context"
- "crypto/rand"
"errors"
"fmt"
"io"
- "log"
"math"
- "math/big"
+ "math/rand"
"os"
"os/exec"
- "regexp"
"runtime"
"strconv"
"strings"
+ "sync"
"syscall"
"time"
"unicode"
@@ -48,6 +46,15 @@ type Size struct {
Size int64
}
+type Number struct {
+ Value float64
+}
+
+type ReadWaitCloser struct {
+ pipeReader *io.PipeReader
+ wg sync.WaitGroup
+}
+
// SetVersion sets the telegraf agent version
func SetVersion(v string) error {
if version != "" {
@@ -64,7 +71,8 @@ func Version() string {
// ProductToken returns a tag for Telegraf that can be used in user agents.
func ProductToken() string {
- return fmt.Sprintf("Telegraf/%s Go/%s", Version(), runtime.Version())
+ return fmt.Sprintf("Telegraf/%s Go/%s",
+ Version(), strings.TrimPrefix(runtime.Version(), "go"))
}
// UnmarshalTOML parses the duration from the TOML config file
@@ -123,6 +131,16 @@ func (s *Size) UnmarshalTOML(b []byte) error {
return nil
}
+func (n *Number) UnmarshalTOML(b []byte) error {
+ value, err := strconv.ParseFloat(string(b), 64)
+ if err != nil {
+ return err
+ }
+
+ n.Value = value
+ return nil
+}
+
// ReadLines reads contents from a file and splits them by new lines.
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
func ReadLines(filename string) ([]string, error) {
@@ -185,53 +203,6 @@ func SnakeCase(in string) string {
return string(out)
}
-// CombinedOutputTimeout runs the given command with the given timeout and
-// returns the combined output of stdout and stderr.
-// If the command times out, it attempts to kill the process.
-func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
- var b bytes.Buffer
- c.Stdout = &b
- c.Stderr = &b
- if err := c.Start(); err != nil {
- return nil, err
- }
- err := WaitTimeout(c, timeout)
- return b.Bytes(), err
-}
-
-// RunTimeout runs the given command with the given timeout.
-// If the command times out, it attempts to kill the process.
-func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
- if err := c.Start(); err != nil {
- return err
- }
- return WaitTimeout(c, timeout)
-}
-
-// WaitTimeout waits for the given command to finish with a timeout.
-// It assumes the command has already been started.
-// If the command times out, it attempts to kill the process.
-func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
- timer := time.AfterFunc(timeout, func() {
- err := c.Process.Kill()
- if err != nil {
- log.Printf("E! FATAL error killing process: %s", err)
- return
- }
- })
-
- err := c.Wait()
- isTimeout := timer.Stop()
-
- if err != nil {
- return err
- } else if isTimeout == false {
- return TimeoutErr
- }
-
- return err
-}
-
// RandomSleep will sleep for a random amount of time up to max.
// If the shutdown channel is closed, it will return before it has finished
// sleeping.
@@ -239,12 +210,8 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
if max == 0 {
return
}
- maxSleep := big.NewInt(max.Nanoseconds())
- var sleepns int64
- if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
- sleepns = j.Int64()
- }
+ sleepns := rand.Int63n(max.Nanoseconds())
t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
select {
@@ -262,11 +229,7 @@ func RandomDuration(max time.Duration) time.Duration {
return 0
}
- var sleepns int64
- maxSleep := big.NewInt(max.Nanoseconds())
- if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
- sleepns = j.Int64()
- }
+ sleepns := rand.Int63n(max.Nanoseconds())
return time.Duration(sleepns)
}
@@ -315,14 +278,25 @@ func ExitStatus(err error) (int, bool) {
return 0, false
}
+func (r *ReadWaitCloser) Close() error {
+ err := r.pipeReader.Close()
+ r.wg.Wait() // wait for the gzip goroutine finish
+ return err
+}
+
// CompressWithGzip takes an io.Reader as input and pipes
// it through a gzip.Writer returning an io.Reader containing
// the gzipped data.
// An error is returned if passing data to the gzip.Writer fails
-func CompressWithGzip(data io.Reader) (io.Reader, error) {
+func CompressWithGzip(data io.Reader) (io.ReadCloser, error) {
pipeReader, pipeWriter := io.Pipe()
gzipWriter := gzip.NewWriter(pipeWriter)
+ rc := &ReadWaitCloser{
+ pipeReader: pipeReader,
+ }
+
+ rc.wg.Add(1)
var err error
go func() {
_, err = io.Copy(gzipWriter, data)
@@ -330,66 +304,121 @@ func CompressWithGzip(data io.Reader) (io.Reader, error) {
// subsequent reads from the read half of the pipe will
// return no bytes and the error err, or EOF if err is nil.
pipeWriter.CloseWithError(err)
+ rc.wg.Done()
}()
return pipeReader, err
}
-// ParseTimestamp with no location provided parses a timestamp value as UTC
-func ParseTimestamp(timestamp interface{}, format string) (time.Time, error) {
- return ParseTimestampWithLocation(timestamp, format, "UTC")
+// ParseTimestamp parses a Time according to the standard Telegraf options.
+// These are generally displayed in the toml similar to:
+// json_time_key= "timestamp"
+// json_time_format = "2006-01-02T15:04:05Z07:00"
+// json_timezone = "America/Los_Angeles"
+//
+// The format can be one of "unix", "unix_ms", "unix_us", "unix_ns", or a Go
+// time layout suitable for time.Parse.
+//
+// When using the "unix" format, a optional fractional component is allowed.
+// Specific unix time precisions cannot have a fractional component.
+//
+// Unix times may be an int64, float64, or string. When using a Go format
+// string the timestamp must be a string.
+//
+// The location is a location string suitable for time.LoadLocation. Unix
+// times do not use the location string, a unix time is always return in the
+// UTC location.
+func ParseTimestamp(format string, timestamp interface{}, location string) (time.Time, error) {
+ switch format {
+ case "unix", "unix_ms", "unix_us", "unix_ns":
+ return parseUnix(format, timestamp)
+ default:
+ if location == "" {
+ location = "UTC"
+ }
+ return parseTime(format, timestamp, location)
+ }
}
-// ParseTimestamp parses a timestamp value as a unix epoch of various precision.
-//
-// format = "unix": epoch is assumed to be in seconds and can come as number or string. Can have a decimal part.
-// format = "unix_ms": epoch is assumed to be in milliseconds and can come as number or string. Cannot have a decimal part.
-// format = "unix_us": epoch is assumed to be in microseconds and can come as number or string. Cannot have a decimal part.
-// format = "unix_ns": epoch is assumed to be in nanoseconds and can come as number or string. Cannot have a decimal part.
-func ParseTimestampWithLocation(timestamp interface{}, format string, location string) (time.Time, error) {
- timeInt, timeFractional := int64(0), int64(0)
- timeEpochStr, ok := timestamp.(string)
- var err error
+func parseUnix(format string, timestamp interface{}) (time.Time, error) {
+ integer, fractional, err := parseComponents(timestamp)
+ if err != nil {
+ return time.Unix(0, 0), err
+ }
- if !ok {
- timeEpochFloat, ok := timestamp.(float64)
- if !ok {
- return time.Time{}, fmt.Errorf("time: %v could not be converted to string nor float64", timestamp)
+ switch strings.ToLower(format) {
+ case "unix":
+ return time.Unix(integer, fractional).UTC(), nil
+ case "unix_ms":
+ return time.Unix(0, integer*1e6).UTC(), nil
+ case "unix_us":
+ return time.Unix(0, integer*1e3).UTC(), nil
+ case "unix_ns":
+ return time.Unix(0, integer).UTC(), nil
+ default:
+ return time.Unix(0, 0), errors.New("unsupported type")
+ }
+}
+
+// Returns the integers before and after an optional decimal point. Both '.'
+// and ',' are supported for the decimal point. The timestamp can be an int64,
+// float64, or string.
+// ex: "42.5" -> (42, 5, nil)
+func parseComponents(timestamp interface{}) (int64, int64, error) {
+ switch ts := timestamp.(type) {
+ case string:
+ parts := strings.SplitN(ts, ".", 2)
+ if len(parts) == 2 {
+ return parseUnixTimeComponents(parts[0], parts[1])
+ }
+
+ parts = strings.SplitN(ts, ",", 2)
+ if len(parts) == 2 {
+ return parseUnixTimeComponents(parts[0], parts[1])
}
- intPart, frac := math.Modf(timeEpochFloat)
- timeInt, timeFractional = int64(intPart), int64(frac*1e9)
- } else {
- splitted := regexp.MustCompile("[.,]").Split(timeEpochStr, 2)
- timeInt, err = strconv.ParseInt(splitted[0], 10, 64)
+
+ integer, err := strconv.ParseInt(ts, 10, 64)
if err != nil {
- loc, err := time.LoadLocation(location)
- if err != nil {
- return time.Time{}, fmt.Errorf("location: %s could not be loaded as a location", location)
- }
- return time.ParseInLocation(format, timeEpochStr, loc)
+ return 0, 0, err
}
+ return integer, 0, nil
+ case int64:
+ return ts, 0, nil
+ case float64:
+ integer, fractional := math.Modf(ts)
+ return int64(integer), int64(fractional * 1e9), nil
+ default:
+ return 0, 0, errors.New("unsupported type")
+ }
+}
- if len(splitted) == 2 {
- if len(splitted[1]) > 9 {
- splitted[1] = splitted[1][:9] //truncates decimal part to nanoseconds precision
- }
- nanosecStr := splitted[1] + strings.Repeat("0", 9-len(splitted[1])) //adds 0's to the right to obtain a valid number of nanoseconds
+func parseUnixTimeComponents(first, second string) (int64, int64, error) {
+ integer, err := strconv.ParseInt(first, 10, 64)
+ if err != nil {
+ return 0, 0, err
+ }
- timeFractional, err = strconv.ParseInt(nanosecStr, 10, 64)
- if err != nil {
- return time.Time{}, err
- }
- }
+ // Convert to nanoseconds, dropping any greater precision.
+ buf := []byte("000000000")
+ copy(buf, second)
+
+ fractional, err := strconv.ParseInt(string(buf), 10, 64)
+ if err != nil {
+ return 0, 0, err
}
- if strings.EqualFold(format, "unix") {
- return time.Unix(timeInt, timeFractional).UTC(), nil
- } else if strings.EqualFold(format, "unix_ms") {
- return time.Unix(timeInt/1000, (timeInt%1000)*1e6).UTC(), nil
- } else if strings.EqualFold(format, "unix_us") {
- return time.Unix(0, timeInt*1e3).UTC(), nil
- } else if strings.EqualFold(format, "unix_ns") {
- return time.Unix(0, timeInt).UTC(), nil
- } else {
- return time.Time{}, errors.New("Invalid unix format")
+ return integer, fractional, nil
+}
+
+// ParseTime parses a string timestamp according to the format string.
+func parseTime(format string, timestamp interface{}, location string) (time.Time, error) {
+ switch ts := timestamp.(type) {
+ case string:
+ loc, err := time.LoadLocation(location)
+ if err != nil {
+ return time.Unix(0, 0), err
+ }
+ return time.ParseInLocation(format, ts, loc)
+ default:
+ return time.Unix(0, 0), errors.New("unsupported type")
}
}
diff --git a/internal/internal_test.go b/internal/internal_test.go
index da2fe01c51ee3..25f0503ba20a8 100644
--- a/internal/internal_test.go
+++ b/internal/internal_test.go
@@ -3,8 +3,12 @@ package internal
import (
"bytes"
"compress/gzip"
+ "crypto/rand"
+ "io"
"io/ioutil"
+ "log"
"os/exec"
+ "regexp"
"testing"
"time"
@@ -64,6 +68,30 @@ func TestRunTimeout(t *testing.T) {
assert.True(t, elapsed < time.Millisecond*75)
}
+// Verifies behavior of a command that doesn't get killed.
+func TestRunTimeoutFastExit(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test due to random failures.")
+ }
+ if echobin == "" {
+ t.Skip("'echo' binary not available on OS, skipping.")
+ }
+ cmd := exec.Command(echobin)
+ start := time.Now()
+ err := RunTimeout(cmd, time.Millisecond*20)
+ buf := &bytes.Buffer{}
+ log.SetOutput(buf)
+ elapsed := time.Since(start)
+
+ require.NoError(t, err)
+ // Verify that command gets killed in 20ms, with some breathing room
+ assert.True(t, elapsed < time.Millisecond*75)
+
+ // Verify "process already finished" log doesn't occur.
+ time.Sleep(time.Millisecond * 75)
+ require.Equal(t, "", buf.String())
+}
+
func TestCombinedOutputTimeout(t *testing.T) {
// TODO: Fix this test
t.Skip("Test failing too often, skip for now and revisit later.")
@@ -207,13 +235,45 @@ func TestCompressWithGzip(t *testing.T) {
assert.Equal(t, testData, string(output))
}
+type mockReader struct {
+ readN uint64 // record the number of calls to Read
+}
+
+func (r *mockReader) Read(p []byte) (n int, err error) {
+ r.readN++
+ return rand.Read(p)
+}
+
+func TestCompressWithGzipEarlyClose(t *testing.T) {
+ mr := &mockReader{}
+
+ rc, err := CompressWithGzip(mr)
+ assert.NoError(t, err)
+
+ n, err := io.CopyN(ioutil.Discard, rc, 10000)
+ assert.NoError(t, err)
+ assert.Equal(t, int64(10000), n)
+
+ r1 := mr.readN
+ err = rc.Close()
+ assert.NoError(t, err)
+
+ n, err = io.CopyN(ioutil.Discard, rc, 10000)
+ assert.Error(t, io.EOF, err)
+ assert.Equal(t, int64(0), n)
+
+ r2 := mr.readN
+ // no more read to the source after closing
+ assert.Equal(t, r1, r2)
+}
+
func TestVersionAlreadySet(t *testing.T) {
err := SetVersion("foo")
- assert.Nil(t, err)
+ assert.NoError(t, err)
err = SetVersion("bar")
- assert.NotNil(t, err)
+ assert.Error(t, err)
assert.IsType(t, VersionAlreadySetError, err)
assert.Equal(t, "foo", Version())
@@ -306,32 +366,126 @@ func TestAlignTime(t *testing.T) {
}
func TestParseTimestamp(t *testing.T) {
- time, err := ParseTimestamp("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000")
- assert.Nil(t, err)
- assert.EqualValues(t, int64(1550699434029665000), time.UnixNano())
-
- time, err = ParseTimestamp("2019-02-20 21:50:34.029665-04:00", "2006-01-02 15:04:05.000000-07:00")
- assert.Nil(t, err)
- assert.EqualValues(t, int64(1550713834029665000), time.UnixNano())
+ rfc3339 := func(value string) time.Time {
+ tm, err := time.Parse(time.RFC3339Nano, value)
+ if err != nil {
+ panic(err)
+ }
+ return tm
+ }
- time, err = ParseTimestamp("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000-06:00")
- assert.NotNil(t, err)
+ tests := []struct {
+ name string
+ format string
+ timestamp interface{}
+ location string
+ expected time.Time
+ err bool
+ }{
+ {
+ name: "parse layout string in utc",
+ format: "2006-01-02 15:04:05",
+ timestamp: "2019-02-20 21:50:34",
+ location: "UTC",
+ expected: rfc3339("2019-02-20T21:50:34Z"),
+ },
+ {
+ name: "parse layout string with invalid timezone",
+ format: "2006-01-02 15:04:05",
+ timestamp: "2019-02-20 21:50:34",
+ location: "InvalidTimeZone",
+ err: true,
+ },
+ {
+ name: "layout regression 6386",
+ format: "02.01.2006 15:04:05",
+ timestamp: "09.07.2019 00:11:00",
+ expected: rfc3339("2019-07-09T00:11:00Z"),
+ },
+ {
+ name: "default location is utc",
+ format: "2006-01-02 15:04:05",
+ timestamp: "2019-02-20 21:50:34",
+ expected: rfc3339("2019-02-20T21:50:34Z"),
+ },
+ {
+ name: "unix seconds without fractional",
+ format: "unix",
+ timestamp: "1568338208",
+ expected: rfc3339("2019-09-13T01:30:08Z"),
+ },
+ {
+ name: "unix seconds with fractional",
+ format: "unix",
+ timestamp: "1568338208.500",
+ expected: rfc3339("2019-09-13T01:30:08.500Z"),
+ },
+ {
+ name: "unix seconds with fractional and comma decimal point",
+ format: "unix",
+ timestamp: "1568338208,500",
+ expected: rfc3339("2019-09-13T01:30:08.500Z"),
+ },
+ {
+ name: "unix seconds extra precision",
+ format: "unix",
+ timestamp: "1568338208.00000050042",
+ expected: rfc3339("2019-09-13T01:30:08.000000500Z"),
+ },
+ {
+ name: "unix seconds integer",
+ format: "unix",
+ timestamp: int64(1568338208),
+ expected: rfc3339("2019-09-13T01:30:08Z"),
+ },
+ {
+ name: "unix seconds float",
+ format: "unix",
+ timestamp: float64(1568338208.500),
+ expected: rfc3339("2019-09-13T01:30:08.500Z"),
+ },
+ {
+ name: "unix milliseconds",
+ format: "unix_ms",
+ timestamp: "1568338208500",
+ expected: rfc3339("2019-09-13T01:30:08.500Z"),
+ },
+ {
+ name: "unix milliseconds with fractional is ignored",
+ format: "unix_ms",
+ timestamp: "1568338208500.42",
+ expected: rfc3339("2019-09-13T01:30:08.500Z"),
+ },
+ {
+ name: "unix microseconds",
+ format: "unix_us",
+ timestamp: "1568338208000500",
+ expected: rfc3339("2019-09-13T01:30:08.000500Z"),
+ },
+ {
+ name: "unix nanoseconds",
+ format: "unix_ns",
+ timestamp: "1568338208000000500",
+ expected: rfc3339("2019-09-13T01:30:08.000000500Z"),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tm, err := ParseTimestamp(tt.format, tt.timestamp, tt.location)
+ if tt.err {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, tm)
+ }
+ })
+ }
}
-func TestParseTimestampWithLocation(t *testing.T) {
- time, err := ParseTimestampWithLocation("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000", "UTC")
- assert.Nil(t, err)
- assert.EqualValues(t, int64(1550699434029665000), time.UnixNano())
-
- time, err = ParseTimestampWithLocation("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000", "America/New_York")
- assert.Nil(t, err)
- assert.EqualValues(t, int64(1550717434029665000), time.UnixNano())
-
- //Provided location is ignored if an offset is successfully parsed
- time, err = ParseTimestampWithLocation("2019-02-20 21:50:34.029665-07:00", "2006-01-02 15:04:05.000000-07:00", "America/New_York")
- assert.Nil(t, err)
- assert.EqualValues(t, int64(1550724634029665000), time.UnixNano())
-
- time, err = ParseTimestampWithLocation("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000", "InvalidTimeZone")
- assert.NotNil(t, err)
+func TestProductToken(t *testing.T) {
+ token := ProductToken()
+ // Telegraf version depends on the call to SetVersion, it cannot be set
+ // multiple times and is not thread-safe.
+ re := regexp.MustCompile(`^Telegraf/[^\s]+ Go/\d+.\d+(.\d+)?$`)
+ require.True(t, re.MatchString(token), token)
}
diff --git a/internal/models/running_processor.go b/internal/models/running_processor.go
deleted file mode 100644
index 38369d03b4539..0000000000000
--- a/internal/models/running_processor.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package models
-
-import (
- "sync"
-
- "github.com/influxdata/telegraf"
-)
-
-type RunningProcessor struct {
- Name string
-
- sync.Mutex
- Processor telegraf.Processor
- Config *ProcessorConfig
-}
-
-type RunningProcessors []*RunningProcessor
-
-func (rp RunningProcessors) Len() int { return len(rp) }
-func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] }
-func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }
-
-// FilterConfig containing a name and filter
-type ProcessorConfig struct {
- Name string
- Order int64
- Filter Filter
-}
-
-func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) {
- metric.Drop()
-}
-
-func containsMetric(item telegraf.Metric, metrics []telegraf.Metric) bool {
- for _, m := range metrics {
- if item == m {
- return true
- }
- }
- return false
-}
-
-func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
- rp.Lock()
- defer rp.Unlock()
-
- ret := []telegraf.Metric{}
-
- for _, metric := range in {
- // In processors when a filter selects a metric it is sent through the
- // processor. Otherwise the metric continues downstream unmodified.
- if ok := rp.Config.Filter.Select(metric); !ok {
- ret = append(ret, metric)
- continue
- }
-
- rp.Config.Filter.Modify(metric)
- if len(metric.FieldList()) == 0 {
- rp.metricFiltered(metric)
- continue
- }
-
- // This metric should pass through the filter, so call the filter Apply
- // function and append results to the output slice.
- ret = append(ret, rp.Processor.Apply(metric)...)
- }
-
- return ret
-}
diff --git a/internal/process/process.go b/internal/process/process.go
new file mode 100644
index 0000000000000..3f88aac57b317
--- /dev/null
+++ b/internal/process/process.go
@@ -0,0 +1,191 @@
+package process
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os/exec"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/influxdata/telegraf"
+)
+
+// Process is a long-running process manager that will restart processes if they stop.
+type Process struct {
+ Cmd *exec.Cmd
+ Stdin io.WriteCloser
+ Stdout io.ReadCloser
+ Stderr io.ReadCloser
+ ReadStdoutFn func(io.Reader)
+ ReadStderrFn func(io.Reader)
+ RestartDelay time.Duration
+ Log telegraf.Logger
+
+ name string
+ args []string
+ pid int32
+ cancel context.CancelFunc
+ mainLoopWg sync.WaitGroup
+}
+
+// New creates a new process wrapper
+func New(command []string) (*Process, error) {
+ if len(command) == 0 {
+ return nil, errors.New("no command")
+ }
+
+ p := &Process{
+ RestartDelay: 5 * time.Second,
+ name: command[0],
+ args: []string{},
+ }
+
+ if len(command) > 1 {
+ p.args = command[1:]
+ }
+
+ return p, nil
+}
+
+// Start the process. A &Process can only be started once. It will restart itself
+// as necessary.
+func (p *Process) Start() error {
+ ctx, cancel := context.WithCancel(context.Background())
+ p.cancel = cancel
+
+ if err := p.cmdStart(); err != nil {
+ return err
+ }
+
+ p.mainLoopWg.Add(1)
+ go func() {
+ if err := p.cmdLoop(ctx); err != nil {
+ p.Log.Errorf("Process quit with message: %v", err)
+ }
+ p.mainLoopWg.Done()
+ }()
+
+ return nil
+}
+
+// Stop is called when the process isn't needed anymore
+func (p *Process) Stop() {
+ if p.cancel != nil {
+ // signal our intent to shutdown and not restart the process
+ p.cancel()
+ }
+ // close stdin so the app can shut down gracefully.
+ p.Stdin.Close()
+ p.mainLoopWg.Wait()
+}
+
+func (p *Process) cmdStart() error {
+ p.Cmd = exec.Command(p.name, p.args...)
+
+ var err error
+ p.Stdin, err = p.Cmd.StdinPipe()
+ if err != nil {
+ return fmt.Errorf("error opening stdin pipe: %w", err)
+ }
+
+ p.Stdout, err = p.Cmd.StdoutPipe()
+ if err != nil {
+ return fmt.Errorf("error opening stdout pipe: %w", err)
+ }
+
+ p.Stderr, err = p.Cmd.StderrPipe()
+ if err != nil {
+ return fmt.Errorf("error opening stderr pipe: %w", err)
+ }
+
+ p.Log.Infof("Starting process: %s %s", p.name, p.args)
+
+ if err := p.Cmd.Start(); err != nil {
+ return fmt.Errorf("error starting process: %s", err)
+ }
+ atomic.StoreInt32(&p.pid, int32(p.Cmd.Process.Pid))
+ return nil
+}
+
+func (p *Process) Pid() int {
+ pid := atomic.LoadInt32(&p.pid)
+ return int(pid)
+}
+
+// cmdLoop watches an already running process, restarting it when appropriate.
+func (p *Process) cmdLoop(ctx context.Context) error {
+ for {
+ err := p.cmdWait(ctx)
+ if isQuitting(ctx) {
+ p.Log.Infof("Process %s shut down", p.Cmd.Path)
+ return nil
+ }
+
+ p.Log.Errorf("Process %s exited: %v", p.Cmd.Path, err)
+ p.Log.Infof("Restarting in %s...", time.Duration(p.RestartDelay))
+
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-time.After(time.Duration(p.RestartDelay)):
+ // Continue the loop and restart the process
+ if err := p.cmdStart(); err != nil {
+ return err
+ }
+ }
+ }
+}
+
+// cmdWait waits for the process to finish.
+func (p *Process) cmdWait(ctx context.Context) error {
+ var wg sync.WaitGroup
+
+ if p.ReadStdoutFn == nil {
+ p.ReadStdoutFn = defaultReadPipe
+ }
+ if p.ReadStderrFn == nil {
+ p.ReadStderrFn = defaultReadPipe
+ }
+
+ processCtx, processCancel := context.WithCancel(context.Background())
+ defer processCancel()
+
+ wg.Add(1)
+ go func() {
+ p.ReadStdoutFn(p.Stdout)
+ wg.Done()
+ }()
+
+ wg.Add(1)
+ go func() {
+ p.ReadStderrFn(p.Stderr)
+ wg.Done()
+ }()
+
+ wg.Add(1)
+ go func() {
+ select {
+ case <-ctx.Done():
+ gracefulStop(processCtx, p.Cmd, 5*time.Second)
+ case <-processCtx.Done():
+ }
+ wg.Done()
+ }()
+
+ err := p.Cmd.Wait()
+ processCancel()
+ wg.Wait()
+ return err
+}
+
+func isQuitting(ctx context.Context) bool {
+ return ctx.Err() != nil
+}
+
+func defaultReadPipe(r io.Reader) {
+ io.Copy(ioutil.Discard, r)
+}
diff --git a/internal/process/process_posix.go b/internal/process/process_posix.go
new file mode 100644
index 0000000000000..7b42b7da13214
--- /dev/null
+++ b/internal/process/process_posix.go
@@ -0,0 +1,23 @@
+// +build !windows
+
+package process
+
+import (
+ "context"
+ "os/exec"
+ "syscall"
+ "time"
+)
+
+func gracefulStop(ctx context.Context, cmd *exec.Cmd, timeout time.Duration) {
+ select {
+ case <-time.After(timeout):
+ cmd.Process.Signal(syscall.SIGTERM)
+ case <-ctx.Done():
+ }
+ select {
+ case <-time.After(timeout):
+ cmd.Process.Kill()
+ case <-ctx.Done():
+ }
+}
diff --git a/internal/process/process_test.go b/internal/process/process_test.go
new file mode 100644
index 0000000000000..7a7c8c6f33fd6
--- /dev/null
+++ b/internal/process/process_test.go
@@ -0,0 +1,74 @@
+// +build !windows
+
+package process
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "sync/atomic"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+// test that a restarting process resets pipes properly
+func TestRestartingRebindsPipes(t *testing.T) {
+ exe, err := os.Executable()
+ require.NoError(t, err)
+
+ p, err := New([]string{exe, "-external"})
+ p.RestartDelay = 100 * time.Nanosecond
+ p.Log = testutil.Logger{}
+ require.NoError(t, err)
+
+ linesRead := int64(0)
+ p.ReadStdoutFn = func(r io.Reader) {
+ scanner := bufio.NewScanner(r)
+
+ for scanner.Scan() {
+ atomic.AddInt64(&linesRead, 1)
+ }
+ }
+
+ require.NoError(t, p.Start())
+
+ for atomic.LoadInt64(&linesRead) < 1 {
+ time.Sleep(1 * time.Millisecond)
+ }
+
+ syscall.Kill(p.Pid(), syscall.SIGKILL)
+
+ for atomic.LoadInt64(&linesRead) < 2 {
+ time.Sleep(1 * time.Millisecond)
+ }
+
+ p.Stop()
+}
+
+var external = flag.Bool("external", false,
+ "if true, run externalProcess instead of tests")
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if *external {
+ externalProcess()
+ os.Exit(0)
+ }
+ code := m.Run()
+ os.Exit(code)
+}
+
+// externalProcess is an external "misbehaving" process that won't exit
+// cleanly.
+func externalProcess() {
+ wait := make(chan int, 0)
+ fmt.Fprintln(os.Stdout, "started")
+ <-wait
+ os.Exit(2)
+}
diff --git a/internal/process/process_windows.go b/internal/process/process_windows.go
new file mode 100644
index 0000000000000..0995d52469b07
--- /dev/null
+++ b/internal/process/process_windows.go
@@ -0,0 +1,17 @@
+// +build windows
+
+package process
+
+import (
+ "context"
+ "os/exec"
+ "time"
+)
+
+func gracefulStop(ctx context.Context, cmd *exec.Cmd, timeout time.Duration) {
+ select {
+ case <-time.After(timeout):
+ cmd.Process.Kill()
+ case <-ctx.Done():
+ }
+}
diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go
new file mode 100644
index 0000000000000..a167b7cb78f7e
--- /dev/null
+++ b/internal/rotate/file_writer.go
@@ -0,0 +1,185 @@
+package rotate
+
+// Rotating things
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// FilePerm defines the permissions that Writer will use for all
+// the files it creates.
+const (
+ FilePerm = os.FileMode(0644)
+ DateFormat = "2006-01-02"
+)
+
+// FileWriter implements the io.Writer interface and writes to the
+// filename specified.
+// Will rotate at the specified interval and/or when the current file size exceeds maxSizeInBytes
+// At rotation time, current file is renamed and a new file is created.
+// If the number of archives exceeds maxArchives, older files are deleted.
+type FileWriter struct {
+ filename string
+ filenameRotationTemplate string
+ current *os.File
+ interval time.Duration
+ maxSizeInBytes int64
+ maxArchives int
+ expireTime time.Time
+ bytesWritten int64
+ sync.Mutex
+}
+
+// NewFileWriter creates a new file writer.
+func NewFileWriter(filename string, interval time.Duration, maxSizeInBytes int64, maxArchives int) (io.WriteCloser, error) {
+ if interval == 0 && maxSizeInBytes <= 0 {
+ // No rotation needed so a basic io.Writer will do the trick
+ return openFile(filename)
+ }
+
+ w := &FileWriter{
+ filename: filename,
+ interval: interval,
+ maxSizeInBytes: maxSizeInBytes,
+ maxArchives: maxArchives,
+ filenameRotationTemplate: getFilenameRotationTemplate(filename),
+ }
+
+ if err := w.openCurrent(); err != nil {
+ return nil, err
+ }
+
+ return w, nil
+}
+
+func openFile(filename string) (*os.File, error) {
+ return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, FilePerm)
+}
+
+func getFilenameRotationTemplate(filename string) string {
+ // Extract the file extension
+ fileExt := filepath.Ext(filename)
+ // Remove the file extension from the filename (if any)
+ stem := strings.TrimSuffix(filename, fileExt)
+ return stem + ".%s-%s" + fileExt
+}
+
+// Write writes p to the current file, then checks to see if
+// rotation is necessary.
+func (w *FileWriter) Write(p []byte) (n int, err error) {
+ w.Lock()
+ defer w.Unlock()
+ if n, err = w.current.Write(p); err != nil {
+ return 0, err
+ }
+ w.bytesWritten += int64(n)
+
+ if err = w.rotateIfNeeded(); err != nil {
+ return 0, err
+ }
+
+ return n, nil
+}
+
+// Close closes the current file. Writer is unusable after this
+// is called.
+func (w *FileWriter) Close() (err error) {
+ w.Lock()
+ defer w.Unlock()
+
+ // Rotate before closing
+ if err = w.rotate(); err != nil {
+ return err
+ }
+
+ w.current = nil
+ return nil
+}
+
+func (w *FileWriter) openCurrent() (err error) {
+ // In case ModTime() fails, we use time.Now()
+ w.expireTime = time.Now().Add(w.interval)
+ w.bytesWritten = 0
+ w.current, err = openFile(w.filename)
+
+ if err != nil {
+ return err
+ }
+
+ // Goal here is to rotate old pre-existing files.
+ // For that we use fileInfo.ModTime, instead of time.Now().
+ // Example: telegraf is restarted every 23 hours and
+ // the rotation interval is set to 24 hours.
+ // With time.now() as a reference we'd never rotate the file.
+ if fileInfo, err := w.current.Stat(); err == nil {
+ w.expireTime = fileInfo.ModTime().Add(w.interval)
+ w.bytesWritten = fileInfo.Size()
+ }
+
+ if err = w.rotateIfNeeded(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *FileWriter) rotateIfNeeded() error {
+ if (w.interval > 0 && time.Now().After(w.expireTime)) ||
+ (w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) {
+ if err := w.rotate(); err != nil {
+ //Ignore rotation errors and keep the log open
+ fmt.Printf("unable to rotate the file '%s', %s", w.filename, err.Error())
+ }
+ return w.openCurrent()
+ }
+ return nil
+}
+
+func (w *FileWriter) rotate() (err error) {
+ if err = w.current.Close(); err != nil {
+ return err
+ }
+
+ // Use year-month-date for readability, unix time to make the file name unique with second precision
+ now := time.Now()
+ rotatedFilename := fmt.Sprintf(w.filenameRotationTemplate, now.Format(DateFormat), strconv.FormatInt(now.Unix(), 10))
+ if err = os.Rename(w.filename, rotatedFilename); err != nil {
+ return err
+ }
+
+ if err = w.purgeArchivesIfNeeded(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (w *FileWriter) purgeArchivesIfNeeded() (err error) {
+ if w.maxArchives == -1 {
+ //Skip archiving
+ return nil
+ }
+
+ var matches []string
+ if matches, err = filepath.Glob(fmt.Sprintf(w.filenameRotationTemplate, "*", "*")); err != nil {
+ return err
+ }
+
+ //if there are more archives than the configured maximum, then purge older files
+ if len(matches) > w.maxArchives {
+ //sort files alphanumerically to delete older files first
+ sort.Strings(matches)
+ for _, filename := range matches[:len(matches)-w.maxArchives] {
+ if err = os.Remove(filename); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/internal/rotate/file_writer_test.go b/internal/rotate/file_writer_test.go
new file mode 100644
index 0000000000000..ca29b9a2f45d6
--- /dev/null
+++ b/internal/rotate/file_writer_test.go
@@ -0,0 +1,148 @@
+package rotate
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFileWriter_NoRotation(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "RotationNo")
+ require.NoError(t, err)
+ writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0)
+ require.NoError(t, err)
+ defer func() { writer.Close(); os.RemoveAll(tempDir) }()
+
+ _, err = writer.Write([]byte("Hello World"))
+ require.NoError(t, err)
+ _, err = writer.Write([]byte("Hello World 2"))
+ require.NoError(t, err)
+ files, _ := ioutil.ReadDir(tempDir)
+ assert.Equal(t, 1, len(files))
+}
+
+func TestFileWriter_TimeRotation(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "RotationTime")
+ require.NoError(t, err)
+ interval, _ := time.ParseDuration("1s")
+ writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1)
+ require.NoError(t, err)
+ defer func() { writer.Close(); os.RemoveAll(tempDir) }()
+
+ _, err = writer.Write([]byte("Hello World"))
+ require.NoError(t, err)
+ time.Sleep(1 * time.Second)
+ _, err = writer.Write([]byte("Hello World 2"))
+ require.NoError(t, err)
+ files, _ := ioutil.ReadDir(tempDir)
+ assert.Equal(t, 2, len(files))
+}
+
+func TestFileWriter_ReopenTimeRotation(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "RotationTime")
+ require.NoError(t, err)
+ interval, _ := time.ParseDuration("1s")
+ filePath := filepath.Join(tempDir, "test.log")
+ err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644)
+ time.Sleep(1 * time.Second)
+ assert.NoError(t, err)
+ writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1)
+ require.NoError(t, err)
+ defer func() { writer.Close(); os.RemoveAll(tempDir) }()
+
+ files, _ := ioutil.ReadDir(tempDir)
+ assert.Equal(t, 2, len(files))
+}
+
+func TestFileWriter_SizeRotation(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "RotationSize")
+ require.NoError(t, err)
+ maxSize := int64(9)
+ writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
+ require.NoError(t, err)
+ defer func() { writer.Close(); os.RemoveAll(tempDir) }()
+
+ _, err = writer.Write([]byte("Hello World"))
+ require.NoError(t, err)
+ _, err = writer.Write([]byte("World 2"))
+ require.NoError(t, err)
+ files, _ := ioutil.ReadDir(tempDir)
+ assert.Equal(t, 2, len(files))
+}
+
+func TestFileWriter_ReopenSizeRotation(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "RotationSize")
+ require.NoError(t, err)
+ maxSize := int64(12)
+ filePath := filepath.Join(tempDir, "test.log")
+ err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644)
+ assert.NoError(t, err)
+ writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
+ require.NoError(t, err)
+ defer func() { writer.Close(); os.RemoveAll(tempDir) }()
+
+ _, err = writer.Write([]byte("Hello World Again"))
+ require.NoError(t, err)
+ files, _ := ioutil.ReadDir(tempDir)
+ assert.Equal(t, 2, len(files))
+}
+
+func TestFileWriter_DeleteArchives(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "RotationDeleteArchives")
+ require.NoError(t, err)
+ maxSize := int64(5)
+ writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2)
+ require.NoError(t, err)
+ defer func() { writer.Close(); os.RemoveAll(tempDir) }()
+
+ _, err = writer.Write([]byte("First file"))
+ require.NoError(t, err)
+ // File names include the date with second precision
+ // So, to force rotation with different file names
+ // we need to wait
+ time.Sleep(1 * time.Second)
+ _, err = writer.Write([]byte("Second file"))
+ require.NoError(t, err)
+ time.Sleep(1 * time.Second)
+ _, err = writer.Write([]byte("Third file"))
+ require.NoError(t, err)
+
+ files, _ := ioutil.ReadDir(tempDir)
+ assert.Equal(t, 3, len(files))
+
+ for _, tempFile := range files {
+ var bytes []byte
+ var err error
+ path := filepath.Join(tempDir, tempFile.Name())
+ if bytes, err = ioutil.ReadFile(path); err != nil {
+ t.Error(err.Error())
+ return
+ }
+ contents := string(bytes)
+
+ if contents != "" && contents != "Second file" && contents != "Third file" {
+ t.Error("Should have deleted the eldest log file")
+ return
+ }
+ }
+}
+
+func TestFileWriter_CloseRotates(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "RotationClose")
+ require.NoError(t, err)
+ defer os.RemoveAll(tempDir)
+ maxSize := int64(9)
+ writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
+ require.NoError(t, err)
+
+ writer.Close()
+
+ files, _ := ioutil.ReadDir(tempDir)
+ assert.Equal(t, 1, len(files))
+ assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name())
+}
diff --git a/internal/snmp/config.go b/internal/snmp/config.go
new file mode 100644
index 0000000000000..e616e75709737
--- /dev/null
+++ b/internal/snmp/config.go
@@ -0,0 +1,34 @@
+package snmp
+
+import (
+ "github.com/influxdata/telegraf/internal"
+)
+
+type ClientConfig struct {
+ // Timeout to wait for a response.
+ Timeout internal.Duration `toml:"timeout"`
+ Retries int `toml:"retries"`
+ // Values: 1, 2, 3
+ Version uint8 `toml:"version"`
+
+ // Parameters for Version 1 & 2
+ Community string `toml:"community"`
+
+ // Parameters for Version 2 & 3
+ MaxRepetitions uint8 `toml:"max_repetitions"`
+
+ // Parameters for Version 3
+ ContextName string `toml:"context_name"`
+ // Values: "noAuthNoPriv", "authNoPriv", "authPriv"
+ SecLevel string `toml:"sec_level"`
+ SecName string `toml:"sec_name"`
+ // Values: "MD5", "SHA", "". Default: ""
+ AuthProtocol string `toml:"auth_protocol"`
+ AuthPassword string `toml:"auth_password"`
+ // Values: "DES", "AES", "". Default: ""
+ PrivProtocol string `toml:"priv_protocol"`
+ PrivPassword string `toml:"priv_password"`
+ EngineID string `toml:"-"`
+ EngineBoots uint32 `toml:"-"`
+ EngineTime uint32 `toml:"-"`
+}
diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go
new file mode 100644
index 0000000000000..23a15594ed6f7
--- /dev/null
+++ b/internal/snmp/wrapper.go
@@ -0,0 +1,180 @@
+package snmp
+
+import (
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/soniah/gosnmp"
+)
+
+// GosnmpWrapper wraps a *gosnmp.GoSNMP object so we can use it as a snmpConnection.
+type GosnmpWrapper struct {
+ *gosnmp.GoSNMP
+}
+
+// Host returns the value of GoSNMP.Target.
+func (gsw GosnmpWrapper) Host() string {
+ return gsw.Target
+}
+
+// Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the
+// connection is using SNMPv1 or newer.
+// Also, if any error is encountered, it will just once reconnect and try again.
+func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error {
+ var err error
+ // On error, retry once.
+ // Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function.
+ for i := 0; i < 2; i++ {
+ if gsw.Version == gosnmp.Version1 {
+ err = gsw.GoSNMP.Walk(oid, fn)
+ } else {
+ err = gsw.GoSNMP.BulkWalk(oid, fn)
+ }
+ if err == nil {
+ return nil
+ }
+ if err := gsw.GoSNMP.Connect(); err != nil {
+ return fmt.Errorf("reconnecting: %w", err)
+ }
+ }
+ return err
+}
+
+// Get wraps GoSNMP.GET().
+// If any error is encountered, it will just once reconnect and try again.
+func (gsw GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) {
+ var err error
+ var pkt *gosnmp.SnmpPacket
+ for i := 0; i < 2; i++ {
+ pkt, err = gsw.GoSNMP.Get(oids)
+ if err == nil {
+ return pkt, nil
+ }
+ if err := gsw.GoSNMP.Connect(); err != nil {
+ return nil, fmt.Errorf("reconnecting: %w", err)
+ }
+ }
+ return nil, err
+}
+
+func NewWrapper(s ClientConfig) (GosnmpWrapper, error) {
+ gs := GosnmpWrapper{&gosnmp.GoSNMP{}}
+
+ gs.Timeout = s.Timeout.Duration
+
+ gs.Retries = s.Retries
+
+ switch s.Version {
+ case 3:
+ gs.Version = gosnmp.Version3
+ case 2, 0:
+ gs.Version = gosnmp.Version2c
+ case 1:
+ gs.Version = gosnmp.Version1
+ default:
+ return GosnmpWrapper{}, fmt.Errorf("invalid version")
+ }
+
+ if s.Version < 3 {
+ if s.Community == "" {
+ gs.Community = "public"
+ } else {
+ gs.Community = s.Community
+ }
+ }
+
+ gs.MaxRepetitions = s.MaxRepetitions
+
+ if s.Version == 3 {
+ gs.ContextName = s.ContextName
+
+ sp := &gosnmp.UsmSecurityParameters{}
+ gs.SecurityParameters = sp
+ gs.SecurityModel = gosnmp.UserSecurityModel
+
+ switch strings.ToLower(s.SecLevel) {
+ case "noauthnopriv", "":
+ gs.MsgFlags = gosnmp.NoAuthNoPriv
+ case "authnopriv":
+ gs.MsgFlags = gosnmp.AuthNoPriv
+ case "authpriv":
+ gs.MsgFlags = gosnmp.AuthPriv
+ default:
+ return GosnmpWrapper{}, fmt.Errorf("invalid secLevel")
+ }
+
+ sp.UserName = s.SecName
+
+ switch strings.ToLower(s.AuthProtocol) {
+ case "md5":
+ sp.AuthenticationProtocol = gosnmp.MD5
+ case "sha":
+ sp.AuthenticationProtocol = gosnmp.SHA
+ case "":
+ sp.AuthenticationProtocol = gosnmp.NoAuth
+ default:
+ return GosnmpWrapper{}, fmt.Errorf("invalid authProtocol")
+ }
+
+ sp.AuthenticationPassphrase = s.AuthPassword
+
+ switch strings.ToLower(s.PrivProtocol) {
+ case "des":
+ sp.PrivacyProtocol = gosnmp.DES
+ case "aes":
+ sp.PrivacyProtocol = gosnmp.AES
+ case "":
+ sp.PrivacyProtocol = gosnmp.NoPriv
+ default:
+ return GosnmpWrapper{}, fmt.Errorf("invalid privProtocol")
+ }
+
+ sp.PrivacyPassphrase = s.PrivPassword
+
+ sp.AuthoritativeEngineID = s.EngineID
+
+ sp.AuthoritativeEngineBoots = s.EngineBoots
+
+ sp.AuthoritativeEngineTime = s.EngineTime
+ }
+ return gs, nil
+}
+
+// SetAgent takes a url (scheme://host:port) and sets the wrapped
+// GoSNMP struct's corresponding fields. This shouldn't be called
+// after using the wrapped GoSNMP struct, for example after
+// connecting.
+func (gs *GosnmpWrapper) SetAgent(agent string) error {
+ if !strings.Contains(agent, "://") {
+ agent = "udp://" + agent
+ }
+
+ u, err := url.Parse(agent)
+ if err != nil {
+ return err
+ }
+
+ switch u.Scheme {
+ case "tcp":
+ gs.Transport = "tcp"
+ case "", "udp":
+ gs.Transport = "udp"
+ default:
+ return fmt.Errorf("unsupported scheme: %v", u.Scheme)
+ }
+
+ gs.Target = u.Hostname()
+
+ portStr := u.Port()
+ if portStr == "" {
+ portStr = "161"
+ }
+ port, err := strconv.ParseUint(portStr, 10, 16)
+ if err != nil {
+ return fmt.Errorf("parsing port: %w", err)
+ }
+ gs.Port = uint16(port)
+ return nil
+}
diff --git a/plugins/inputs/syslog/framing.go b/internal/syslog/framing.go
similarity index 100%
rename from plugins/inputs/syslog/framing.go
rename to internal/syslog/framing.go
diff --git a/plugins/inputs/syslog/framing_test.go b/internal/syslog/framing_test.go
similarity index 100%
rename from plugins/inputs/syslog/framing_test.go
rename to internal/syslog/framing_test.go
diff --git a/internal/templating/engine_test.go b/internal/templating/engine_test.go
index b7dd23f384e67..0dfcb89d8d823 100644
--- a/internal/templating/engine_test.go
+++ b/internal/templating/engine_test.go
@@ -3,6 +3,7 @@ package templating
import (
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -20,3 +21,57 @@ func TestEngineAlternateSeparator(t *testing.T) {
}, tags)
require.Equal(t, "", field)
}
+
+func TestEngineWithWildcardTemplate(t *testing.T) {
+ var (
+ defaultTmpl, err = NewDefaultTemplateWithPattern("measurement*")
+ templates = []string{
+ "taskmanagerTask.alarm-detector.Assign.alarmDefinitionId metricsType.process.nodeId.x.alarmDefinitionId.measurement.field rule=1",
+ "taskmanagerTask.*.*.*.* metricsType.process.nodeId.measurement rule=2",
+ }
+ )
+ require.NoError(t, err)
+
+ engine, err := NewEngine(".", defaultTmpl, templates)
+ require.NoError(t, err)
+
+ for _, testCase := range []struct {
+ line string
+ measurement string
+ field string
+ tags map[string]string
+ }{
+ {
+ line: "taskmanagerTask.alarm-detector.Assign.alarmDefinitionId.timeout_errors.duration.p75",
+ measurement: "duration",
+ field: "p75",
+ tags: map[string]string{
+ "metricsType": "taskmanagerTask",
+ "process": "alarm-detector",
+ "nodeId": "Assign",
+ "x": "alarmDefinitionId",
+ "alarmDefinitionId": "timeout_errors",
+ "rule": "1",
+ },
+ },
+ {
+ line: "taskmanagerTask.alarm-detector.Assign.numRecordsInPerSecond.m5_rate",
+ measurement: "numRecordsInPerSecond",
+ tags: map[string]string{
+ "metricsType": "taskmanagerTask",
+ "process": "alarm-detector",
+ "nodeId": "Assign",
+ "rule": "2",
+ },
+ },
+ } {
+ t.Run(testCase.line, func(t *testing.T) {
+ measurement, tags, field, err := engine.Apply(testCase.line)
+ require.NoError(t, err)
+
+ assert.Equal(t, testCase.measurement, measurement)
+ assert.Equal(t, testCase.field, field)
+ assert.Equal(t, testCase.tags, tags)
+ })
+ }
+}
diff --git a/internal/templating/node.go b/internal/templating/node.go
index 83ab1a40cda4e..bf68509a0fe66 100644
--- a/internal/templating/node.go
+++ b/internal/templating/node.go
@@ -55,32 +55,44 @@ func (n *node) search(line string) *Template {
// recursiveSearch performs the actual recursive search
func (n *node) recursiveSearch(lineParts []string) *Template {
- // Nothing to search
+ // nothing to search
if len(lineParts) == 0 || len(n.children) == 0 {
return n.template
}
- // If last element is a wildcard, don't include it in this search since it's sorted
- // to the end but lexicographically it would not always be and sort.Search assumes
- // the slice is sorted.
- length := len(n.children)
- if n.children[length-1].value == "*" {
+ var (
+ hasWildcard bool
+ length = len(n.children)
+ )
+
+ // exclude last child from search if it is a wildcard. sort.Search expects
+ // a lexicographically sorted set of children and we have artificially sorted
+ // wildcards to the end of the child set
+ // wildcards will be searched separately if no exact match is found
+ if hasWildcard = n.children[length-1].value == "*"; hasWildcard {
length--
}
- // Find the index of child with an exact match
i := sort.Search(length, func(i int) bool {
return n.children[i].value >= lineParts[0]
})
- // Found an exact match, so search that child sub-tree
- if i < len(n.children) && n.children[i].value == lineParts[0] {
- return n.children[i].recursiveSearch(lineParts[1:])
+ // given an exact match is found within children set
+ if i < length && n.children[i].value == lineParts[0] {
+ // descend into the matching node
+ if tmpl := n.children[i].recursiveSearch(lineParts[1:]); tmpl != nil {
+ // given a template is found return it
+ return tmpl
+ }
}
- // Not an exact match, see if we have a wildcard child to search
- if n.children[len(n.children)-1].value == "*" {
- return n.children[len(n.children)-1].recursiveSearch(lineParts[1:])
+
+ // given no template is found and the last child is a wildcard
+ if hasWildcard {
+ // also search the wildcard child node
+ return n.children[length].recursiveSearch(lineParts[1:])
}
+
+ // fallback to returning template at this node
return n.template
}
diff --git a/internal/templating/template.go b/internal/templating/template.go
index 472bd2686bac0..235d2f2a58928 100644
--- a/internal/templating/template.go
+++ b/internal/templating/template.go
@@ -124,21 +124,16 @@ type templateSpecs []templateSpec
// Less reports whether the element with
// index j should sort before the element with index k.
func (e templateSpecs) Less(j, k int) bool {
- if len(e[j].filter) == 0 && len(e[k].filter) == 0 {
- jlength := len(strings.Split(e[j].template, e[j].separator))
- klength := len(strings.Split(e[k].template, e[k].separator))
- return jlength < klength
- }
- if len(e[j].filter) == 0 {
+ jlen := len(e[j].filter)
+ klen := len(e[k].filter)
+ if jlen == 0 && klen != 0 {
return true
}
- if len(e[k].filter) == 0 {
+ if klen == 0 && jlen != 0 {
return false
}
-
- jlength := len(strings.Split(e[j].template, e[j].separator))
- klength := len(strings.Split(e[k].template, e[k].separator))
- return jlength < klength
+ return strings.Count(e[j].template, e[j].separator) <
+ strings.Count(e[k].template, e[k].separator)
}
// Swap swaps the elements with indexes i and j.
diff --git a/internal/templating/template_test.go b/internal/templating/template_test.go
new file mode 100644
index 0000000000000..0a1aae5bc1fd4
--- /dev/null
+++ b/internal/templating/template_test.go
@@ -0,0 +1,14 @@
+package templating
+
+import "testing"
+
+func BenchmarkTemplateLess(b *testing.B) {
+ a := templateSpec{
+ template: "aa|bb|cc|dd|ee|ff",
+ separator: "|",
+ }
+ specs := templateSpecs{a, a}
+ for i := 0; i < b.N; i++ {
+ specs.Less(0, 1)
+ }
+}
diff --git a/internal/usage.go b/internal/usage.go
index a49021b43d709..6eff30e6b0b21 100644
--- a/internal/usage.go
+++ b/internal/usage.go
@@ -16,6 +16,9 @@ The commands & flags are:
--aggregator-filter filter the aggregators to enable, separator is :
--config configuration file to load
--config-directory directory containing additional *.conf files
+ --plugin-directory directory containing *.so files, this directory will be
+ searched recursively. Any Plugin found will be loaded
+ and namespaced.
--debug turn on debug logging
--input-filter filter the inputs to enable, separator is :
--input-list print available input plugins.
@@ -25,9 +28,14 @@ The commands & flags are:
--pprof-addr pprof address to listen on, don't activate pprof if empty
--processor-filter filter the processors to enable, separator is :
--quiet run in quiet mode
+ --section-filter filter config sections to output, separator is :
+ Valid values are 'agent', 'global_tags', 'outputs',
+ 'processors', 'aggregators' and 'inputs'
--sample-config print out full sample configuration
- --test gather metrics, print them out, and exit;
- processors, aggregators, and outputs are not run
+ --once enable once mode: gather metrics once, write them, and exit
+ --test enable test mode: gather metrics once and print them
+ --test-wait wait up to this many seconds for service
+ inputs to complete in test or once mode
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--version display the version and exit
@@ -39,7 +47,7 @@ Examples:
# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config
- # run a single telegraf collection, outputing metrics to stdout
+ # run a single telegraf collection, outputting metrics to stdout
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
diff --git a/internal/usage_windows.go b/internal/usage_windows.go
index 0bdd73026f58e..7fee6a1f1595c 100644
--- a/internal/usage_windows.go
+++ b/internal/usage_windows.go
@@ -26,14 +26,20 @@ The commands & flags are:
--processor-filter filter the processors to enable, separator is :
--quiet run in quiet mode
--sample-config print out full sample configuration
- --test gather metrics, print them out, and exit;
- processors, aggregators, and outputs are not run
+ --section-filter filter config sections to output, separator is :
+ Valid values are 'agent', 'global_tags', 'outputs',
+ 'processors', 'aggregators' and 'inputs'
+ --once enable once mode: gather metrics once, write them, and exit
+ --test enable test mode: gather metrics once and print them
+ --test-wait wait up to this many seconds for service
+ inputs to complete in test or once mode
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--version display the version and exit
--console run as console application (windows only)
--service operate on the service (windows only)
--service-name service name (windows only)
+ --service-display-name service display name (windows only)
Examples:
@@ -43,7 +49,7 @@ Examples:
# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config
- # run a single telegraf collection, outputing metrics to stdout
+ # run a single telegraf collection, outputting metrics to stdout
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
@@ -62,5 +68,5 @@ Examples:
telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf"
# install telegraf service with custom name
- telegraf --service install --service-name=my-telegraf
+ telegraf --service install --service-name=my-telegraf --service-display-name="My Telegraf"
`
diff --git a/logger/event_logger.go b/logger/event_logger.go
new file mode 100644
index 0000000000000..48b645ddedb3c
--- /dev/null
+++ b/logger/event_logger.go
@@ -0,0 +1,49 @@
+package logger
+
+import (
+ "io"
+ "strings"
+
+ "github.com/influxdata/wlog"
+ "github.com/kardianos/service"
+)
+
+const (
+ LogTargetEventlog = "eventlog"
+)
+
+type eventLogger struct {
+ logger service.Logger
+}
+
+func (t *eventLogger) Write(b []byte) (n int, err error) {
+ loc := prefixRegex.FindIndex(b)
+ n = len(b)
+ if loc == nil {
+ err = t.logger.Info(b)
+ } else if n > 2 { //skip empty log messages
+ line := strings.Trim(string(b[loc[1]:]), " \t\r\n")
+ switch rune(b[loc[0]]) {
+ case 'I':
+ err = t.logger.Info(line)
+ case 'W':
+ err = t.logger.Warning(line)
+ case 'E':
+ err = t.logger.Error(line)
+ }
+ }
+
+ return
+}
+
+type eventLoggerCreator struct {
+ serviceLogger service.Logger
+}
+
+func (e *eventLoggerCreator) CreateLogger(config LogConfig) (io.Writer, error) {
+ return wlog.NewWriter(&eventLogger{logger: e.serviceLogger}), nil
+}
+
+func RegisterEventLogger(serviceLogger service.Logger) {
+ registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: serviceLogger})
+}
diff --git a/logger/event_logger_test.go b/logger/event_logger_test.go
new file mode 100644
index 0000000000000..f2d4eb4209e89
--- /dev/null
+++ b/logger/event_logger_test.go
@@ -0,0 +1,100 @@
+//+build windows
+
+package logger
+
+import (
+ "bytes"
+ "encoding/xml"
+ "log"
+ "os/exec"
+ "testing"
+ "time"
+
+ "github.com/kardianos/service"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type Levels int
+
+const (
+ Info Levels = iota + 1
+ Warning
+ Error
+)
+
+type Event struct {
+ Message string `xml:"EventData>Data"`
+ Level Levels `xml:"System>EventID"`
+}
+
+func getEventLog(t *testing.T, since time.Time) []Event {
+ timeStr := since.UTC().Format(time.RFC3339)
+ cmd := exec.Command("wevtutil", "qe", "Application", "/rd:true", "/q:Event[System[TimeCreated[@SystemTime >= '"+timeStr+"'] and Provider[@Name='Telegraf']]]")
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err := cmd.Run()
+ require.NoError(t, err)
+ xmlStr := "" + out.String() + " "
+ var events struct {
+ Events []Event `xml:"Event"`
+ }
+ err = xml.Unmarshal([]byte(xmlStr), &events)
+ require.NoError(t, err)
+ return events.Events
+}
+
+func TestEventLog(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping integration test in short mode")
+ }
+ prepareLogger(t)
+
+ config := LogConfig{
+ LogTarget: LogTargetEventlog,
+ Logfile: "",
+ }
+
+ SetupLogging(config)
+ now := time.Now()
+ log.Println("I! Info message")
+ log.Println("W! Warn message")
+ log.Println("E! Err message")
+ events := getEventLog(t, now)
+ assert.Len(t, events, 3)
+ assert.Contains(t, events, Event{Message: "Info message", Level: Info})
+ assert.Contains(t, events, Event{Message: "Warn message", Level: Warning})
+ assert.Contains(t, events, Event{Message: "Err message", Level: Error})
+}
+
+func TestRestrictedEventLog(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping integration test in short mode")
+ }
+ prepareLogger(t)
+
+ config := LogConfig{
+ LogTarget: LogTargetEventlog,
+ Quiet: true,
+ }
+
+ SetupLogging(config)
+ //separate previous log messages by small delay
+ time.Sleep(time.Second)
+ now := time.Now()
+ log.Println("I! Info message")
+ log.Println("W! Warning message")
+ log.Println("E! Error message")
+ events := getEventLog(t, now)
+ assert.Len(t, events, 1)
+ assert.Contains(t, events, Event{Message: "Error message", Level: Error})
+}
+
+func prepareLogger(t *testing.T) {
+ svc, err := service.New(nil, &service.Config{Name: "Telegraf"})
+ require.NoError(t, err)
+ svcLogger, err := svc.SystemLogger(nil)
+ require.NoError(t, err)
+ require.NotNil(t, svcLogger)
+ registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: svcLogger})
+}
diff --git a/logger/logger.go b/logger/logger.go
index 6250dedd62529..a276d2e807c6c 100644
--- a/logger/logger.go
+++ b/logger/logger.go
@@ -1,26 +1,61 @@
package logger
import (
+ "errors"
"io"
"log"
"os"
"regexp"
"time"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/rotate"
"github.com/influxdata/wlog"
)
var prefixRegex = regexp.MustCompile("^[DIWE]!")
-// newTelegrafWriter returns a logging-wrapped writer.
-func newTelegrafWriter(w io.Writer) io.Writer {
- return &telegrafLog{
- writer: wlog.NewWriter(w),
+const (
+ LogTargetFile = "file"
+ LogTargetStderr = "stderr"
+)
+
+// LogConfig contains the log configuration settings
+type LogConfig struct {
+ // will set the log level to DEBUG
+ Debug bool
+ //will set the log level to ERROR
+ Quiet bool
+ //stderr, stdout, file or eventlog (Windows only)
+ LogTarget string
+ // will direct the logging output to a file. Empty string is
+ // interpreted as stderr. If there is an error opening the file the
+ // logger will fallback to stderr
+ Logfile string
+ // will rotate when current file at the specified time interval
+ RotationInterval internal.Duration
+ // will rotate when current file size exceeds this parameter.
+ RotationMaxSize internal.Size
+ // maximum rotated files to keep (older ones will be deleted)
+ RotationMaxArchives int
+}
+
+type LoggerCreator interface {
+ CreateLogger(config LogConfig) (io.Writer, error)
+}
+
+var loggerRegistry map[string]LoggerCreator
+
+func registerLogger(name string, loggerCreator LoggerCreator) {
+ if loggerRegistry == nil {
+ loggerRegistry = make(map[string]LoggerCreator)
}
+ loggerRegistry[name] = loggerCreator
}
type telegrafLog struct {
- writer io.Writer
+ writer io.Writer
+ internalWriter io.Writer
}
func (t *telegrafLog) Write(b []byte) (n int, err error) {
@@ -33,31 +68,96 @@ func (t *telegrafLog) Write(b []byte) (n int, err error) {
return t.writer.Write(line)
}
+func (t *telegrafLog) Close() error {
+ var stdErrWriter io.Writer
+ stdErrWriter = os.Stderr
+ // avoid closing stderr
+ if t.internalWriter != stdErrWriter {
+ closer, isCloser := t.internalWriter.(io.Closer)
+ if !isCloser {
+ return errors.New("the underlying writer cannot be closed")
+ }
+ return closer.Close()
+ }
+ return nil
+}
+
+// newTelegrafWriter returns a logging-wrapped writer.
+func newTelegrafWriter(w io.Writer) io.Writer {
+ return &telegrafLog{
+ writer: wlog.NewWriter(w),
+ internalWriter: w,
+ }
+}
+
// SetupLogging configures the logging output.
-// debug will set the log level to DEBUG
-// quiet will set the log level to ERROR
-// logfile will direct the logging output to a file. Empty string is
-// interpreted as stderr. If there is an error opening the file the
-// logger will fallback to stderr.
-func SetupLogging(debug, quiet bool, logfile string) {
+func SetupLogging(config LogConfig) {
+ newLogWriter(config)
+}
+
+type telegrafLogCreator struct {
+}
+
+func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) {
+ var writer, defaultWriter io.Writer
+ defaultWriter = os.Stderr
+
+ switch config.LogTarget {
+ case LogTargetFile:
+ if config.Logfile != "" {
+ var err error
+ if writer, err = rotate.NewFileWriter(config.Logfile, config.RotationInterval.Duration, config.RotationMaxSize.Size, config.RotationMaxArchives); err != nil {
+ log.Printf("E! Unable to open %s (%s), using stderr", config.Logfile, err)
+ writer = defaultWriter
+ }
+ } else {
+ writer = defaultWriter
+ }
+ case LogTargetStderr, "":
+ writer = defaultWriter
+ default:
+ log.Printf("E! Unsupported logtarget: %s, using stderr", config.LogTarget)
+ writer = defaultWriter
+ }
+
+ return newTelegrafWriter(writer), nil
+}
+
+// Keep track what is actually set as a log output, because log package doesn't provide a getter.
+// It allows closing previous writer if re-set and have possibility to test what is actually set
+var actualLogger io.Writer
+
+func newLogWriter(config LogConfig) io.Writer {
log.SetFlags(0)
- if debug {
+ if config.Debug {
wlog.SetLevel(wlog.DEBUG)
}
- if quiet {
+ if config.Quiet {
wlog.SetLevel(wlog.ERROR)
}
+ if !config.Debug && !config.Quiet {
+ wlog.SetLevel(wlog.INFO)
+ }
+ var logWriter io.Writer
+ if logCreator, ok := loggerRegistry[config.LogTarget]; ok {
+ logWriter, _ = logCreator.CreateLogger(config)
+ }
+ if logWriter == nil {
+ logWriter, _ = (&telegrafLogCreator{}).CreateLogger(config)
+ }
- var oFile *os.File
- if logfile != "" {
- var err error
- if oFile, err = os.OpenFile(logfile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModeAppend|0644); err != nil {
- log.Printf("E! Unable to open %s (%s), using stderr", logfile, err)
- oFile = os.Stderr
- }
- } else {
- oFile = os.Stderr
+ if closer, isCloser := actualLogger.(io.Closer); isCloser {
+ closer.Close()
}
+ log.SetOutput(logWriter)
+ actualLogger = logWriter
+
+ return logWriter
+}
- log.SetOutput(newTelegrafWriter(oFile))
+func init() {
+ tlc := &telegrafLogCreator{}
+ registerLogger("", tlc)
+ registerLogger(LogTargetStderr, tlc)
+ registerLogger(LogTargetFile, tlc)
}
diff --git a/logger/logger_test.go b/logger/logger_test.go
index a721cbba7beaa..a5f53ca17e89b 100644
--- a/logger/logger_test.go
+++ b/logger/logger_test.go
@@ -2,12 +2,16 @@ package logger
import (
"bytes"
+ "io"
"io/ioutil"
"log"
"os"
+ "path/filepath"
"testing"
+ "github.com/influxdata/telegraf/internal"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestWriteLogToFile(t *testing.T) {
@@ -15,7 +19,8 @@ func TestWriteLogToFile(t *testing.T) {
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
- SetupLogging(false, false, tmpfile.Name())
+ config := createBasicLogConfig(tmpfile.Name())
+ SetupLogging(config)
log.Printf("I! TEST")
log.Printf("D! TEST") // <- should be ignored
@@ -28,8 +33,9 @@ func TestDebugWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
-
- SetupLogging(true, false, tmpfile.Name())
+ config := createBasicLogConfig(tmpfile.Name())
+ config.Debug = true
+ SetupLogging(config)
log.Printf("D! TEST")
f, err := ioutil.ReadFile(tmpfile.Name())
@@ -41,8 +47,9 @@ func TestErrorWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
-
- SetupLogging(false, true, tmpfile.Name())
+ config := createBasicLogConfig(tmpfile.Name())
+ config.Quiet = true
+ SetupLogging(config)
log.Printf("E! TEST")
log.Printf("I! TEST") // <- should be ignored
@@ -55,8 +62,9 @@ func TestAddDefaultLogLevel(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
-
- SetupLogging(true, false, tmpfile.Name())
+ config := createBasicLogConfig(tmpfile.Name())
+ config.Debug = true
+ SetupLogging(config)
log.Printf("TEST")
f, err := ioutil.ReadFile(tmpfile.Name())
@@ -68,15 +76,16 @@ func TestWriteToTruncatedFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
-
- SetupLogging(true, false, tmpfile.Name())
+ config := createBasicLogConfig(tmpfile.Name())
+ config.Debug = true
+ SetupLogging(config)
log.Printf("TEST")
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
- tmpf, err := os.OpenFile(tmpfile.Name(), os.O_TRUNC, 0644)
+ tmpf, err := os.OpenFile(tmpfile.Name(), os.O_RDWR|os.O_TRUNC, 0644)
assert.NoError(t, err)
assert.NoError(t, tmpf.Close())
@@ -87,6 +96,44 @@ func TestWriteToTruncatedFile(t *testing.T) {
assert.Equal(t, f[19:], []byte("Z I! SHOULD BE FIRST\n"))
}
+func TestWriteToFileInRotation(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "LogRotation")
+ require.NoError(t, err)
+ config := createBasicLogConfig(filepath.Join(tempDir, "test.log"))
+ config.LogTarget = LogTargetFile
+ config.RotationMaxSize = internal.Size{Size: int64(30)}
+ writer := newLogWriter(config)
+ // Close the writer here, otherwise the temp folder cannot be deleted because the current log file is in use.
+ closer, isCloser := writer.(io.Closer)
+ assert.True(t, isCloser)
+ defer func() { closer.Close(); os.RemoveAll(tempDir) }()
+
+ log.Printf("I! TEST 1") // Writes 31 bytes, will rotate
+ log.Printf("I! TEST") // Writes 29 byes, no rotation expected
+ files, _ := ioutil.ReadDir(tempDir)
+ assert.Equal(t, 2, len(files))
+}
+
+func TestLogTargetSettings(t *testing.T) {
+ config := LogConfig{
+ LogTarget: "",
+ Quiet: true,
+ }
+ SetupLogging(config)
+ logger, isTelegrafLogger := actualLogger.(*telegrafLog)
+ assert.True(t, isTelegrafLogger)
+ assert.Equal(t, logger.internalWriter, os.Stderr)
+
+ config = LogConfig{
+ LogTarget: "stderr",
+ Quiet: true,
+ }
+ SetupLogging(config)
+ logger, isTelegrafLogger = actualLogger.(*telegrafLog)
+ assert.True(t, isTelegrafLogger)
+ assert.Equal(t, logger.internalWriter, os.Stderr)
+}
+
func BenchmarkTelegrafLogWrite(b *testing.B) {
var msg = []byte("test")
var buf bytes.Buffer
@@ -96,3 +143,11 @@ func BenchmarkTelegrafLogWrite(b *testing.B) {
w.Write(msg)
}
}
+
+func createBasicLogConfig(filename string) LogConfig {
+ return LogConfig{
+ Logfile: filename,
+ LogTarget: LogTargetFile,
+ RotationMaxArchives: -1,
+ }
+}
diff --git a/metric.go b/metric.go
index 396321e6ecd49..6c7b1c6c5f75c 100644
--- a/metric.go
+++ b/metric.go
@@ -17,43 +17,93 @@ const (
Histogram
)
+// Tag represents a single tag key and value.
type Tag struct {
Key string
Value string
}
+// Field represents a single field key and value.
type Field struct {
Key string
Value interface{}
}
+// Metric is the type of data that is processed by Telegraf. Input plugins,
+// and to a lesser degree, Processor and Aggregator plugins create new Metrics
+// and Output plugins write them.
type Metric interface {
- // Getting data structure functions
+ // Name is the primary identifier for the Metric and corresponds to the
+ // measurement in the InfluxDB data model.
Name() string
+
+ // Tags returns the tags as a map. This method is deprecated, use TagList instead.
Tags() map[string]string
+
+ // TagList returns the tags as a slice ordered by the tag key in lexical
+ // bytewise ascending order. The returned value should not be modified,
+ // use the AddTag or RemoveTag methods instead.
TagList() []*Tag
+
+ // Fields returns the fields as a map. This method is deprecated, use FieldList instead.
Fields() map[string]interface{}
+
+ // FieldList returns the fields as a slice in an undefined order. The
+ // returned value should not be modified, use the AddField or RemoveField
+ // methods instead.
FieldList() []*Field
+
+ // Time returns the timestamp of the metric.
Time() time.Time
+
+ // Type returns a general type for the entire metric that describes how you
+ // might interpret, aggregate the values.
+ //
+ // This method may be removed in the future and its use is discouraged.
Type() ValueType
- // Name functions
+ // SetName sets the metric name.
SetName(name string)
+
+ // AddPrefix adds a string to the front of the metric name. It is
+ // equivalent to m.SetName(prefix + m.Name()).
+ //
+ // This method is deprecated, use SetName instead.
AddPrefix(prefix string)
+
+ // AddSuffix appends a string to the back of the metric name. It is
+ // equivalent to m.SetName(m.Name() + suffix).
+ //
+ // This method is deprecated, use SetName instead.
AddSuffix(suffix string)
- // Tag functions
+ // GetTag returns the value of a tag and a boolean to indicate if it was set.
GetTag(key string) (string, bool)
+
+ // HasTag returns true if the tag is set on the Metric.
HasTag(key string) bool
+
+ // AddTag sets the tag on the Metric. If the Metric already has the tag
+ // set then the current value is replaced.
AddTag(key, value string)
+
+ // RemoveTag removes the tag if it is set.
RemoveTag(key string)
- // Field functions
+ // GetField returns the value of a field and a boolean to indicate if it was set.
GetField(key string) (interface{}, bool)
+
+ // HasField returns true if the field is set on the Metric.
HasField(key string) bool
+
+ // AddField sets the field on the Metric. If the Metric already has the field
+ // set then the current value is replaced.
AddField(key string, value interface{})
+
+ // RemoveField removes the tag if it is set.
RemoveField(key string)
+ // SetTime sets the timestamp of the Metric.
SetTime(t time.Time)
// HashID returns an unique identifier for the series.
@@ -73,7 +123,13 @@ type Metric interface {
// to any output.
Drop()
- // Mark Metric as an aggregate
+ // SetAggregate indicates the metric is an aggregated value.
+ //
+ // This method may be removed in the future and its use is discouraged.
SetAggregate(bool)
+
+ // IsAggregate returns true if the Metric is an aggregate.
+ //
+ // This method may be removed in the future and its use is discouraged.
IsAggregate() bool
}
diff --git a/metric/builder.go b/metric/builder.go
deleted file mode 100644
index 9a331b9a4cb36..0000000000000
--- a/metric/builder.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package metric
-
-import (
- "time"
-
- "github.com/influxdata/telegraf"
-)
-
-type TimeFunc func() time.Time
-
-type Builder struct {
- TimeFunc
- TimePrecision time.Duration
-
- *metric
-}
-
-func NewBuilder() *Builder {
- b := &Builder{
- TimeFunc: time.Now,
- TimePrecision: 1 * time.Nanosecond,
- }
- b.Reset()
- return b
-}
-
-func (b *Builder) SetName(name string) {
- b.name = name
-}
-
-func (b *Builder) AddTag(key string, value string) {
- b.metric.AddTag(key, value)
-}
-
-func (b *Builder) AddField(key string, value interface{}) {
- b.metric.AddField(key, value)
-}
-
-func (b *Builder) SetTime(tm time.Time) {
- b.tm = tm
-}
-
-func (b *Builder) Reset() {
- b.metric = &metric{
- tp: telegraf.Untyped,
- }
-}
-
-func (b *Builder) Metric() (telegraf.Metric, error) {
- if b.tm.IsZero() {
- b.tm = b.TimeFunc().Truncate(b.TimePrecision)
- }
-
- return b.metric, nil
-}
diff --git a/metric/metric.go b/metric/metric.go
index 29345e63c777e..517645a831280 100644
--- a/metric/metric.go
+++ b/metric/metric.go
@@ -50,13 +50,15 @@ func New(
sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key })
}
- m.fields = make([]*telegraf.Field, 0, len(fields))
- for k, v := range fields {
- v := convertField(v)
- if v == nil {
- continue
+ if len(fields) > 0 {
+ m.fields = make([]*telegraf.Field, 0, len(fields))
+ for k, v := range fields {
+ v := convertField(v)
+ if v == nil {
+ continue
+ }
+ m.AddField(k, v)
}
- m.AddField(k, v)
}
return m, nil
@@ -240,11 +242,11 @@ func (m *metric) Copy() telegraf.Metric {
}
for i, tag := range m.tags {
- m2.tags[i] = tag
+ m2.tags[i] = &telegraf.Tag{Key: tag.Key, Value: tag.Value}
}
for i, field := range m.fields {
- m2.fields[i] = field
+ m2.fields[i] = &telegraf.Field{Key: field.Key, Value: field.Value}
}
return m2
}
diff --git a/metric/metric_test.go b/metric/metric_test.go
index 004fa5915507e..7033d32303f16 100644
--- a/metric/metric_test.go
+++ b/metric/metric_test.go
@@ -334,7 +334,7 @@ func TestValueType(t *testing.T) {
assert.Equal(t, telegraf.Gauge, m.Type())
}
-func TestCopyAggreate(t *testing.T) {
+func TestCopyAggregate(t *testing.T) {
m1 := baseMetric()
m1.SetAggregate(true)
m2 := m1.Copy()
diff --git a/metric/tracking.go b/metric/tracking.go
index 3d8843240f576..e370d9f2a7ccc 100644
--- a/metric/tracking.go
+++ b/metric/tracking.go
@@ -34,8 +34,7 @@ var (
)
func newTrackingID() telegraf.TrackingID {
- atomic.AddUint64(&lastID, 1)
- return telegraf.TrackingID(lastID)
+ return telegraf.TrackingID(atomic.AddUint64(&lastID, 1))
}
func debugFinalizer(d *trackingData) {
diff --git a/metric/tracking_test.go b/metric/tracking_test.go
index f950cfcd120eb..0ca1ca4daa4bc 100644
--- a/metric/tracking_test.go
+++ b/metric/tracking_test.go
@@ -1,6 +1,7 @@
package metric
import (
+ "sync"
"testing"
"time"
@@ -30,6 +31,43 @@ func (d *deliveries) onDelivery(info telegraf.DeliveryInfo) {
d.Info[info.ID()] = info
}
+func TestNewTrackingID(t *testing.T) {
+ var wg sync.WaitGroup
+ var a [100000]telegraf.TrackingID
+ var b [100000]telegraf.TrackingID
+
+ wg.Add(2)
+ go func() {
+ for i := 0; i < len(a); i++ {
+ a[i] = newTrackingID()
+ }
+ wg.Done()
+ }()
+ go func() {
+ for i := 0; i < len(b); i++ {
+ b[i] = newTrackingID()
+ }
+ wg.Done()
+ }()
+ wg.Wait()
+
+ // Find any duplicate TrackingIDs in arrays a and b. Arrays must be sorted in increasing order.
+ for i, j := 0, 0; i < len(a) && j < len(b); {
+ if a[i] == b[j] {
+ t.Errorf("Duplicate TrackingID: a[%d]==%d and b[%d]==%d.", i, a[i], j, b[j])
+ break
+ }
+ if a[i] > b[j] {
+ j++
+ continue
+ }
+ if a[i] < b[j] {
+ i++
+ continue
+ }
+ }
+}
+
func TestTracking(t *testing.T) {
tests := []struct {
name string
diff --git a/internal/models/buffer.go b/models/buffer.go
similarity index 76%
rename from internal/models/buffer.go
rename to models/buffer.go
index 5d036c7280716..9cc1a3d889f38 100644
--- a/internal/models/buffer.go
+++ b/models/buffer.go
@@ -32,7 +32,12 @@ type Buffer struct {
}
// NewBuffer returns a new empty Buffer with the given capacity.
-func NewBuffer(name string, capacity int) *Buffer {
+func NewBuffer(name string, alias string, capacity int) *Buffer {
+ tags := map[string]string{"output": name}
+ if alias != "" {
+ tags["alias"] = alias
+ }
+
b := &Buffer{
buf: make([]telegraf.Metric, capacity),
first: 0,
@@ -43,27 +48,27 @@ func NewBuffer(name string, capacity int) *Buffer {
MetricsAdded: selfstat.Register(
"write",
"metrics_added",
- map[string]string{"output": name},
+ tags,
),
MetricsWritten: selfstat.Register(
"write",
"metrics_written",
- map[string]string{"output": name},
+ tags,
),
MetricsDropped: selfstat.Register(
"write",
"metrics_dropped",
- map[string]string{"output": name},
+ tags,
),
BufferSize: selfstat.Register(
"write",
"buffer_size",
- map[string]string{"output": name},
+ tags,
),
BufferLimit: selfstat.Register(
"write",
"buffer_limit",
- map[string]string{"output": name},
+ tags,
),
}
b.BufferSize.Set(int64(0))
@@ -99,12 +104,14 @@ func (b *Buffer) metricDropped(metric telegraf.Metric) {
metric.Reject()
}
-func (b *Buffer) add(m telegraf.Metric) {
+func (b *Buffer) add(m telegraf.Metric) int {
+ dropped := 0
// Check if Buffer is full
if b.size == b.cap {
b.metricDropped(b.buf[b.last])
+ dropped++
- if b.last == b.batchFirst && b.batchSize > 0 {
+ if b.batchSize > 0 {
b.batchSize--
b.batchFirst = b.next(b.batchFirst)
}
@@ -120,22 +127,27 @@ func (b *Buffer) add(m telegraf.Metric) {
}
b.size = min(b.size+1, b.cap)
+ return dropped
}
-// Add adds metrics to the buffer
-func (b *Buffer) Add(metrics ...telegraf.Metric) {
+// Add adds metrics to the buffer and returns number of dropped metrics.
+func (b *Buffer) Add(metrics ...telegraf.Metric) int {
b.Lock()
defer b.Unlock()
+ dropped := 0
for i := range metrics {
- b.add(metrics[i])
+ if n := b.add(metrics[i]); n != 0 {
+ dropped += n
+ }
}
b.BufferSize.Set(int64(b.length()))
+ return dropped
}
-// Batch returns a slice containing up to batchSize of the most recently added
-// metrics. Metrics are ordered from newest to oldest in the batch. The
+// Batch returns a slice containing up to batchSize of the oldest metrics not
+// yet dropped. Metrics are ordered from oldest to newest in the batch. The
// batch must not be modified by the client.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
b.Lock()
@@ -147,18 +159,17 @@ func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
return out
}
- b.batchFirst = b.cap + b.last - outLen
- b.batchFirst %= b.cap
+ b.batchFirst = b.first
b.batchSize = outLen
batchIndex := b.batchFirst
for i := range out {
- out[len(out)-1-i] = b.buf[batchIndex]
+ out[i] = b.buf[batchIndex]
b.buf[batchIndex] = nil
batchIndex = b.next(batchIndex)
}
- b.last = b.batchFirst
+ b.first = b.nextby(b.first, b.batchSize)
b.size -= outLen
return out
}
@@ -186,38 +197,22 @@ func (b *Buffer) Reject(batch []telegraf.Metric) {
return
}
- older := b.dist(b.first, b.batchFirst)
free := b.cap - b.size
- restore := min(len(batch), free+older)
+ restore := min(len(batch), free)
+ skip := len(batch) - restore
- // Rotate newer metrics forward the number of metrics that we can restore.
- rb := b.batchFirst
- rp := b.last
- re := b.nextby(rp, restore)
- b.last = re
+ b.first = b.prevby(b.first, restore)
+ b.size = min(b.size+restore, b.cap)
- for rb != rp && rp != re {
- rp = b.prev(rp)
- re = b.prev(re)
-
- if b.buf[re] != nil {
- b.metricDropped(b.buf[re])
- b.first = b.next(b.first)
- }
+ re := b.first
- b.buf[re] = b.buf[rp]
- b.buf[rp] = nil
- }
-
- // Copy metrics from the batch back into the buffer; recall that the
- // batch is in reverse order compared to b.buf
+ // Copy metrics from the batch back into the buffer
for i := range batch {
- if i < restore {
- re = b.prev(re)
- b.buf[re] = batch[i]
- b.size = min(b.size+1, b.cap)
- } else {
+ if i < skip {
b.metricDropped(batch[i])
+ } else {
+ b.buf[re] = batch[i]
+ re = b.next(re)
}
}
@@ -261,6 +256,17 @@ func (b *Buffer) prev(index int) int {
return index
}
+// prevby returns the index that is count older with wrapping.
+func (b *Buffer) prevby(index, count int) int {
+ index -= count
+ for index < 0 {
+ index += b.cap
+ }
+
+ index %= b.cap
+ return index
+}
+
func (b *Buffer) resetBatch() {
b.batchFirst = 0
b.batchSize = 0
diff --git a/internal/models/buffer_test.go b/models/buffer_test.go
similarity index 88%
rename from internal/models/buffer_test.go
rename to models/buffer_test.go
index bc19680d18b6b..9aef94fb86585 100644
--- a/internal/models/buffer_test.go
+++ b/models/buffer_test.go
@@ -49,7 +49,7 @@ func MetricTime(sec int64) telegraf.Metric {
}
func BenchmarkAddMetrics(b *testing.B) {
- buf := NewBuffer("test", 10000)
+ buf := NewBuffer("test", "", 10000)
m := Metric()
for n := 0; n < b.N; n++ {
buf.Add(m)
@@ -64,14 +64,14 @@ func setup(b *Buffer) *Buffer {
}
func TestBuffer_LenEmpty(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
require.Equal(t, 0, b.Len())
}
func TestBuffer_LenOne(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m)
require.Equal(t, 1, b.Len())
@@ -79,7 +79,7 @@ func TestBuffer_LenOne(t *testing.T) {
func TestBuffer_LenFull(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
require.Equal(t, 5, b.Len())
@@ -87,7 +87,7 @@ func TestBuffer_LenFull(t *testing.T) {
func TestBuffer_LenOverfill(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
setup(b)
b.Add(m, m, m, m, m, m)
@@ -95,14 +95,14 @@ func TestBuffer_LenOverfill(t *testing.T) {
}
func TestBuffer_BatchLenZero(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
batch := b.Batch(0)
require.Len(t, batch, 0)
}
func TestBuffer_BatchLenBufferEmpty(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
batch := b.Batch(2)
require.Len(t, batch, 0)
@@ -110,7 +110,7 @@ func TestBuffer_BatchLenBufferEmpty(t *testing.T) {
func TestBuffer_BatchLenUnderfill(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m)
batch := b.Batch(2)
@@ -119,7 +119,7 @@ func TestBuffer_BatchLenUnderfill(t *testing.T) {
func TestBuffer_BatchLenFill(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m)
batch := b.Batch(2)
require.Len(t, batch, 2)
@@ -127,7 +127,7 @@ func TestBuffer_BatchLenFill(t *testing.T) {
func TestBuffer_BatchLenExact(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m)
batch := b.Batch(2)
require.Len(t, batch, 2)
@@ -135,7 +135,7 @@ func TestBuffer_BatchLenExact(t *testing.T) {
func TestBuffer_BatchLenLargerThanBuffer(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
batch := b.Batch(6)
require.Len(t, batch, 5)
@@ -143,7 +143,7 @@ func TestBuffer_BatchLenLargerThanBuffer(t *testing.T) {
func TestBuffer_BatchWrap(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
batch := b.Batch(2)
b.Accept(batch)
@@ -153,7 +153,7 @@ func TestBuffer_BatchWrap(t *testing.T) {
}
func TestBuffer_BatchLatest(t *testing.T) {
- b := setup(NewBuffer("test", 4))
+ b := setup(NewBuffer("test", "", 4))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
b.Add(MetricTime(3))
@@ -161,13 +161,13 @@ func TestBuffer_BatchLatest(t *testing.T) {
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(3),
+ MetricTime(1),
MetricTime(2),
}, batch)
}
func TestBuffer_BatchLatestWrap(t *testing.T) {
- b := setup(NewBuffer("test", 4))
+ b := setup(NewBuffer("test", "", 4))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
b.Add(MetricTime(3))
@@ -177,13 +177,13 @@ func TestBuffer_BatchLatestWrap(t *testing.T) {
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(5),
- MetricTime(4),
+ MetricTime(2),
+ MetricTime(3),
}, batch)
}
func TestBuffer_MultipleBatch(t *testing.T) {
- b := setup(NewBuffer("test", 10))
+ b := setup(NewBuffer("test", "", 10))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
b.Add(MetricTime(3))
@@ -193,23 +193,23 @@ func TestBuffer_MultipleBatch(t *testing.T) {
batch := b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(6),
- MetricTime(5),
- MetricTime(4),
- MetricTime(3),
+ MetricTime(1),
MetricTime(2),
+ MetricTime(3),
+ MetricTime(4),
+ MetricTime(5),
}, batch)
b.Accept(batch)
batch = b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(1),
+ MetricTime(6),
}, batch)
b.Accept(batch)
}
func TestBuffer_RejectWithRoom(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
b.Add(MetricTime(3))
@@ -223,16 +223,16 @@ func TestBuffer_RejectWithRoom(t *testing.T) {
batch = b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(5),
- MetricTime(4),
- MetricTime(3),
- MetricTime(2),
MetricTime(1),
+ MetricTime(2),
+ MetricTime(3),
+ MetricTime(4),
+ MetricTime(5),
}, batch)
}
func TestBuffer_RejectNothingNewFull(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
b.Add(MetricTime(3))
@@ -246,16 +246,16 @@ func TestBuffer_RejectNothingNewFull(t *testing.T) {
batch = b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(5),
- MetricTime(4),
- MetricTime(3),
- MetricTime(2),
MetricTime(1),
+ MetricTime(2),
+ MetricTime(3),
+ MetricTime(4),
+ MetricTime(5),
}, batch)
}
func TestBuffer_RejectNoRoom(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
@@ -275,16 +275,16 @@ func TestBuffer_RejectNoRoom(t *testing.T) {
batch = b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(8),
- MetricTime(7),
- MetricTime(6),
- MetricTime(5),
MetricTime(4),
+ MetricTime(5),
+ MetricTime(6),
+ MetricTime(7),
+ MetricTime(8),
}, batch)
}
func TestBuffer_RejectRoomExact(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
batch := b.Batch(2)
@@ -299,16 +299,16 @@ func TestBuffer_RejectRoomExact(t *testing.T) {
batch = b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(5),
- MetricTime(4),
- MetricTime(3),
- MetricTime(2),
MetricTime(1),
+ MetricTime(2),
+ MetricTime(3),
+ MetricTime(4),
+ MetricTime(5),
}, batch)
}
func TestBuffer_RejectRoomOverwriteOld(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
b.Add(MetricTime(3))
@@ -324,16 +324,16 @@ func TestBuffer_RejectRoomOverwriteOld(t *testing.T) {
batch = b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(6),
- MetricTime(5),
- MetricTime(4),
- MetricTime(3),
MetricTime(2),
+ MetricTime(3),
+ MetricTime(4),
+ MetricTime(5),
+ MetricTime(6),
}, batch)
}
func TestBuffer_RejectPartialRoom(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
@@ -351,16 +351,16 @@ func TestBuffer_RejectPartialRoom(t *testing.T) {
batch = b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(7),
- MetricTime(6),
- MetricTime(5),
- MetricTime(4),
MetricTime(3),
+ MetricTime(4),
+ MetricTime(5),
+ MetricTime(6),
+ MetricTime(7),
}, batch)
}
func TestBuffer_RejectNewMetricsWrapped(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
b.Add(MetricTime(3))
@@ -394,16 +394,16 @@ func TestBuffer_RejectNewMetricsWrapped(t *testing.T) {
batch = b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(15),
- MetricTime(14),
- MetricTime(13),
- MetricTime(12),
MetricTime(11),
+ MetricTime(12),
+ MetricTime(13),
+ MetricTime(14),
+ MetricTime(15),
}, batch)
}
func TestBuffer_RejectWrapped(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
b.Add(MetricTime(3))
@@ -425,16 +425,16 @@ func TestBuffer_RejectWrapped(t *testing.T) {
batch = b.Batch(5)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(12),
- MetricTime(11),
- MetricTime(10),
- MetricTime(9),
MetricTime(8),
+ MetricTime(9),
+ MetricTime(10),
+ MetricTime(11),
+ MetricTime(12),
}, batch)
}
func TestBuffer_RejectAdjustFirst(t *testing.T) {
- b := setup(NewBuffer("test", 10))
+ b := setup(NewBuffer("test", "", 10))
b.Add(MetricTime(1))
b.Add(MetricTime(2))
b.Add(MetricTime(3))
@@ -467,22 +467,22 @@ func TestBuffer_RejectAdjustFirst(t *testing.T) {
batch = b.Batch(10)
testutil.RequireMetricsEqual(t,
[]telegraf.Metric{
- MetricTime(19),
- MetricTime(18),
- MetricTime(17),
- MetricTime(16),
- MetricTime(15),
- MetricTime(14),
- MetricTime(13),
- MetricTime(12),
- MetricTime(11),
MetricTime(10),
+ MetricTime(11),
+ MetricTime(12),
+ MetricTime(13),
+ MetricTime(14),
+ MetricTime(15),
+ MetricTime(16),
+ MetricTime(17),
+ MetricTime(18),
+ MetricTime(19),
}, batch)
}
func TestBuffer_AddDropsOverwrittenMetrics(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
b.Add(m, m, m, m, m)
@@ -493,7 +493,7 @@ func TestBuffer_AddDropsOverwrittenMetrics(t *testing.T) {
func TestBuffer_AcceptRemovesBatch(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m)
batch := b.Batch(2)
b.Accept(batch)
@@ -502,7 +502,7 @@ func TestBuffer_AcceptRemovesBatch(t *testing.T) {
func TestBuffer_RejectLeavesBatch(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m)
batch := b.Batch(2)
b.Reject(batch)
@@ -511,7 +511,7 @@ func TestBuffer_RejectLeavesBatch(t *testing.T) {
func TestBuffer_AcceptWritesOverwrittenBatch(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
batch := b.Batch(5)
@@ -524,7 +524,7 @@ func TestBuffer_AcceptWritesOverwrittenBatch(t *testing.T) {
func TestBuffer_BatchRejectDropsOverwrittenBatch(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
batch := b.Batch(5)
@@ -537,7 +537,7 @@ func TestBuffer_BatchRejectDropsOverwrittenBatch(t *testing.T) {
func TestBuffer_MetricsOverwriteBatchAccept(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
batch := b.Batch(3)
@@ -549,7 +549,7 @@ func TestBuffer_MetricsOverwriteBatchAccept(t *testing.T) {
func TestBuffer_MetricsOverwriteBatchReject(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
batch := b.Batch(3)
@@ -561,7 +561,7 @@ func TestBuffer_MetricsOverwriteBatchReject(t *testing.T) {
func TestBuffer_MetricsBatchAcceptRemoved(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
batch := b.Batch(3)
@@ -573,7 +573,7 @@ func TestBuffer_MetricsBatchAcceptRemoved(t *testing.T) {
func TestBuffer_WrapWithBatch(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m)
b.Batch(3)
@@ -584,7 +584,7 @@ func TestBuffer_WrapWithBatch(t *testing.T) {
func TestBuffer_BatchNotRemoved(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
b.Batch(2)
require.Equal(t, 5, b.Len())
@@ -592,7 +592,7 @@ func TestBuffer_BatchNotRemoved(t *testing.T) {
func TestBuffer_BatchRejectAcceptNoop(t *testing.T) {
m := Metric()
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(m, m, m, m, m)
batch := b.Batch(2)
b.Reject(batch)
@@ -608,7 +608,7 @@ func TestBuffer_AcceptCallsMetricAccept(t *testing.T) {
accept++
},
}
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(mm, mm, mm)
batch := b.Batch(2)
b.Accept(batch)
@@ -623,7 +623,7 @@ func TestBuffer_AddCallsMetricRejectWhenNoBatch(t *testing.T) {
reject++
},
}
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
setup(b)
b.Add(mm, mm, mm, mm, mm)
b.Add(mm, mm)
@@ -638,7 +638,7 @@ func TestBuffer_AddCallsMetricRejectWhenNotInBatch(t *testing.T) {
reject++
},
}
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
setup(b)
b.Add(mm, mm, mm, mm, mm)
batch := b.Batch(2)
@@ -656,7 +656,7 @@ func TestBuffer_RejectCallsMetricRejectWithOverwritten(t *testing.T) {
reject++
},
}
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(mm, mm, mm, mm, mm)
batch := b.Batch(5)
b.Add(mm, mm)
@@ -673,7 +673,7 @@ func TestBuffer_AddOverwriteAndReject(t *testing.T) {
reject++
},
}
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(mm, mm, mm, mm, mm)
batch := b.Batch(5)
b.Add(mm, mm, mm, mm, mm)
@@ -697,7 +697,7 @@ func TestBuffer_AddOverwriteAndRejectOffset(t *testing.T) {
accept++
},
}
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
b.Add(mm, mm, mm)
b.Add(mm, mm, mm, mm)
require.Equal(t, 2, reject)
@@ -716,7 +716,7 @@ func TestBuffer_AddOverwriteAndRejectOffset(t *testing.T) {
}
func TestBuffer_RejectEmptyBatch(t *testing.T) {
- b := setup(NewBuffer("test", 5))
+ b := setup(NewBuffer("test", "", 5))
batch := b.Batch(2)
b.Add(MetricTime(1))
b.Reject(batch)
diff --git a/internal/models/filter.go b/models/filter.go
similarity index 100%
rename from internal/models/filter.go
rename to models/filter.go
diff --git a/internal/models/filter_test.go b/models/filter_test.go
similarity index 99%
rename from internal/models/filter_test.go
rename to models/filter_test.go
index 84cd1d3970999..d241244b9d704 100644
--- a/internal/models/filter_test.go
+++ b/models/filter_test.go
@@ -97,7 +97,7 @@ func TestFilter_Empty(t *testing.T) {
"foo_bar",
"foo.bar",
"foo-bar",
- "supercalifradjulisticexpialidocious",
+ "supercalifragilisticexpialidocious",
}
for _, measurement := range measurements {
diff --git a/models/log.go b/models/log.go
new file mode 100644
index 0000000000000..2e42a516c2171
--- /dev/null
+++ b/models/log.go
@@ -0,0 +1,102 @@
+package models
+
+import (
+ "log"
+ "reflect"
+
+ "github.com/influxdata/telegraf"
+)
+
+// Logger defines a logging structure for plugins.
+type Logger struct {
+ OnErrs []func()
+ Name string // Name is the plugin name, will be printed in the `[]`.
+}
+
+// NewLogger creates a new logger instance
+func NewLogger(pluginType, name, alias string) *Logger {
+ return &Logger{
+ Name: logName(pluginType, name, alias),
+ }
+}
+
+// OnErr defines a callback that triggers only when errors are about to be written to the log
+func (l *Logger) OnErr(f func()) {
+ l.OnErrs = append(l.OnErrs, f)
+}
+
+// Errorf logs an error message, patterned after log.Printf.
+func (l *Logger) Errorf(format string, args ...interface{}) {
+ for _, f := range l.OnErrs {
+ f()
+ }
+ log.Printf("E! ["+l.Name+"] "+format, args...)
+}
+
+// Error logs an error message, patterned after log.Print.
+func (l *Logger) Error(args ...interface{}) {
+ for _, f := range l.OnErrs {
+ f()
+ }
+ log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...)
+}
+
+// Debugf logs a debug message, patterned after log.Printf.
+func (l *Logger) Debugf(format string, args ...interface{}) {
+ log.Printf("D! ["+l.Name+"] "+format, args...)
+}
+
+// Debug logs a debug message, patterned after log.Print.
+func (l *Logger) Debug(args ...interface{}) {
+ log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...)
+}
+
+// Warnf logs a warning message, patterned after log.Printf.
+func (l *Logger) Warnf(format string, args ...interface{}) {
+ log.Printf("W! ["+l.Name+"] "+format, args...)
+}
+
+// Warn logs a warning message, patterned after log.Print.
+func (l *Logger) Warn(args ...interface{}) {
+ log.Print(append([]interface{}{"W! [" + l.Name + "] "}, args...)...)
+}
+
+// Infof logs an information message, patterned after log.Printf.
+func (l *Logger) Infof(format string, args ...interface{}) {
+ log.Printf("I! ["+l.Name+"] "+format, args...)
+}
+
+// Info logs an information message, patterned after log.Print.
+func (l *Logger) Info(args ...interface{}) {
+ log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...)
+}
+
+// logName returns the log-friendly name/type.
+func logName(pluginType, name, alias string) string {
+ if alias == "" {
+ return pluginType + "." + name
+ }
+ return pluginType + "." + name + "::" + alias
+}
+
+func setLoggerOnPlugin(i interface{}, log telegraf.Logger) {
+ valI := reflect.ValueOf(i)
+
+ if valI.Type().Kind() != reflect.Ptr {
+ valI = reflect.New(reflect.TypeOf(i))
+ }
+
+ field := valI.Elem().FieldByName("Log")
+ if !field.IsValid() {
+ return
+ }
+
+ switch field.Type().String() {
+ case "telegraf.Logger":
+ if field.CanSet() {
+ field.Set(reflect.ValueOf(log))
+ }
+ }
+
+ return
+}
diff --git a/models/log_test.go b/models/log_test.go
new file mode 100644
index 0000000000000..2b5ec39c654e4
--- /dev/null
+++ b/models/log_test.go
@@ -0,0 +1,24 @@
+package models
+
+import (
+ "testing"
+
+ "github.com/influxdata/telegraf/selfstat"
+ "github.com/stretchr/testify/require"
+)
+
+func TestErrorCounting(t *testing.T) {
+ reg := selfstat.Register(
+ "gather",
+ "errors",
+ map[string]string{"input": "test"},
+ )
+ iLog := Logger{Name: "inputs.test"}
+ iLog.OnErr(func() {
+ reg.Incr(1)
+ })
+ iLog.Error("something went wrong")
+ iLog.Errorf("something went wrong")
+
+ require.Equal(t, int64(2), reg.Get())
+}
diff --git a/internal/models/makemetric.go b/models/makemetric.go
similarity index 100%
rename from internal/models/makemetric.go
rename to models/makemetric.go
diff --git a/internal/models/running_aggregator.go b/models/running_aggregator.go
similarity index 70%
rename from internal/models/running_aggregator.go
rename to models/running_aggregator.go
index 8a2cd576ab0f4..ad054be76f6c1 100644
--- a/internal/models/running_aggregator.go
+++ b/models/running_aggregator.go
@@ -1,7 +1,6 @@
package models
import (
- "log"
"sync"
"time"
@@ -16,6 +15,7 @@ type RunningAggregator struct {
Config *AggregatorConfig
periodStart time.Time
periodEnd time.Time
+ log telegraf.Logger
MetricsPushed selfstat.Stat
MetricsFiltered selfstat.Stat
@@ -23,42 +23,55 @@ type RunningAggregator struct {
PushTime selfstat.Stat
}
-func NewRunningAggregator(
- aggregator telegraf.Aggregator,
- config *AggregatorConfig,
-) *RunningAggregator {
+func NewRunningAggregator(aggregator telegraf.Aggregator, config *AggregatorConfig) *RunningAggregator {
+ tags := map[string]string{"aggregator": config.Name}
+ if config.Alias != "" {
+ tags["alias"] = config.Alias
+ }
+
+ aggErrorsRegister := selfstat.Register("aggregate", "errors", tags)
+ logger := NewLogger("aggregators", config.Name, config.Alias)
+ logger.OnErr(func() {
+ aggErrorsRegister.Incr(1)
+ })
+
+ setLoggerOnPlugin(aggregator, logger)
+
return &RunningAggregator{
Aggregator: aggregator,
Config: config,
MetricsPushed: selfstat.Register(
"aggregate",
"metrics_pushed",
- map[string]string{"aggregator": config.Name},
+ tags,
),
MetricsFiltered: selfstat.Register(
"aggregate",
"metrics_filtered",
- map[string]string{"aggregator": config.Name},
+ tags,
),
MetricsDropped: selfstat.Register(
"aggregate",
"metrics_dropped",
- map[string]string{"aggregator": config.Name},
+ tags,
),
PushTime: selfstat.Register(
"aggregate",
"push_time_ns",
- map[string]string{"aggregator": config.Name},
+ tags,
),
+ log: logger,
}
}
// AggregatorConfig is the common config for all aggregators.
type AggregatorConfig struct {
Name string
+ Alias string
DropOriginal bool
Period time.Duration
Delay time.Duration
+ Grace time.Duration
NameOverride string
MeasurementPrefix string
@@ -67,8 +80,18 @@ type AggregatorConfig struct {
Filter Filter
}
-func (r *RunningAggregator) Name() string {
- return "aggregators." + r.Config.Name
+func (r *RunningAggregator) LogName() string {
+ return logName("aggregators", r.Config.Name, r.Config.Alias)
+}
+
+func (r *RunningAggregator) Init() error {
+ if p, ok := r.Aggregator.(telegraf.Initializer); ok {
+ err := p.Init()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
}
func (r *RunningAggregator) Period() time.Duration {
@@ -82,7 +105,7 @@ func (r *RunningAggregator) EndPeriod() time.Time {
func (r *RunningAggregator) UpdateWindow(start, until time.Time) {
r.periodStart = start
r.periodEnd = until
- log.Printf("D! [%s] Updated aggregation range [%s, %s]", r.Name(), start, until)
+ r.log.Debugf("Updated aggregation range [%s, %s]", start, until)
}
func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric {
@@ -125,9 +148,9 @@ func (r *RunningAggregator) Add(m telegraf.Metric) bool {
r.Lock()
defer r.Unlock()
- if m.Time().Before(r.periodStart) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) {
- log.Printf("D! [%s] metric is outside aggregation window; discarding. %s: m: %s e: %s",
- r.Name(), m.Time(), r.periodStart, r.periodEnd)
+ if m.Time().Before(r.periodStart.Add(-r.Config.Grace)) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) {
+ r.log.Debugf("Metric is outside aggregation window; discarding. %s: m: %s e: %s g: %s",
+ m.Time(), r.periodStart, r.periodEnd, r.Config.Grace)
r.MetricsDropped.Incr(1)
return r.Config.DropOriginal
}
@@ -154,3 +177,7 @@ func (r *RunningAggregator) push(acc telegraf.Accumulator) {
elapsed := time.Since(start)
r.PushTime.Incr(elapsed.Nanoseconds())
}
+
+func (r *RunningAggregator) Log() telegraf.Logger {
+ return r.log
+}
diff --git a/internal/models/running_aggregator_test.go b/models/running_aggregator_test.go
similarity index 74%
rename from internal/models/running_aggregator_test.go
rename to models/running_aggregator_test.go
index 19476eecfbc5a..a858859652acf 100644
--- a/internal/models/running_aggregator_test.go
+++ b/models/running_aggregator_test.go
@@ -1,7 +1,6 @@
package models
import (
- "sync/atomic"
"testing"
"time"
@@ -89,6 +88,68 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"])
}
+func TestAddMetricsOutsideCurrentPeriodWithGrace(t *testing.T) {
+ a := &TestAggregator{}
+ ra := NewRunningAggregator(a, &AggregatorConfig{
+ Name: "TestRunningAggregator",
+ Filter: Filter{
+ NamePass: []string{"*"},
+ },
+ Period: time.Millisecond * 1500,
+ Grace: time.Millisecond * 500,
+ })
+ require.NoError(t, ra.Config.Filter.Compile())
+ acc := testutil.Accumulator{}
+ now := time.Now()
+ ra.UpdateWindow(now, now.Add(ra.Config.Period))
+
+ m := testutil.MustMetric("RITest",
+ map[string]string{},
+ map[string]interface{}{
+ "value": int64(101),
+ },
+ now.Add(-time.Hour),
+ telegraf.Untyped,
+ )
+ require.False(t, ra.Add(m))
+
+ // metric before current period (late)
+ m = testutil.MustMetric("RITest",
+ map[string]string{},
+ map[string]interface{}{
+ "value": int64(100),
+ },
+ now.Add(-time.Millisecond*1000),
+ telegraf.Untyped,
+ )
+ require.False(t, ra.Add(m))
+
+ // metric before current period, but within grace period (late)
+ m = testutil.MustMetric("RITest",
+ map[string]string{},
+ map[string]interface{}{
+ "value": int64(102),
+ },
+ now.Add(-time.Millisecond*200),
+ telegraf.Untyped,
+ )
+ require.False(t, ra.Add(m))
+
+ // "now" metric
+ m = testutil.MustMetric("RITest",
+ map[string]string{},
+ map[string]interface{}{
+ "value": int64(101),
+ },
+ time.Now().Add(time.Millisecond*50),
+ telegraf.Untyped)
+ require.False(t, ra.Add(m))
+
+ ra.Push(&acc)
+ require.Equal(t, 1, len(acc.Metrics))
+ require.Equal(t, int64(203), acc.Metrics[0].Fields["sum"])
+}
+
func TestAddAndPushOnePeriod(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
@@ -184,7 +245,7 @@ type TestAggregator struct {
func (t *TestAggregator) Description() string { return "" }
func (t *TestAggregator) SampleConfig() string { return "" }
func (t *TestAggregator) Reset() {
- atomic.StoreInt64(&t.sum, 0)
+ t.sum = 0
}
func (t *TestAggregator) Push(acc telegraf.Accumulator) {
@@ -197,7 +258,7 @@ func (t *TestAggregator) Push(acc telegraf.Accumulator) {
func (t *TestAggregator) Add(in telegraf.Metric) {
for _, v := range in.Fields() {
if vi, ok := v.(int64); ok {
- atomic.AddInt64(&t.sum, vi)
+ t.sum += vi
}
}
}
diff --git a/internal/models/running_input.go b/models/running_input.go
similarity index 60%
rename from internal/models/running_input.go
rename to models/running_input.go
index 08a804c406d04..52f95cb522871 100644
--- a/internal/models/running_input.go
+++ b/models/running_input.go
@@ -7,12 +7,16 @@ import (
"github.com/influxdata/telegraf/selfstat"
)
-var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})
+var (
+ GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})
+ GlobalGatherErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
+)
type RunningInput struct {
Input telegraf.Input
Config *InputConfig
+ log telegraf.Logger
defaultTags map[string]string
MetricsGathered selfstat.Stat
@@ -20,26 +24,43 @@ type RunningInput struct {
}
func NewRunningInput(input telegraf.Input, config *InputConfig) *RunningInput {
+ tags := map[string]string{"input": config.Name}
+ if config.Alias != "" {
+ tags["alias"] = config.Alias
+ }
+
+ inputErrorsRegister := selfstat.Register("gather", "errors", tags)
+ logger := NewLogger("inputs", config.Name, config.Alias)
+ logger.OnErr(func() {
+ inputErrorsRegister.Incr(1)
+ GlobalGatherErrors.Incr(1)
+ })
+ setLoggerOnPlugin(input, logger)
+
return &RunningInput{
Input: input,
Config: config,
MetricsGathered: selfstat.Register(
"gather",
"metrics_gathered",
- map[string]string{"input": config.Name},
+ tags,
),
GatherTime: selfstat.RegisterTiming(
"gather",
"gather_time_ns",
- map[string]string{"input": config.Name},
+ tags,
),
+ log: logger,
}
}
// InputConfig is the common config for all inputs.
type InputConfig struct {
- Name string
- Interval time.Duration
+ Name string
+ Alias string
+ Interval time.Duration
+ CollectionJitter time.Duration
+ Precision time.Duration
NameOverride string
MeasurementPrefix string
@@ -48,14 +69,24 @@ type InputConfig struct {
Filter Filter
}
-func (r *RunningInput) Name() string {
- return "inputs." + r.Config.Name
-}
-
func (r *RunningInput) metricFiltered(metric telegraf.Metric) {
metric.Drop()
}
+func (r *RunningInput) LogName() string {
+ return logName("inputs", r.Config.Name, r.Config.Alias)
+}
+
+func (r *RunningInput) Init() error {
+ if p, ok := r.Input.(telegraf.Initializer); ok {
+ err := p.Init()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func (r *RunningInput) MakeMetric(metric telegraf.Metric) telegraf.Metric {
if ok := r.Config.Filter.Select(metric); !ok {
r.metricFiltered(metric)
@@ -92,3 +123,7 @@ func (r *RunningInput) Gather(acc telegraf.Accumulator) error {
func (r *RunningInput) SetDefaultTags(tags map[string]string) {
r.defaultTags = tags
}
+
+func (r *RunningInput) Log() telegraf.Logger {
+ return r.log
+}
diff --git a/internal/models/running_input_test.go b/models/running_input_test.go
similarity index 88%
rename from internal/models/running_input_test.go
rename to models/running_input_test.go
index 5978a006198e3..ff3747116f6ca 100644
--- a/internal/models/running_input_test.go
+++ b/models/running_input_test.go
@@ -4,6 +4,8 @@ import (
"testing"
"time"
+ "github.com/influxdata/telegraf/selfstat"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
@@ -256,6 +258,35 @@ func TestMakeMetricNameSuffix(t *testing.T) {
require.Equal(t, expected, m)
}
+func TestMetricErrorCounters(t *testing.T) {
+ ri := NewRunningInput(&testInput{}, &InputConfig{
+ Name: "TestMetricErrorCounters",
+ })
+
+ getGatherErrors := func() int64 {
+ for _, r := range selfstat.Metrics() {
+ tag, hasTag := r.GetTag("input")
+ if r.Name() == "internal_gather" && hasTag && tag == "TestMetricErrorCounters" {
+ errCount, ok := r.GetField("errors")
+ if !ok {
+ t.Fatal("Expected error field")
+ }
+ return errCount.(int64)
+ }
+ }
+ return 0
+ }
+
+ before := getGatherErrors()
+
+ ri.Log().Error("Oh no")
+
+ after := getGatherErrors()
+
+ require.Greater(t, after, before)
+ require.GreaterOrEqual(t, int64(1), GlobalGatherErrors.Get())
+}
+
type testInput struct{}
func (t *testInput) Description() string { return "" }
diff --git a/internal/models/running_output.go b/models/running_output.go
similarity index 61%
rename from internal/models/running_output.go
rename to models/running_output.go
index 4cec18cc8284f..0d2954c4aa4fa 100644
--- a/internal/models/running_output.go
+++ b/models/running_output.go
@@ -1,7 +1,6 @@
package models
import (
- "log"
"sync"
"sync/atomic"
"time"
@@ -21,19 +20,25 @@ const (
// OutputConfig containing name and filter
type OutputConfig struct {
Name string
+ Alias string
Filter Filter
FlushInterval time.Duration
+ FlushJitter time.Duration
MetricBufferLimit int
MetricBatchSize int
+
+ NameOverride string
+ NamePrefix string
+ NameSuffix string
}
// RunningOutput contains the output configuration
type RunningOutput struct {
// Must be 64-bit aligned
newMetricsCount int64
+ droppedMetrics int64
- Name string
Output telegraf.Output
Config *OutputConfig
MetricBufferLimit int
@@ -45,6 +50,7 @@ type RunningOutput struct {
BatchReady chan time.Time
buffer *Buffer
+ log telegraf.Logger
aggMutex sync.Mutex
}
@@ -52,50 +58,78 @@ type RunningOutput struct {
func NewRunningOutput(
name string,
output telegraf.Output,
- conf *OutputConfig,
+ config *OutputConfig,
batchSize int,
bufferLimit int,
) *RunningOutput {
- if conf.MetricBufferLimit > 0 {
- bufferLimit = conf.MetricBufferLimit
+ tags := map[string]string{"output": config.Name}
+ if config.Alias != "" {
+ tags["alias"] = config.Alias
+ }
+
+ writeErrorsRegister := selfstat.Register("write", "errors", tags)
+ logger := NewLogger("outputs", config.Name, config.Alias)
+ logger.OnErr(func() {
+ writeErrorsRegister.Incr(1)
+ })
+ setLoggerOnPlugin(output, logger)
+
+ if config.MetricBufferLimit > 0 {
+ bufferLimit = config.MetricBufferLimit
}
if bufferLimit == 0 {
bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
}
- if conf.MetricBatchSize > 0 {
- batchSize = conf.MetricBatchSize
+ if config.MetricBatchSize > 0 {
+ batchSize = config.MetricBatchSize
}
if batchSize == 0 {
batchSize = DEFAULT_METRIC_BATCH_SIZE
}
+
ro := &RunningOutput{
- Name: name,
- buffer: NewBuffer(name, bufferLimit),
+ buffer: NewBuffer(config.Name, config.Alias, bufferLimit),
BatchReady: make(chan time.Time, 1),
Output: output,
- Config: conf,
+ Config: config,
MetricBufferLimit: bufferLimit,
MetricBatchSize: batchSize,
MetricsFiltered: selfstat.Register(
"write",
"metrics_filtered",
- map[string]string{"output": name},
+ tags,
),
WriteTime: selfstat.RegisterTiming(
"write",
"write_time_ns",
- map[string]string{"output": name},
+ tags,
),
+ log: logger,
}
return ro
}
+func (r *RunningOutput) LogName() string {
+ return logName("outputs", r.Config.Name, r.Config.Alias)
+}
+
func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) {
ro.MetricsFiltered.Incr(1)
metric.Drop()
}
+func (r *RunningOutput) Init() error {
+ if p, ok := r.Output.(telegraf.Initializer); ok {
+ err := p.Init()
+ if err != nil {
+ return err
+ }
+
+ }
+ return nil
+}
+
// AddMetric adds a metric to the output.
//
// Takes ownership of metric
@@ -118,7 +152,20 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
return
}
- ro.buffer.Add(metric)
+ if len(ro.Config.NameOverride) > 0 {
+ metric.SetName(ro.Config.NameOverride)
+ }
+
+ if len(ro.Config.NamePrefix) > 0 {
+ metric.AddPrefix(ro.Config.NamePrefix)
+ }
+
+ if len(ro.Config.NameSuffix) > 0 {
+ metric.AddSuffix(ro.Config.NameSuffix)
+ }
+
+ dropped := ro.buffer.Add(metric)
+ atomic.AddInt64(&ro.droppedMetrics, int64(dropped))
count := atomic.AddInt64(&ro.newMetricsCount, 1)
if count == int64(ro.MetricBatchSize) {
@@ -180,28 +227,41 @@ func (ro *RunningOutput) WriteBatch() error {
return nil
}
-func (ro *RunningOutput) Close() {
- err := ro.Output.Close()
+// Close closes the output
+func (r *RunningOutput) Close() {
+ err := r.Output.Close()
if err != nil {
- log.Printf("E! [outputs.%s] Error closing output: %v", ro.Name, err)
+ r.log.Errorf("Error closing output: %v", err)
}
}
-func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
+func (r *RunningOutput) write(metrics []telegraf.Metric) error {
+ dropped := atomic.LoadInt64(&r.droppedMetrics)
+ if dropped > 0 {
+ r.log.Warnf("Metric buffer overflow; %d metrics have been dropped", dropped)
+ atomic.StoreInt64(&r.droppedMetrics, 0)
+ }
+
start := time.Now()
- err := ro.Output.Write(metrics)
+ err := r.Output.Write(metrics)
elapsed := time.Since(start)
- ro.WriteTime.Incr(elapsed.Nanoseconds())
+ r.WriteTime.Incr(elapsed.Nanoseconds())
if err == nil {
- log.Printf("D! [outputs.%s] wrote batch of %d metrics in %s\n",
- ro.Name, len(metrics), elapsed)
+ r.log.Debugf("Wrote batch of %d metrics in %s", len(metrics), elapsed)
}
return err
}
-func (ro *RunningOutput) LogBufferStatus() {
- nBuffer := ro.buffer.Len()
- log.Printf("D! [outputs.%s] buffer fullness: %d / %d metrics. ",
- ro.Name, nBuffer, ro.MetricBufferLimit)
+func (r *RunningOutput) LogBufferStatus() {
+ nBuffer := r.buffer.Len()
+ r.log.Debugf("Buffer fullness: %d / %d metrics", nBuffer, r.MetricBufferLimit)
+}
+
+func (r *RunningOutput) Log() telegraf.Logger {
+ return r.log
+}
+
+func (r *RunningOutput) BufferLength() int {
+ return r.buffer.Len()
}
diff --git a/internal/models/running_output_test.go b/models/running_output_test.go
similarity index 79%
rename from internal/models/running_output_test.go
rename to models/running_output_test.go
index fd38b0faa113f..38f79f9db397d 100644
--- a/internal/models/running_output_test.go
+++ b/models/running_output_test.go
@@ -4,8 +4,10 @@ import (
"fmt"
"sync"
"testing"
+ "time"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/selfstat"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -216,6 +218,60 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
assert.Len(t, m.Metrics()[0].Tags(), 1)
}
+// Test that measurement name overriding correctly
+func TestRunningOutput_NameOverride(t *testing.T) {
+ conf := &OutputConfig{
+ NameOverride: "new_metric_name",
+ }
+
+ m := &mockOutput{}
+ ro := NewRunningOutput("test", m, conf, 1000, 10000)
+
+ ro.AddMetric(testutil.TestMetric(101, "metric1"))
+ assert.Len(t, m.Metrics(), 0)
+
+ err := ro.Write()
+ assert.NoError(t, err)
+ assert.Len(t, m.Metrics(), 1)
+ assert.Equal(t, "new_metric_name", m.Metrics()[0].Name())
+}
+
+// Test that measurement name prefix is added correctly
+func TestRunningOutput_NamePrefix(t *testing.T) {
+ conf := &OutputConfig{
+ NamePrefix: "prefix_",
+ }
+
+ m := &mockOutput{}
+ ro := NewRunningOutput("test", m, conf, 1000, 10000)
+
+ ro.AddMetric(testutil.TestMetric(101, "metric1"))
+ assert.Len(t, m.Metrics(), 0)
+
+ err := ro.Write()
+ assert.NoError(t, err)
+ assert.Len(t, m.Metrics(), 1)
+ assert.Equal(t, "prefix_metric1", m.Metrics()[0].Name())
+}
+
+// Test that measurement name suffix is added correctly
+func TestRunningOutput_NameSuffix(t *testing.T) {
+ conf := &OutputConfig{
+ NameSuffix: "_suffix",
+ }
+
+ m := &mockOutput{}
+ ro := NewRunningOutput("test", m, conf, 1000, 10000)
+
+ ro.AddMetric(testutil.TestMetric(101, "metric1"))
+ assert.Len(t, m.Metrics(), 0)
+
+ err := ro.Write()
+ assert.NoError(t, err)
+ assert.Len(t, m.Metrics(), 1)
+ assert.Equal(t, "metric1_suffix", m.Metrics()[0].Name())
+}
+
// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
conf := &OutputConfig{
@@ -304,7 +360,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) {
// Verify that 10 metrics were written
assert.Len(t, m.Metrics(), 10)
// Verify that they are in order
- expected := append(reverse(next5), reverse(first5)...)
+ expected := append(first5, next5...)
assert.Equal(t, expected, m.Metrics())
}
@@ -365,9 +421,9 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) {
// Verify that 20 metrics were written
assert.Len(t, m.Metrics(), 20)
// Verify that they are in order
- expected := append(reverse(next5), reverse(first5)...)
- expected = append(expected, reverse(next5)...)
- expected = append(expected, reverse(first5)...)
+ expected := append(first5, next5...)
+ expected = append(expected, first5...)
+ expected = append(expected, next5...)
assert.Equal(t, expected, m.Metrics())
}
@@ -408,10 +464,54 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) {
// Verify that 6 metrics were written
assert.Len(t, m.Metrics(), 6)
// Verify that they are in order
- expected := []telegraf.Metric{next5[0], first5[4], first5[3], first5[2], first5[1], first5[0]}
+ expected := []telegraf.Metric{first5[0], first5[1], first5[2], first5[3], first5[4], next5[0]}
assert.Equal(t, expected, m.Metrics())
}
+func TestInternalMetrics(t *testing.T) {
+ _ = NewRunningOutput(
+ "test_internal",
+ &mockOutput{},
+ &OutputConfig{
+ Filter: Filter{},
+ Name: "test_name",
+ Alias: "test_alias",
+ },
+ 5,
+ 10)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "internal_write",
+ map[string]string{
+ "output": "test_name",
+ "alias": "test_alias",
+ },
+ map[string]interface{}{
+ "buffer_limit": 10,
+ "buffer_size": 0,
+ "errors": 0,
+ "metrics_added": 0,
+ "metrics_dropped": 0,
+ "metrics_filtered": 0,
+ "metrics_written": 0,
+ "write_time_ns": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ var actual []telegraf.Metric
+ for _, m := range selfstat.Metrics() {
+ output, _ := m.GetTag("output")
+ if m.Name() == "internal_write" && output == "test_name" {
+ actual = append(actual, m)
+ }
+ }
+
+ testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+}
+
type mockOutput struct {
sync.Mutex
diff --git a/models/running_processor.go b/models/running_processor.go
new file mode 100644
index 0000000000000..c487f48219ef3
--- /dev/null
+++ b/models/running_processor.go
@@ -0,0 +1,100 @@
+package models
+
+import (
+ "sync"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/selfstat"
+)
+
+type RunningProcessor struct {
+ sync.Mutex
+ log telegraf.Logger
+ Processor telegraf.StreamingProcessor
+ Config *ProcessorConfig
+}
+
+type RunningProcessors []*RunningProcessor
+
+func (rp RunningProcessors) Len() int { return len(rp) }
+func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] }
+func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }
+
+// FilterConfig containing a name and filter
+type ProcessorConfig struct {
+ Name string
+ Alias string
+ Order int64
+ Filter Filter
+}
+
+func NewRunningProcessor(processor telegraf.StreamingProcessor, config *ProcessorConfig) *RunningProcessor {
+ tags := map[string]string{"processor": config.Name}
+ if config.Alias != "" {
+ tags["alias"] = config.Alias
+ }
+
+ processErrorsRegister := selfstat.Register("process", "errors", tags)
+ logger := NewLogger("processors", config.Name, config.Alias)
+ logger.OnErr(func() {
+ processErrorsRegister.Incr(1)
+ })
+ setLoggerOnPlugin(processor, logger)
+
+ return &RunningProcessor{
+ Processor: processor,
+ Config: config,
+ log: logger,
+ }
+}
+
+func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) {
+ metric.Drop()
+}
+
+func (r *RunningProcessor) Init() error {
+ if p, ok := r.Processor.(telegraf.Initializer); ok {
+ err := p.Init()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *RunningProcessor) Log() telegraf.Logger {
+ return r.log
+}
+
+func (r *RunningProcessor) LogName() string {
+ return logName("processors", r.Config.Name, r.Config.Alias)
+}
+
+func (r *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric {
+ return metric
+}
+
+func (r *RunningProcessor) Start(acc telegraf.Accumulator) error {
+ return r.Processor.Start(acc)
+}
+
+func (r *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
+ if ok := r.Config.Filter.Select(m); !ok {
+ // pass downstream
+ acc.AddMetric(m)
+ return nil
+ }
+
+ r.Config.Filter.Modify(m)
+ if len(m.FieldList()) == 0 {
+ // drop metric
+ r.metricFiltered(m)
+ return nil
+ }
+
+ return r.Processor.Add(m, acc)
+}
+
+func (r *RunningProcessor) Stop() {
+ r.Processor.Stop()
+}
diff --git a/internal/models/running_processor_test.go b/models/running_processor_test.go
similarity index 71%
rename from internal/models/running_processor_test.go
rename to models/running_processor_test.go
index c24347b8ecf8e..1c431bde1e9ba 100644
--- a/internal/models/running_processor_test.go
+++ b/models/running_processor_test.go
@@ -6,12 +6,12 @@ import (
"time"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/testutil"
-
"github.com/stretchr/testify/require"
)
-// MockProcessor is a Processor with an overrideable Apply implementation.
+// MockProcessor is a Processor with an overridable Apply implementation.
type MockProcessor struct {
ApplyF func(in ...telegraf.Metric) []telegraf.Metric
}
@@ -28,6 +28,37 @@ func (p *MockProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
return p.ApplyF(in...)
}
+// MockProcessorToInit is a Processor that needs to be initialized.
+type MockProcessorToInit struct {
+ HasBeenInit bool
+}
+
+func (p *MockProcessorToInit) SampleConfig() string {
+ return ""
+}
+
+func (p *MockProcessorToInit) Description() string {
+ return ""
+}
+
+func (p *MockProcessorToInit) Apply(in ...telegraf.Metric) []telegraf.Metric {
+ return in
+}
+
+func (p *MockProcessorToInit) Init() error {
+ p.HasBeenInit = true
+ return nil
+}
+
+func TestRunningProcessor_Init(t *testing.T) {
+ mock := MockProcessorToInit{}
+ rp := &RunningProcessor{
+ Processor: processors.NewStreamingProcessorFromProcessor(&mock),
+ }
+ rp.Init()
+ require.True(t, mock.HasBeenInit)
+}
+
// TagProcessor returns a Processor whose Apply function adds the tag and
// value.
func TagProcessor(key, value string) *MockProcessor {
@@ -43,7 +74,7 @@ func TagProcessor(key, value string) *MockProcessor {
func TestRunningProcessor_Apply(t *testing.T) {
type args struct {
- Processor telegraf.Processor
+ Processor telegraf.StreamingProcessor
Config *ProcessorConfig
}
@@ -56,7 +87,7 @@ func TestRunningProcessor_Apply(t *testing.T) {
{
name: "inactive filter applies metrics",
args: args{
- Processor: TagProcessor("apply", "true"),
+ Processor: processors.NewStreamingProcessorFromProcessor(TagProcessor("apply", "true")),
Config: &ProcessorConfig{
Filter: Filter{},
},
@@ -87,7 +118,7 @@ func TestRunningProcessor_Apply(t *testing.T) {
{
name: "filter applies",
args: args{
- Processor: TagProcessor("apply", "true"),
+ Processor: processors.NewStreamingProcessorFromProcessor(TagProcessor("apply", "true")),
Config: &ProcessorConfig{
Filter: Filter{
NamePass: []string{"cpu"},
@@ -120,7 +151,7 @@ func TestRunningProcessor_Apply(t *testing.T) {
{
name: "filter doesn't apply",
args: args{
- Processor: TagProcessor("apply", "true"),
+ Processor: processors.NewStreamingProcessorFromProcessor(TagProcessor("apply", "true")),
Config: &ProcessorConfig{
Filter: Filter{
NameDrop: []string{"cpu"},
@@ -158,7 +189,15 @@ func TestRunningProcessor_Apply(t *testing.T) {
}
rp.Config.Filter.Compile()
- actual := rp.Apply(tt.input...)
+ acc := testutil.Accumulator{}
+ err := rp.Start(&acc)
+ require.NoError(t, err)
+ for _, m := range tt.input {
+ rp.Add(m, &acc)
+ }
+ rp.Stop()
+
+ actual := acc.GetTelegrafMetrics()
require.Equal(t, tt.expected, actual)
})
}
diff --git a/output.go b/output.go
index 3c4a85ddb5427..0045b2ca60469 100644
--- a/output.go
+++ b/output.go
@@ -1,14 +1,12 @@
package telegraf
type Output interface {
+ PluginDescriber
+
// Connect to the Output
Connect() error
// Close any connections to the Output
Close() error
- // Description returns a one-sentence description on the Output
- Description() string
- // SampleConfig returns the default configuration of the Output
- SampleConfig() string
// Write takes in group of points to be written to the Output
Write(metrics []Metric) error
}
diff --git a/plugin.go b/plugin.go
new file mode 100644
index 0000000000000..29e8bb683500b
--- /dev/null
+++ b/plugin.go
@@ -0,0 +1,40 @@
+package telegraf
+
+// Initializer is an interface that all plugin types: Inputs, Outputs,
+// Processors, and Aggregators can optionally implement to initialize the
+// plugin.
+type Initializer interface {
+ // Init performs one time setup of the plugin and returns an error if the
+ // configuration is invalid.
+ Init() error
+}
+
+// PluginDescriber contains the functions all plugins must implement to describe
+// themselves to Telegraf
+type PluginDescriber interface {
+ // SampleConfig returns the default configuration of the Processor
+ SampleConfig() string
+
+ // Description returns a one-sentence description on the Processor
+ Description() string
+}
+
+// Logger defines an interface for logging.
+type Logger interface {
+ // Errorf logs an error message, patterned after log.Printf.
+ Errorf(format string, args ...interface{})
+ // Error logs an error message, patterned after log.Print.
+ Error(args ...interface{})
+ // Debugf logs a debug message, patterned after log.Printf.
+ Debugf(format string, args ...interface{})
+ // Debug logs a debug message, patterned after log.Print.
+ Debug(args ...interface{})
+ // Warnf logs a warning message, patterned after log.Printf.
+ Warnf(format string, args ...interface{})
+ // Warn logs a warning message, patterned after log.Print.
+ Warn(args ...interface{})
+ // Infof logs an information message, patterned after log.Printf.
+ Infof(format string, args ...interface{})
+ // Info logs an information message, patterned after log.Print.
+ Info(args ...interface{})
+}
diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go
index ff1bbfc709e93..eabfaa4bf8460 100644
--- a/plugins/aggregators/all/all.go
+++ b/plugins/aggregators/all/all.go
@@ -2,7 +2,9 @@ package all
import (
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
+ _ "github.com/influxdata/telegraf/plugins/aggregators/final"
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
+ _ "github.com/influxdata/telegraf/plugins/aggregators/merge"
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
_ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter"
)
diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md
index e9318036bdbac..8fef0c6f4886a 100644
--- a/plugins/aggregators/basicstats/README.md
+++ b/plugins/aggregators/basicstats/README.md
@@ -1,6 +1,6 @@
# BasicStats Aggregator Plugin
-The BasicStats aggregator plugin give us count,max,min,mean,sum,s2(variance), stdev for a set of values,
+The BasicStats aggregator plugin give us count,diff,max,min,mean,non_negative_diff,sum,s2(variance), stdev for a set of values,
emitting the aggregate every `period` seconds.
### Configuration:
@@ -10,25 +10,28 @@ emitting the aggregate every `period` seconds.
[[aggregators.basicstats]]
## The period on which to flush & clear the aggregator.
period = "30s"
+
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Configures which basic stats to push as fields
- # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
+ # stats = ["count","diff","min","max","mean","non_negative_diff","stdev","s2","sum"]
```
- stats
- - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum` is not aggregated by default to maintain backwards compatibility.
+ - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum`, `diff` and `non_negative_diff` are not aggregated by default to maintain backwards compatibility.
- If empty array, no stats are aggregated
### Measurements & Fields:
- measurement1
- field1_count
+ - field1_diff (difference)
- field1_max
- field1_min
- field1_mean
+ - field1_non_negative_diff (non-negative difference)
- field1_sum
- field1_s2 (variance)
- field1_stdev (standard deviation)
@@ -43,8 +46,8 @@ No tags are applied by this aggregator.
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
-system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000
+system,host=tars load1_count=2,load1_diff=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
-system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000
+system,host=tars load1_count=2,load1_diff=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000
```
diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go
index d054f39f03e11..4e62ee31123a4 100644
--- a/plugins/aggregators/basicstats/basicstats.go
+++ b/plugins/aggregators/basicstats/basicstats.go
@@ -1,7 +1,6 @@
package basicstats
import (
- "log"
"math"
"github.com/influxdata/telegraf"
@@ -10,25 +9,28 @@ import (
type BasicStats struct {
Stats []string `toml:"stats"`
+ Log telegraf.Logger
cache map[uint64]aggregate
statsConfig *configuredStats
}
type configuredStats struct {
- count bool
- min bool
- max bool
- mean bool
- variance bool
- stdev bool
- sum bool
+ count bool
+ min bool
+ max bool
+ mean bool
+ variance bool
+ stdev bool
+ sum bool
+ diff bool
+ non_negative_diff bool
}
func NewBasicStats() *BasicStats {
- mm := &BasicStats{}
- mm.Reset()
- return mm
+ return &BasicStats{
+ cache: make(map[uint64]aggregate),
+ }
}
type aggregate struct {
@@ -43,12 +45,15 @@ type basicstats struct {
max float64
sum float64
mean float64
- M2 float64 //intermedia value for variance/stdev
+ diff float64
+ M2 float64 //intermediate value for variance/stdev
+ LAST float64 //intermediate value for diff
}
var sampleConfig = `
## The period on which to flush & clear the aggregator.
period = "30s"
+
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
@@ -57,17 +62,17 @@ var sampleConfig = `
# stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
`
-func (m *BasicStats) SampleConfig() string {
+func (*BasicStats) SampleConfig() string {
return sampleConfig
}
-func (m *BasicStats) Description() string {
+func (*BasicStats) Description() string {
return "Keep the aggregate basicstats of each metric passing through."
}
-func (m *BasicStats) Add(in telegraf.Metric) {
+func (b *BasicStats) Add(in telegraf.Metric) {
id := in.HashID()
- if _, ok := m.cache[id]; !ok {
+ if _, ok := b.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
a := aggregate{
name: in.Name(),
@@ -82,28 +87,32 @@ func (m *BasicStats) Add(in telegraf.Metric) {
max: fv,
mean: fv,
sum: fv,
+ diff: 0.0,
M2: 0.0,
+ LAST: fv,
}
}
}
- m.cache[id] = a
+ b.cache[id] = a
} else {
for _, field := range in.FieldList() {
if fv, ok := convert(field.Value); ok {
- if _, ok := m.cache[id].fields[field.Key]; !ok {
+ if _, ok := b.cache[id].fields[field.Key]; !ok {
// hit an uncached field of a cached metric
- m.cache[id].fields[field.Key] = basicstats{
+ b.cache[id].fields[field.Key] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
sum: fv,
+ diff: 0.0,
M2: 0.0,
+ LAST: fv,
}
continue
}
- tmp := m.cache[id].fields[field.Key]
+ tmp := b.cache[id].fields[field.Key]
//https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
//variable initialization
x := fv
@@ -127,33 +136,33 @@ func (m *BasicStats) Add(in telegraf.Metric) {
}
//sum compute
tmp.sum += fv
+ //diff compute
+ tmp.diff = fv - tmp.LAST
//store final data
- m.cache[id].fields[field.Key] = tmp
+ b.cache[id].fields[field.Key] = tmp
}
}
}
}
-func (m *BasicStats) Push(acc telegraf.Accumulator) {
- config := getConfiguredStats(m)
-
- for _, aggregate := range m.cache {
+func (b *BasicStats) Push(acc telegraf.Accumulator) {
+ for _, aggregate := range b.cache {
fields := map[string]interface{}{}
for k, v := range aggregate.fields {
- if config.count {
+ if b.statsConfig.count {
fields[k+"_count"] = v.count
}
- if config.min {
+ if b.statsConfig.min {
fields[k+"_min"] = v.min
}
- if config.max {
+ if b.statsConfig.max {
fields[k+"_max"] = v.max
}
- if config.mean {
+ if b.statsConfig.mean {
fields[k+"_mean"] = v.mean
}
- if config.sum {
+ if b.statsConfig.sum {
fields[k+"_sum"] = v.sum
}
@@ -161,12 +170,19 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) {
if v.count > 1 {
variance := v.M2 / (v.count - 1)
- if config.variance {
+ if b.statsConfig.variance {
fields[k+"_s2"] = variance
}
- if config.stdev {
+ if b.statsConfig.stdev {
fields[k+"_stdev"] = math.Sqrt(variance)
}
+ if b.statsConfig.diff {
+ fields[k+"_diff"] = v.diff
+ }
+ if b.statsConfig.non_negative_diff && v.diff >= 0 {
+ fields[k+"_non_negative_diff"] = v.diff
+ }
+
}
//if count == 1 StdDev = infinite => so I won't send data
}
@@ -177,14 +193,12 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) {
}
}
-func parseStats(names []string) *configuredStats {
-
+// member function for logging.
+func (b *BasicStats) parseStats() *configuredStats {
parsed := &configuredStats{}
- for _, name := range names {
-
+ for _, name := range b.Stats {
switch name {
-
case "count":
parsed.count = true
case "min":
@@ -199,46 +213,38 @@ func parseStats(names []string) *configuredStats {
parsed.stdev = true
case "sum":
parsed.sum = true
+ case "diff":
+ parsed.diff = true
+ case "non_negative_diff":
+ parsed.non_negative_diff = true
default:
- log.Printf("W! Unrecognized basic stat '%s', ignoring", name)
+ b.Log.Warnf("Unrecognized basic stat %q, ignoring", name)
}
}
return parsed
}
-func defaultStats() *configuredStats {
-
- defaults := &configuredStats{}
-
- defaults.count = true
- defaults.min = true
- defaults.max = true
- defaults.mean = true
- defaults.variance = true
- defaults.stdev = true
- defaults.sum = false
-
- return defaults
-}
-
-func getConfiguredStats(m *BasicStats) *configuredStats {
-
- if m.statsConfig == nil {
-
- if m.Stats == nil {
- m.statsConfig = defaultStats()
- } else {
- m.statsConfig = parseStats(m.Stats)
+func (b *BasicStats) getConfiguredStats() {
+ if b.Stats == nil {
+ b.statsConfig = &configuredStats{
+ count: true,
+ min: true,
+ max: true,
+ mean: true,
+ variance: true,
+ stdev: true,
+ sum: false,
+ non_negative_diff: false,
}
+ } else {
+ b.statsConfig = b.parseStats()
}
-
- return m.statsConfig
}
-func (m *BasicStats) Reset() {
- m.cache = make(map[uint64]aggregate)
+func (b *BasicStats) Reset() {
+ b.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
@@ -254,6 +260,12 @@ func convert(in interface{}) (float64, bool) {
}
}
+func (b *BasicStats) Init() error {
+ b.getConfiguredStats()
+
+ return nil
+}
+
func init() {
aggregators.Add("basicstats", func() telegraf.Aggregator {
return NewBasicStats()
diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go
index 040cb0b823580..c5a093840abc7 100644
--- a/plugins/aggregators/basicstats/basicstats_test.go
+++ b/plugins/aggregators/basicstats/basicstats_test.go
@@ -17,6 +17,7 @@ var m1, _ = metric.New("m1",
"b": int64(1),
"c": float64(2),
"d": float64(2),
+ "g": int64(3),
},
time.Now(),
)
@@ -31,12 +32,15 @@ var m2, _ = metric.New("m1",
"f": uint64(200),
"ignoreme": "string",
"andme": true,
+ "g": int64(1),
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax := NewBasicStats()
+ minmax.Log = testutil.Logger{}
+ minmax.getConfiguredStats()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
@@ -48,6 +52,8 @@ func BenchmarkApply(b *testing.B) {
func TestBasicStatsWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
+ minmax.Log = testutil.Logger{}
+ minmax.getConfiguredStats()
minmax.Add(m1)
minmax.Add(m2)
@@ -86,6 +92,12 @@ func TestBasicStatsWithPeriod(t *testing.T) {
"f_max": float64(200),
"f_min": float64(200),
"f_mean": float64(200),
+ "g_count": float64(2), //g
+ "g_max": float64(3),
+ "g_min": float64(1),
+ "g_mean": float64(2),
+ "g_s2": float64(2),
+ "g_stdev": math.Sqrt(2),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -98,6 +110,8 @@ func TestBasicStatsWithPeriod(t *testing.T) {
func TestBasicStatsDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
+ minmax.Log = testutil.Logger{}
+ minmax.getConfiguredStats()
minmax.Add(m1)
minmax.Push(&acc)
@@ -118,6 +132,10 @@ func TestBasicStatsDifferentPeriods(t *testing.T) {
"d_max": float64(2),
"d_min": float64(2),
"d_mean": float64(2),
+ "g_count": float64(1), //g
+ "g_max": float64(3),
+ "g_min": float64(3),
+ "g_mean": float64(3),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -153,6 +171,10 @@ func TestBasicStatsDifferentPeriods(t *testing.T) {
"f_max": float64(200),
"f_min": float64(200),
"f_mean": float64(200),
+ "g_count": float64(1), //g
+ "g_max": float64(1),
+ "g_min": float64(1),
+ "g_mean": float64(1),
}
expectedTags = map[string]string{
"foo": "bar",
@@ -165,6 +187,8 @@ func TestBasicStatsWithOnlyCount(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"count"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -179,6 +203,7 @@ func TestBasicStatsWithOnlyCount(t *testing.T) {
"d_count": float64(2),
"e_count": float64(1),
"f_count": float64(1),
+ "g_count": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -191,6 +216,8 @@ func TestBasicStatsWithOnlyMin(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"min"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -205,6 +232,7 @@ func TestBasicStatsWithOnlyMin(t *testing.T) {
"d_min": float64(2),
"e_min": float64(200),
"f_min": float64(200),
+ "g_min": float64(1),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -217,6 +245,8 @@ func TestBasicStatsWithOnlyMax(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"max"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -231,6 +261,7 @@ func TestBasicStatsWithOnlyMax(t *testing.T) {
"d_max": float64(6),
"e_max": float64(200),
"f_max": float64(200),
+ "g_max": float64(3),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -243,6 +274,8 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"mean"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -257,6 +290,7 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {
"d_mean": float64(4),
"e_mean": float64(200),
"f_mean": float64(200),
+ "g_mean": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -269,6 +303,8 @@ func TestBasicStatsWithOnlySum(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"sum"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -283,6 +319,7 @@ func TestBasicStatsWithOnlySum(t *testing.T) {
"d_sum": float64(8),
"e_sum": float64(200),
"f_sum": float64(200),
+ "g_sum": float64(4),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -326,6 +363,8 @@ func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"sum"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(sum1)
aggregator.Add(sum2)
@@ -347,6 +386,8 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"s2"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -359,6 +400,7 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) {
"b_s2": float64(2),
"c_s2": float64(2),
"d_s2": float64(8),
+ "g_s2": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -371,6 +413,8 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"stdev"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -383,6 +427,7 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {
"b_stdev": math.Sqrt(2),
"c_stdev": math.Sqrt(2),
"d_stdev": math.Sqrt(8),
+ "g_stdev": math.Sqrt(2),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -395,6 +440,8 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"min", "max"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -415,6 +462,61 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
"e_min": float64(200),
"f_max": float64(200), //f
"f_min": float64(200),
+ "g_max": float64(3), //g
+ "g_min": float64(1),
+ }
+ expectedTags := map[string]string{
+ "foo": "bar",
+ }
+ acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
+}
+
+// Test only aggregating diff
+func TestBasicStatsWithDiff(t *testing.T) {
+
+ aggregator := NewBasicStats()
+ aggregator.Stats = []string{"diff"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
+
+ aggregator.Add(m1)
+ aggregator.Add(m2)
+
+ acc := testutil.Accumulator{}
+ aggregator.Push(&acc)
+
+ expectedFields := map[string]interface{}{
+ "a_diff": float64(0),
+ "b_diff": float64(2),
+ "c_diff": float64(2),
+ "d_diff": float64(4),
+ "g_diff": float64(-2),
+ }
+ expectedTags := map[string]string{
+ "foo": "bar",
+ }
+ acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
+}
+
+// Test only aggregating non_negative_diff
+func TestBasicStatsWithNonNegativeDiff(t *testing.T) {
+
+ aggregator := NewBasicStats()
+ aggregator.Stats = []string{"non_negative_diff"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
+
+ aggregator.Add(m1)
+ aggregator.Add(m2)
+
+ acc := testutil.Accumulator{}
+ aggregator.Push(&acc)
+
+ expectedFields := map[string]interface{}{
+ "a_non_negative_diff": float64(0),
+ "b_non_negative_diff": float64(2),
+ "c_non_negative_diff": float64(2),
+ "d_non_negative_diff": float64(4),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -426,7 +528,9 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
func TestBasicStatsWithAllStats(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
+ minmax.Log = testutil.Logger{}
minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum"}
+ minmax.getConfiguredStats()
minmax.Add(m1)
minmax.Add(m2)
@@ -471,6 +575,13 @@ func TestBasicStatsWithAllStats(t *testing.T) {
"f_min": float64(200),
"f_mean": float64(200),
"f_sum": float64(200),
+ "g_count": float64(2), //g
+ "g_max": float64(3),
+ "g_min": float64(1),
+ "g_mean": float64(2),
+ "g_s2": float64(2),
+ "g_stdev": math.Sqrt(2),
+ "g_sum": float64(4),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -483,6 +594,8 @@ func TestBasicStatsWithNoStats(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -498,6 +611,8 @@ func TestBasicStatsWithUnknownStat(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"crazy"}
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
@@ -515,6 +630,8 @@ func TestBasicStatsWithUnknownStat(t *testing.T) {
func TestBasicStatsWithDefaultStats(t *testing.T) {
aggregator := NewBasicStats()
+ aggregator.Log = testutil.Logger{}
+ aggregator.getConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
diff --git a/plugins/aggregators/final/README.md b/plugins/aggregators/final/README.md
new file mode 100644
index 0000000000000..444746d784349
--- /dev/null
+++ b/plugins/aggregators/final/README.md
@@ -0,0 +1,48 @@
+# Final Aggregator Plugin
+
+The final aggregator emits the last metric of a contiguous series. A
+contiguous series is defined as a series which receives updates within the
+time period in `series_timeout`. The contiguous series may be longer than the
+time interval defined by `period`.
+
+This is useful for getting the final value for data sources that produce
+discrete time series such as procstat, cgroup, kubernetes etc.
+
+When a series has not been updated within the time defined in
+`series_timeout`, the last metric is emitted with the `_final` appended.
+
+### Configuration
+
+```toml
+[[aggregators.final]]
+ ## The period on which to flush & clear the aggregator.
+ period = "30s"
+ ## If true, the original metric will be dropped by the
+ ## aggregator and will not get sent to the output plugins.
+ drop_original = false
+
+ ## The time that a series is not updated until considering it final.
+ series_timeout = "5m"
+```
+
+### Metrics
+
+Measurement and tags are unchanged, fields are emitted with the suffix
+`_final`.
+
+### Example Output
+
+```
+counter,host=bar i_final=3,j_final=6 1554281635115090133
+counter,host=foo i_final=3,j_final=6 1554281635112992012
+```
+
+Original input:
+```
+counter,host=bar i=1,j=4 1554281633101153300
+counter,host=foo i=1,j=4 1554281633099323601
+counter,host=bar i=2,j=5 1554281634107980073
+counter,host=foo i=2,j=5 1554281634105931116
+counter,host=bar i=3,j=6 1554281635115090133
+counter,host=foo i=3,j=6 1554281635112992012
+```
diff --git a/plugins/aggregators/final/final.go b/plugins/aggregators/final/final.go
new file mode 100644
index 0000000000000..53ad0a47c9d95
--- /dev/null
+++ b/plugins/aggregators/final/final.go
@@ -0,0 +1,72 @@
+package final
+
+import (
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/aggregators"
+)
+
+var sampleConfig = `
+ ## The period on which to flush & clear the aggregator.
+ period = "30s"
+ ## If true, the original metric will be dropped by the
+ ## aggregator and will not get sent to the output plugins.
+ drop_original = false
+
+ ## The time that a series is not updated until considering it final.
+ series_timeout = "5m"
+`
+
+type Final struct {
+ SeriesTimeout internal.Duration `toml:"series_timeout"`
+
+ // The last metric for all series which are active
+ metricCache map[uint64]telegraf.Metric
+}
+
+func NewFinal() *Final {
+ return &Final{
+ SeriesTimeout: internal.Duration{Duration: 5 * time.Minute},
+ metricCache: make(map[uint64]telegraf.Metric),
+ }
+}
+
+func (m *Final) SampleConfig() string {
+ return sampleConfig
+}
+
+func (m *Final) Description() string {
+ return "Report the final metric of a series"
+}
+
+func (m *Final) Add(in telegraf.Metric) {
+ id := in.HashID()
+ m.metricCache[id] = in
+}
+
+func (m *Final) Push(acc telegraf.Accumulator) {
+ // Preserve timestamp of original metric
+ acc.SetPrecision(time.Nanosecond)
+
+ for id, metric := range m.metricCache {
+ if time.Since(metric.Time()) > m.SeriesTimeout.Duration {
+ fields := map[string]interface{}{}
+ for _, field := range metric.FieldList() {
+ fields[field.Key+"_final"] = field.Value
+ }
+ acc.AddFields(metric.Name(), fields, metric.Tags(), metric.Time())
+ delete(m.metricCache, id)
+ }
+ }
+}
+
+func (m *Final) Reset() {
+}
+
+func init() {
+ aggregators.Add("final", func() telegraf.Aggregator {
+ return NewFinal()
+ })
+}
diff --git a/plugins/aggregators/final/final_test.go b/plugins/aggregators/final/final_test.go
new file mode 100644
index 0000000000000..1b3367fa5b3ad
--- /dev/null
+++ b/plugins/aggregators/final/final_test.go
@@ -0,0 +1,144 @@
+package final
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func TestSimple(t *testing.T) {
+ acc := testutil.Accumulator{}
+ final := NewFinal()
+
+ tags := map[string]string{"foo": "bar"}
+ m1, _ := metric.New("m1",
+ tags,
+ map[string]interface{}{"a": int64(1)},
+ time.Unix(1530939936, 0))
+ m2, _ := metric.New("m1",
+ tags,
+ map[string]interface{}{"a": int64(2)},
+ time.Unix(1530939937, 0))
+ m3, _ := metric.New("m1",
+ tags,
+ map[string]interface{}{"a": int64(3)},
+ time.Unix(1530939938, 0))
+ final.Add(m1)
+ final.Add(m2)
+ final.Add(m3)
+ final.Push(&acc)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "m1",
+ tags,
+ map[string]interface{}{
+ "a_final": 3,
+ },
+ time.Unix(1530939938, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
+}
+
+func TestTwoTags(t *testing.T) {
+ acc := testutil.Accumulator{}
+ final := NewFinal()
+
+ tags1 := map[string]string{"foo": "bar"}
+ tags2 := map[string]string{"foo": "baz"}
+
+ m1, _ := metric.New("m1",
+ tags1,
+ map[string]interface{}{"a": int64(1)},
+ time.Unix(1530939936, 0))
+ m2, _ := metric.New("m1",
+ tags2,
+ map[string]interface{}{"a": int64(2)},
+ time.Unix(1530939937, 0))
+ m3, _ := metric.New("m1",
+ tags1,
+ map[string]interface{}{"a": int64(3)},
+ time.Unix(1530939938, 0))
+ final.Add(m1)
+ final.Add(m2)
+ final.Add(m3)
+ final.Push(&acc)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "m1",
+ tags2,
+ map[string]interface{}{
+ "a_final": 2,
+ },
+ time.Unix(1530939937, 0),
+ ),
+ testutil.MustMetric(
+ "m1",
+ tags1,
+ map[string]interface{}{
+ "a_final": 3,
+ },
+ time.Unix(1530939938, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics())
+}
+
+func TestLongDifference(t *testing.T) {
+ acc := testutil.Accumulator{}
+ final := NewFinal()
+ final.SeriesTimeout = internal.Duration{Duration: 30 * time.Second}
+ tags := map[string]string{"foo": "bar"}
+
+ now := time.Now()
+
+ m1, _ := metric.New("m",
+ tags,
+ map[string]interface{}{"a": int64(1)},
+ now.Add(time.Second*-290))
+ m2, _ := metric.New("m",
+ tags,
+ map[string]interface{}{"a": int64(2)},
+ now.Add(time.Second*-275))
+ m3, _ := metric.New("m",
+ tags,
+ map[string]interface{}{"a": int64(3)},
+ now.Add(time.Second*-100))
+ m4, _ := metric.New("m",
+ tags,
+ map[string]interface{}{"a": int64(4)},
+ now.Add(time.Second*-20))
+ final.Add(m1)
+ final.Add(m2)
+ final.Push(&acc)
+ final.Add(m3)
+ final.Push(&acc)
+ final.Add(m4)
+ final.Push(&acc)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "m",
+ tags,
+ map[string]interface{}{
+ "a_final": 2,
+ },
+ now.Add(time.Second*-275),
+ ),
+ testutil.MustMetric(
+ "m",
+ tags,
+ map[string]interface{}{
+ "a_final": 3,
+ },
+ now.Add(time.Second*-100),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics())
+}
diff --git a/plugins/aggregators/histogram/README.md b/plugins/aggregators/histogram/README.md
index f9dafd7890e0f..f0b6c15b11804 100644
--- a/plugins/aggregators/histogram/README.md
+++ b/plugins/aggregators/histogram/README.md
@@ -3,8 +3,9 @@
The histogram aggregator plugin creates histograms containing the counts of
field values within a range.
-Values added to a bucket are also added to the larger buckets in the
-distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).
+If `cumulative` is set to true, values added to a bucket are also added to the
+larger buckets in the distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).
+Otherwise, values are added to only one bucket, which creates an [ordinary histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg)
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
By default bucket counts are not reset between periods and will be non-strictly
@@ -16,7 +17,7 @@ increasing while Telegraf is running. This behavior can be changed by setting th
Each metric is passed to the aggregator and this aggregator searches
histogram buckets for those fields, which have been specified in the
config. If buckets are found, the aggregator will increment +1 to the appropriate
-bucket otherwise it will be added to the `+Inf` bucket. Every `period`
+bucket. Otherwise, it will be added to the `+Inf` bucket. Every `period`
seconds this data will be forwarded to the outputs.
The algorithm of hit counting to buckets was implemented on the base
@@ -39,16 +40,20 @@ of the algorithm which is implemented in the Prometheus
## of accumulating the results.
reset = false
+ ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added.
+ ## Defaults to true.
+ cumulative = true
+
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
- # ## The set of buckets.
+ # ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
- # ## The set of buckets.
+ # ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
@@ -64,8 +69,9 @@ option. Optionally, if `fields` is set only the fields listed will be
aggregated. If `fields` is not set all fields are aggregated.
The `buckets` option contains a list of floats which specify the bucket
-boundaries. Each float value defines the inclusive upper bound of the bucket.
+boundaries. Each float value defines the inclusive upper (right) bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.
+(For left boundaries, these specified bucket borders and `-Inf` will be used).
### Measurements & Fields:
@@ -77,26 +83,43 @@ The postfix `bucket` will be added to each field key.
### Tags:
-All measurements are given the tag `le`. This tag has the border value of
-bucket. It means that the metric value is less than or equal to the value of
-this tag. For example, let assume that we have the metric value 10 and the
-following buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value
-10, because the metrics value is passed into bucket with right border value
-`10`.
+* `cumulative = true` (default):
+ * `le`: Right bucket border. It means that the metric value is less than or
+ equal to the value of this tag. If a metric value is sorted into a bucket,
+ it is also sorted into all larger buckets. As a result, the value of
+ `_bucket` is rising with rising `le` value. When `le` is `+Inf`,
+ the bucket value is the count of all metrics, because all metric values are
+ less than or equal to positive infinity.
+* `cumulative = false`:
+ * `gt`: Left bucket border. It means that the metric value is greater than
+ (and not equal to) the value of this tag.
+ * `le`: Right bucket border. It means that the metric value is less than or
+ equal to the value of this tag.
+ * As both `gt` and `le` are present, each metric is sorted in only exactly
+ one bucket.
+
### Example Output:
+Let assume we have the buckets [0, 10, 50, 100] and the following field values
+for `usage_idle`: [50, 7, 99, 12]
+
+With `cumulative = true`:
+
+```
+cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none
+cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7
+cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000 # 7, 12
+cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=4i 1486998330000000000 # 7, 12, 50, 99
+cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=4i 1486998330000000000 # 7, 12, 50, 99
+```
+
+With `cumulative = false`:
+
```
-cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=20.0 usage_idle_bucket=1i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=30.0 usage_idle_bucket=2i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=40.0 usage_idle_bucket=2i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=60.0 usage_idle_bucket=2i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=70.0 usage_idle_bucket=2i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=80.0 usage_idle_bucket=2i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=90.0 usage_idle_bucket=2i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=2i 1486998330000000000
-cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=2i 1486998330000000000
+cpu,cpu=cpu1,host=localhost,gt=-Inf,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none
+cpu,cpu=cpu1,host=localhost,gt=0.0,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7
+cpu,cpu=cpu1,host=localhost,gt=10.0,le=50.0 usage_idle_bucket=1i 1486998330000000000 # 12
+cpu,cpu=cpu1,host=localhost,gt=50.0,le=100.0 usage_idle_bucket=2i 1486998330000000000 # 50, 99
+cpu,cpu=cpu1,host=localhost,gt=100.0,le=+Inf usage_idle_bucket=0i 1486998330000000000 # none
```
diff --git a/plugins/aggregators/histogram/histogram.go b/plugins/aggregators/histogram/histogram.go
index a565d89023ba5..dab524d62782e 100644
--- a/plugins/aggregators/histogram/histogram.go
+++ b/plugins/aggregators/histogram/histogram.go
@@ -8,16 +8,23 @@ import (
"github.com/influxdata/telegraf/plugins/aggregators"
)
-// bucketTag is the tag, which contains right bucket border
-const bucketTag = "le"
+// bucketRightTag is the tag, which contains right bucket border
+const bucketRightTag = "le"
-// bucketInf is the right bucket border for infinite values
-const bucketInf = "+Inf"
+// bucketPosInf is the right bucket border for infinite values
+const bucketPosInf = "+Inf"
+
+// bucketLeftTag is the tag, which contains left bucket border (exclusive)
+const bucketLeftTag = "gt"
+
+// bucketNegInf is the left bucket border for infinite values
+const bucketNegInf = "-Inf"
// HistogramAggregator is aggregator with histogram configs and particular histograms for defined metrics
type HistogramAggregator struct {
Configs []config `toml:"config"`
ResetBuckets bool `toml:"reset"`
+ Cumulative bool `toml:"cumulative"`
buckets bucketsByMetrics
cache map[uint64]metricHistogramCollection
@@ -57,8 +64,10 @@ type groupedByCountFields struct {
}
// NewHistogramAggregator creates new histogram aggregator
-func NewHistogramAggregator() telegraf.Aggregator {
- h := &HistogramAggregator{}
+func NewHistogramAggregator() *HistogramAggregator {
+ h := &HistogramAggregator{
+ Cumulative: true,
+ }
h.buckets = make(bucketsByMetrics)
h.resetCache()
@@ -77,16 +86,20 @@ var sampleConfig = `
## of accumulating the results.
reset = false
+ ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added.
+ ## Defaults to true.
+ cumulative = true
+
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
- # ## The set of buckets.
+ # ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
- # ## The set of buckets.
+ # ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
@@ -167,18 +180,27 @@ func (h *HistogramAggregator) groupFieldsByBuckets(
tags map[string]string,
counts []int64,
) {
- count := int64(0)
- for index, bucket := range h.getBuckets(name, field) {
- count += counts[index]
+ sum := int64(0)
+ buckets := h.getBuckets(name, field) // note that len(buckets) + 1 == len(counts)
- tags[bucketTag] = strconv.FormatFloat(bucket, 'f', -1, 64)
- h.groupField(metricsWithGroupedFields, name, field, count, copyTags(tags))
- }
+ for index, count := range counts {
+ if !h.Cumulative {
+ sum = 0 // reset sum -> don't store cumulative counts
- count += counts[len(counts)-1]
- tags[bucketTag] = bucketInf
+ tags[bucketLeftTag] = bucketNegInf
+ if index > 0 {
+ tags[bucketLeftTag] = strconv.FormatFloat(buckets[index-1], 'f', -1, 64)
+ }
+ }
- h.groupField(metricsWithGroupedFields, name, field, count, tags)
+ tags[bucketRightTag] = bucketPosInf
+ if index < len(buckets) {
+ tags[bucketRightTag] = strconv.FormatFloat(buckets[index], 'f', -1, 64)
+ }
+
+ sum += count
+ h.groupField(metricsWithGroupedFields, name, field, sum, copyTags(tags))
+ }
}
// groupField groups field by count value
diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go
index 69423583160d1..dfb3f5d12dfa8 100644
--- a/plugins/aggregators/histogram/histogram_test.go
+++ b/plugins/aggregators/histogram/histogram_test.go
@@ -11,11 +11,15 @@ import (
"github.com/stretchr/testify/assert"
)
+type fields map[string]interface{}
+type tags map[string]string
+
// NewTestHistogram creates new test histogram aggregation with specified config
-func NewTestHistogram(cfg []config, reset bool) telegraf.Aggregator {
- htm := &HistogramAggregator{Configs: cfg, ResetBuckets: reset}
- htm.buckets = make(bucketsByMetrics)
- htm.resetCache()
+func NewTestHistogram(cfg []config, reset bool, cumulative bool) telegraf.Aggregator {
+ htm := NewHistogramAggregator()
+ htm.Configs = cfg
+ htm.ResetBuckets = reset
+ htm.Cumulative = cumulative
return htm
}
@@ -23,8 +27,8 @@ func NewTestHistogram(cfg []config, reset bool) telegraf.Aggregator {
// firstMetric1 is the first test metric
var firstMetric1, _ = metric.New(
"first_metric_name",
- map[string]string{"tag_name": "tag_value"},
- map[string]interface{}{
+ tags{},
+ fields{
"a": float64(15.3),
"b": float64(40),
},
@@ -34,8 +38,8 @@ var firstMetric1, _ = metric.New(
// firstMetric1 is the first test metric with other value
var firstMetric2, _ = metric.New(
"first_metric_name",
- map[string]string{"tag_name": "tag_value"},
- map[string]interface{}{
+ tags{},
+ fields{
"a": float64(15.9),
"c": float64(40),
},
@@ -45,8 +49,8 @@ var firstMetric2, _ = metric.New(
// secondMetric is the second metric
var secondMetric, _ = metric.New(
"second_metric_name",
- map[string]string{"tag_name": "tag_value"},
- map[string]interface{}{
+ tags{},
+ fields{
"a": float64(105),
"ignoreme": "string",
"andme": true,
@@ -65,11 +69,35 @@ func BenchmarkApply(b *testing.B) {
}
}
-// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
-func TestHistogramWithPeriodAndOneField(t *testing.T) {
+// TestHistogram tests metrics for one period and for one field
+func TestHistogram(t *testing.T) {
+ var cfg []config
+ cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
+ histogram := NewTestHistogram(cfg, false, true)
+
+ acc := &testutil.Accumulator{}
+
+ histogram.Add(firstMetric1)
+ histogram.Reset()
+ histogram.Add(firstMetric2)
+ histogram.Push(acc)
+
+ if len(acc.Metrics) != 6 {
+ assert.Fail(t, "Incorrect number of metrics")
+ }
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: bucketPosInf})
+}
+
+// TestHistogramNonCumulative tests metrics for one period and for one field
+func TestHistogramNonCumulative(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
- histogram := NewTestHistogram(cfg, false)
+ histogram := NewTestHistogram(cfg, false, false)
acc := &testutil.Accumulator{}
@@ -81,19 +109,19 @@ func TestHistogramWithPeriodAndOneField(t *testing.T) {
if len(acc.Metrics) != 6 {
assert.Fail(t, "Incorrect number of metrics")
}
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "20")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "30")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "40")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf)
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketLeftTag: "10", bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "20", bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "30", bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "40", bucketRightTag: bucketPosInf})
}
-// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
+// TestHistogramWithReset tests metrics for one period and for one field, with reset between metrics adding
func TestHistogramWithReset(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
- histogram := NewTestHistogram(cfg, true)
+ histogram := NewTestHistogram(cfg, true, true)
acc := &testutil.Accumulator{}
@@ -105,20 +133,53 @@ func TestHistogramWithReset(t *testing.T) {
if len(acc.Metrics) != 6 {
assert.Fail(t, "Incorrect number of metrics")
}
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, "20")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, "30")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, "40")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, bucketInf)
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf})
+}
+
+// TestHistogramWithAllFields tests two metrics for one period and for all fields
+func TestHistogramWithAllFields(t *testing.T) {
+ var cfg []config
+ cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}})
+ cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}})
+ histogram := NewTestHistogram(cfg, false, true)
+
+ acc := &testutil.Accumulator{}
+
+ histogram.Add(firstMetric1)
+ histogram.Add(firstMetric2)
+ histogram.Add(secondMetric)
+ histogram.Push(acc)
+
+ if len(acc.Metrics) != 12 {
+ assert.Fail(t, "Incorrect number of metrics")
+ }
+
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "15.5"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf})
+
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "4"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "23"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: bucketPosInf})
}
-// TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields
-func TestHistogramWithPeriodAndAllFields(t *testing.T) {
+// TestHistogramWithAllFieldsNonCumulative tests two metrics for one period and for all fields
+func TestHistogramWithAllFieldsNonCumulative(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}})
cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}})
- histogram := NewTestHistogram(cfg, false)
+ histogram := NewTestHistogram(cfg, false, false)
acc := &testutil.Accumulator{}
@@ -131,50 +192,50 @@ func TestHistogramWithPeriodAndAllFields(t *testing.T) {
assert.Fail(t, "Incorrect number of metrics")
}
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, "15.5")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
-
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "4")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "10")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "23")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "30")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, bucketInf)
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "15.5"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "15.5", bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "20", bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketLeftTag: "30", bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "40", bucketRightTag: bucketPosInf})
+
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "4"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "4", bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "10", bucketRightTag: "23"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "23", bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "30", bucketRightTag: bucketPosInf})
}
-// TestHistogramDifferentPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
+// TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
// getting added in different periods) for all fields
-func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) {
+func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
- histogram := NewTestHistogram(cfg, false)
+ histogram := NewTestHistogram(cfg, false, true)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Push(acc)
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "10")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "20")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "30")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, "40")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, bucketInf)
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0)}, tags{bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0)}, tags{bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(1)}, tags{bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf})
acc.ClearMetrics()
histogram.Add(firstMetric2)
histogram.Push(acc)
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "10")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf})
}
// TestWrongBucketsOrder tests the calling panic with incorrect order of buckets
@@ -191,35 +252,42 @@ func TestWrongBucketsOrder(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
- histogram := NewTestHistogram(cfg, false)
+ histogram := NewTestHistogram(cfg, false, true)
histogram.Add(firstMetric2)
}
// assertContainsTaggedField is help functions to test histogram data
-func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, le string) {
+func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, tags map[string]string) {
acc.Lock()
defer acc.Unlock()
for _, checkedMetric := range acc.Metrics {
- // check metric name
+ // filter by metric name
if checkedMetric.Measurement != metricName {
continue
}
- // check "le" tag
- if checkedMetric.Tags[bucketTag] != le {
+ // filter by tags
+ isTagsIdentical := true
+ for tag := range tags {
+ if val, ok := checkedMetric.Tags[tag]; !ok || val != tags[tag] {
+ isTagsIdentical = false
+ break
+ }
+ }
+ if !isTagsIdentical {
continue
}
- // check fields
- isFieldsIdentical := true
+ // filter by field keys
+ isFieldKeysIdentical := true
for field := range fields {
if _, ok := checkedMetric.Fields[field]; !ok {
- isFieldsIdentical = false
+ isFieldKeysIdentical = false
break
}
}
- if !isFieldsIdentical {
+ if !isFieldKeysIdentical {
continue
}
@@ -228,8 +296,8 @@ func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricNa
return
}
- assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", fields, metricName))
+ assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", checkedMetric.Fields, metricName))
}
- assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, map[string]string{"le": le}, fields))
+ assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields))
}
diff --git a/plugins/aggregators/merge/README.md b/plugins/aggregators/merge/README.md
new file mode 100644
index 0000000000000..89f7f0983c692
--- /dev/null
+++ b/plugins/aggregators/merge/README.md
@@ -0,0 +1,25 @@
+# Merge Aggregator
+
+Merge metrics together into a metric with multiple fields into the most memory
+and network transfer efficient form.
+
+Use this plugin when fields are split over multiple metrics, with the same
+measurement, tag set and timestamp. By merging into a single metric they can
+be handled more efficiently by the output.
+
+### Configuration
+
+```toml
+[[aggregators.merge]]
+ ## If true, the original metric will be dropped by the
+ ## aggregator and will not get sent to the output plugins.
+ drop_original = true
+```
+
+### Example
+
+```diff
+- cpu,host=localhost usage_time=42 1567562620000000000
+- cpu,host=localhost idle_time=42 1567562620000000000
++ cpu,host=localhost idle_time=42,usage_time=42 1567562620000000000
+```
diff --git a/plugins/aggregators/merge/merge.go b/plugins/aggregators/merge/merge.go
new file mode 100644
index 0000000000000..083c8fd3e6b0a
--- /dev/null
+++ b/plugins/aggregators/merge/merge.go
@@ -0,0 +1,66 @@
+package seriesgrouper
+
+import (
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/aggregators"
+)
+
+const (
+ description = "Merge metrics into multifield metrics by series key"
+ sampleConfig = `
+ ## If true, the original metric will be dropped by the
+ ## aggregator and will not get sent to the output plugins.
+ drop_original = true
+`
+)
+
+type Merge struct {
+ grouper *metric.SeriesGrouper
+ log telegraf.Logger
+}
+
+func (a *Merge) Init() error {
+ a.grouper = metric.NewSeriesGrouper()
+ return nil
+}
+
+func (a *Merge) Description() string {
+ return description
+}
+
+func (a *Merge) SampleConfig() string {
+ return sampleConfig
+}
+
+func (a *Merge) Add(m telegraf.Metric) {
+ tags := m.Tags()
+ for _, field := range m.FieldList() {
+ err := a.grouper.Add(m.Name(), tags, m.Time(), field.Key, field.Value)
+ if err != nil {
+ a.log.Errorf("Error adding metric: %v", err)
+ }
+ }
+}
+
+func (a *Merge) Push(acc telegraf.Accumulator) {
+ // Always use nanosecond precision to avoid rounding metrics that were
+ // produced at a precision higher than the agent default.
+ acc.SetPrecision(time.Nanosecond)
+
+ for _, m := range a.grouper.Metrics() {
+ acc.AddMetric(m)
+ }
+}
+
+func (a *Merge) Reset() {
+ a.grouper = metric.NewSeriesGrouper()
+}
+
+func init() {
+ aggregators.Add("merge", func() telegraf.Aggregator {
+ return &Merge{}
+ })
+}
diff --git a/plugins/aggregators/merge/merge_test.go b/plugins/aggregators/merge/merge_test.go
new file mode 100644
index 0000000000000..2f2703c8f4b7c
--- /dev/null
+++ b/plugins/aggregators/merge/merge_test.go
@@ -0,0 +1,186 @@
+package seriesgrouper
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSimple(t *testing.T) {
+ plugin := &Merge{}
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ plugin.Add(
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ )
+ require.NoError(t, err)
+
+ plugin.Add(
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_guest": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ )
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ plugin.Push(&acc)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ "time_guest": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
+}
+
+func TestNanosecondPrecision(t *testing.T) {
+ plugin := &Merge{}
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ plugin.Add(
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 1),
+ ),
+ )
+ require.NoError(t, err)
+
+ plugin.Add(
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_guest": 42,
+ },
+ time.Unix(0, 1),
+ ),
+ )
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ acc.SetPrecision(time.Second)
+ plugin.Push(&acc)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ "time_guest": 42,
+ },
+ time.Unix(0, 1),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
+}
+
+func TestReset(t *testing.T) {
+ plugin := &Merge{}
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ plugin.Add(
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ )
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ plugin.Push(&acc)
+
+ plugin.Reset()
+
+ plugin.Add(
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_guest": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ )
+ require.NoError(t, err)
+
+ plugin.Push(&acc)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_guest": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
+}
diff --git a/plugins/common/encoding/decoder.go b/plugins/common/encoding/decoder.go
new file mode 100644
index 0000000000000..8bc3b7f92fe1e
--- /dev/null
+++ b/plugins/common/encoding/decoder.go
@@ -0,0 +1,34 @@
+package encoding
+
+import (
+ "errors"
+
+ "golang.org/x/text/encoding"
+ "golang.org/x/text/encoding/unicode"
+)
+
+// NewDecoder returns a x/text Decoder for the specified text encoding. The
+// Decoder converts a character encoding into utf-8 bytes. If a BOM is found
+// it will be converted into a utf-8 BOM, you can use
+// github.com/dimchansky/utfbom to strip the BOM.
+//
+// The "none" or "" encoding will pass through bytes unchecked. Use the utf-8
+// encoding if you want invalid bytes replaced using the the unicode
+// replacement character.
+//
+// Detection of utf-16 endianness using the BOM is not currently provided due
+// to the tail input plugins requirement to be able to start at the middle or
+// end of the file.
+func NewDecoder(enc string) (*Decoder, error) {
+ switch enc {
+ case "utf-8":
+ return &Decoder{Transformer: unicode.UTF8.NewDecoder()}, nil
+ case "utf-16le":
+ return newDecoder(unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewDecoder()), nil
+ case "utf-16be":
+ return newDecoder(unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM).NewDecoder()), nil
+ case "none", "":
+ return newDecoder(encoding.Nop.NewDecoder()), nil
+ }
+ return nil, errors.New("unknown character encoding")
+}
diff --git a/plugins/common/encoding/decoder_reader.go b/plugins/common/encoding/decoder_reader.go
new file mode 100644
index 0000000000000..7324c8e72e883
--- /dev/null
+++ b/plugins/common/encoding/decoder_reader.go
@@ -0,0 +1,171 @@
+package encoding
+
+import (
+ "errors"
+ "io"
+
+ "golang.org/x/text/transform"
+)
+
+// Other than resetting r.err and r.transformComplete in Read() this
+// was copied from x/text
+
+func newDecoder(t transform.Transformer) *Decoder {
+ return &Decoder{Transformer: t}
+}
+
+// A Decoder converts bytes to UTF-8. It implements transform.Transformer.
+//
+// Transforming source bytes that are not of that encoding will not result in an
+// error per se. Each byte that cannot be transcoded will be represented in the
+// output by the UTF-8 encoding of '\uFFFD', the replacement rune.
+type Decoder struct {
+ transform.Transformer
+
+ // This forces external creators of Decoders to use names in struct
+ // initializers, allowing for future extendibility without having to break
+ // code.
+ _ struct{}
+}
+
+// Bytes converts the given encoded bytes to UTF-8. It returns the converted
+// bytes or nil, err if any error occurred.
+func (d *Decoder) Bytes(b []byte) ([]byte, error) {
+ b, _, err := transform.Bytes(d, b)
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// String converts the given encoded string to UTF-8. It returns the converted
+// string or "", err if any error occurred.
+func (d *Decoder) String(s string) (string, error) {
+ s, _, err := transform.String(d, s)
+ if err != nil {
+ return "", err
+ }
+ return s, nil
+}
+
+// Reader wraps another Reader to decode its bytes.
+//
+// The Decoder may not be used for any other operation as long as the returned
+// Reader is in use.
+func (d *Decoder) Reader(r io.Reader) io.Reader {
+ return NewReader(r, d)
+}
+
+// Reader wraps another io.Reader by transforming the bytes read.
+type Reader struct {
+ r io.Reader
+ t transform.Transformer
+ err error
+
+ // dst[dst0:dst1] contains bytes that have been transformed by t but
+ // not yet copied out via Read.
+ dst []byte
+ dst0, dst1 int
+
+ // src[src0:src1] contains bytes that have been read from r but not
+ // yet transformed through t.
+ src []byte
+ src0, src1 int
+
+ // transformComplete is whether the transformation is complete,
+ // regardless of whether or not it was successful.
+ transformComplete bool
+}
+
+var (
+ // ErrShortDst means that the destination buffer was too short to
+ // receive all of the transformed bytes.
+ ErrShortDst = errors.New("transform: short destination buffer")
+
+ // ErrShortSrc means that the source buffer has insufficient data to
+ // complete the transformation.
+ ErrShortSrc = errors.New("transform: short source buffer")
+
+ // errInconsistentByteCount means that Transform returned success (nil
+ // error) but also returned nSrc inconsistent with the src argument.
+ errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
+)
+
+const defaultBufSize = 4096
+
+// NewReader returns a new Reader that wraps r by transforming the bytes read
+// via t. It calls Reset on t.
+func NewReader(r io.Reader, t transform.Transformer) *Reader {
+ t.Reset()
+ return &Reader{
+ r: r,
+ t: t,
+ dst: make([]byte, defaultBufSize),
+ src: make([]byte, defaultBufSize),
+ }
+}
+
+// Read implements the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ // Clear previous errors so a Read can be performed even if the last call
+ // returned EOF.
+ r.err = nil
+ r.transformComplete = false
+
+ n, err := 0, error(nil)
+ for {
+ // Copy out any transformed bytes and return the final error if we are done.
+ if r.dst0 != r.dst1 {
+ n = copy(p, r.dst[r.dst0:r.dst1])
+ r.dst0 += n
+ if r.dst0 == r.dst1 && r.transformComplete {
+ return n, r.err
+ }
+ return n, nil
+ } else if r.transformComplete {
+ return 0, r.err
+ }
+
+ // Try to transform some source bytes, or to flush the transformer if we
+ // are out of source bytes. We do this even if r.r.Read returned an error.
+ // As the io.Reader documentation says, "process the n > 0 bytes returned
+ // before considering the error".
+ if r.src0 != r.src1 || r.err != nil {
+ r.dst0 = 0
+ r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
+ r.src0 += n
+
+ switch {
+ case err == nil:
+ if r.src0 != r.src1 {
+ r.err = errInconsistentByteCount
+ }
+ // The Transform call was successful; we are complete if we
+ // cannot read more bytes into src.
+ r.transformComplete = r.err != nil
+ continue
+ case err == ErrShortDst && (r.dst1 != 0 || n != 0):
+ // Make room in dst by copying out, and try again.
+ continue
+ case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
+ // Read more bytes into src via the code below, and try again.
+ default:
+ r.transformComplete = true
+ // The reader error (r.err) takes precedence over the
+ // transformer error (err) unless r.err is nil or io.EOF.
+ if r.err == nil || r.err == io.EOF {
+ r.err = err
+ }
+ continue
+ }
+ }
+
+ // Move any untransformed source bytes to the start of the buffer
+ // and read more bytes.
+ if r.src0 != 0 {
+ r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
+ }
+ n, r.err = r.r.Read(r.src[r.src1:])
+ r.src1 += n
+ }
+}
diff --git a/plugins/common/encoding/decoder_test.go b/plugins/common/encoding/decoder_test.go
new file mode 100644
index 0000000000000..87115318ad0ed
--- /dev/null
+++ b/plugins/common/encoding/decoder_test.go
@@ -0,0 +1,78 @@
+package encoding
+
+import (
+ "bytes"
+ "io/ioutil"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestDecoder(t *testing.T) {
+ tests := []struct {
+ name string
+ encoding string
+ input []byte
+ expected []byte
+ expectedErr bool
+ }{
+ {
+ name: "no decoder utf-8",
+ encoding: "",
+ input: []byte("howdy"),
+ expected: []byte("howdy"),
+ },
+ {
+ name: "utf-8 decoder",
+ encoding: "utf-8",
+ input: []byte("howdy"),
+ expected: []byte("howdy"),
+ },
+ {
+ name: "utf-8 decoder invalid bytes replaced with replacement char",
+ encoding: "utf-8",
+ input: []byte("\xff\xfe"),
+ expected: []byte("\uFFFD\uFFFD"),
+ },
+ {
+ name: "utf-16le decoder no BOM",
+ encoding: "utf-16le",
+ input: []byte("h\x00o\x00w\x00d\x00y\x00"),
+ expected: []byte("howdy"),
+ },
+ {
+ name: "utf-16le decoder with BOM",
+ encoding: "utf-16le",
+ input: []byte("\xff\xfeh\x00o\x00w\x00d\x00y\x00"),
+ expected: []byte("\xef\xbb\xbfhowdy"),
+ },
+ {
+ name: "utf-16be decoder no BOM",
+ encoding: "utf-16be",
+ input: []byte("\x00h\x00o\x00w\x00d\x00y"),
+ expected: []byte("howdy"),
+ },
+ {
+ name: "utf-16be decoder with BOM",
+ encoding: "utf-16be",
+ input: []byte("\xfe\xff\x00h\x00o\x00w\x00d\x00y"),
+ expected: []byte("\xef\xbb\xbfhowdy"),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ decoder, err := NewDecoder(tt.encoding)
+ require.NoError(t, err)
+ buf := bytes.NewBuffer(tt.input)
+ r := decoder.Reader(buf)
+ actual, err := ioutil.ReadAll(r)
+ if tt.expectedErr {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/plugins/common/kafka/sasl.go b/plugins/common/kafka/sasl.go
new file mode 100644
index 0000000000000..cd3358b3833ec
--- /dev/null
+++ b/plugins/common/kafka/sasl.go
@@ -0,0 +1,25 @@
+package kafka
+
+import (
+ "errors"
+
+ "github.com/Shopify/sarama"
+)
+
+func SASLVersion(kafkaVersion sarama.KafkaVersion, saslVersion *int) (int16, error) {
+ if saslVersion == nil {
+ if kafkaVersion.IsAtLeast(sarama.V1_0_0_0) {
+ return sarama.SASLHandshakeV1, nil
+ }
+ return sarama.SASLHandshakeV0, nil
+ }
+
+ switch *saslVersion {
+ case 0:
+ return sarama.SASLHandshakeV0, nil
+ case 1:
+ return sarama.SASLHandshakeV1, nil
+ default:
+ return 0, errors.New("invalid SASL version")
+ }
+}
diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go
new file mode 100644
index 0000000000000..a7f99023be1ba
--- /dev/null
+++ b/plugins/common/logrus/hook.go
@@ -0,0 +1,35 @@
+package logrus
+
+import (
+ "io/ioutil"
+ "log"
+ "strings"
+ "sync"
+
+ "github.com/sirupsen/logrus"
+)
+
+var once sync.Once
+
+type LogHook struct {
+}
+
+// Install a logging hook into the logrus standard logger, diverting all logs
+// through the Telegraf logger at debug level. This is useful for libraries
+// that directly log to the logrus system without providing an override method.
+func InstallHook() {
+ once.Do(func() {
+ logrus.SetOutput(ioutil.Discard)
+ logrus.AddHook(&LogHook{})
+ })
+}
+
+func (h *LogHook) Fire(entry *logrus.Entry) error {
+ msg := strings.ReplaceAll(entry.Message, "\n", " ")
+ log.Print("D! [logrus] ", msg)
+ return nil
+}
+
+func (h *LogHook) Levels() []logrus.Level {
+ return logrus.AllLevels
+}
diff --git a/plugins/common/shim/README.md b/plugins/common/shim/README.md
new file mode 100644
index 0000000000000..5453c90a4d548
--- /dev/null
+++ b/plugins/common/shim/README.md
@@ -0,0 +1,63 @@
+# Telegraf Execd Go Shim
+
+The goal of this _shim_ is to make it trivial to extract an internal input,
+processor, or output plugin from the main Telegraf repo out to a stand-alone
+repo. This allows anyone to build and run it as a separate app using one of the
+execd plugins:
+- [inputs.execd](/plugins/inputs/execd)
+- [processors.execd](/plugins/processors/execd)
+- [outputs.execd](/plugins/outputs/execd)
+
+## Steps to externalize a plugin
+
+1. Move the project to an external repo, it's recommended to preserve the path
+ structure, (but not strictly necessary). eg if your plugin was at
+ `plugins/inputs/cpu`, it's recommended that it also be under `plugins/inputs/cpu`
+ in the new repo. For a further example of what this might look like, take a
+ look at [ssoroka/rand](https://github.com/ssoroka/rand) or
+ [danielnelson/telegraf-plugins](https://github.com/danielnelson/telegraf-plugins)
+1. Copy [main.go](./example/cmd/main.go) into your project under the `cmd` folder.
+ This will be the entrypoint to the plugin when run as a stand-alone program, and
+ it will call the shim code for you to make that happen. It's recommended to
+ have only one plugin per repo, as the shim is not designed to run multiple
+ plugins at the same time (it would vastly complicate things).
+1. Edit the main.go file to import your plugin. Within Telegraf this would have
+ been done in an all.go file, but here we don't split the two apart, and the change
+ just goes in the top of main.go. If you skip this step, your plugin will do nothing.
+ eg: `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"`
+1. Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration
+ specific to your plugin. Note that this config file **must be separate from the
+ rest of the config for Telegraf, and must not be in a shared directory where
+ Telegraf is expecting to load all configs**. If Telegraf reads this config file
+ it will not know which plugin it relates to. Telegraf instead uses an execd config
+ block to look for this plugin.
+
+## Steps to build and run your plugin
+
+1. Build the cmd/main.go. For my rand project this looks like `go build -o rand cmd/main.go`
+1. If you're building an input, you can test out the binary just by running it.
+ eg `./rand -config plugin.conf`
+ Depending on your polling settings and whether you implemented a service plugin or
+ an input gathering plugin, you may see data right away, or you may have to hit enter
+ first, or wait for your poll duration to elapse, but the metrics will be written to
+ STDOUT. Ctrl-C to end your test.
+ If you're testig a processor or output manually, you can still do this but you
+ will need to feed valid metrics in on STDIN to verify that it is doing what you
+ want. This can be a very valuable debugging technique before hooking it up to
+ Telegraf.
+1. Configure Telegraf to call your new plugin binary. For an input, this would
+ look something like:
+
+```toml
+[[inputs.execd]]
+ command = ["/path/to/rand", "-config", "/path/to/plugin.conf"]
+ signal = "none"
+```
+
+ Refer to the execd plugin readmes for more information.
+
+## Congratulations!
+
+You've done it! Consider publishing your plugin to github and open a Pull Request
+back to the Telegraf repo letting us know about the availability of your
+[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md).
\ No newline at end of file
diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go
new file mode 100644
index 0000000000000..d5d1910964e7c
--- /dev/null
+++ b/plugins/common/shim/config.go
@@ -0,0 +1,171 @@
+package shim
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+
+ "github.com/BurntSushi/toml"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/outputs"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+type config struct {
+ Inputs map[string][]toml.Primitive
+ Processors map[string][]toml.Primitive
+ Outputs map[string][]toml.Primitive
+}
+
+type loadedConfig struct {
+ Input telegraf.Input
+ Processor telegraf.StreamingProcessor
+ Output telegraf.Output
+}
+
+// LoadConfig Adds plugins to the shim
+func (s *Shim) LoadConfig(filePath *string) error {
+ conf, err := LoadConfig(filePath)
+ if err != nil {
+ return err
+ }
+ if conf.Input != nil {
+ if err = s.AddInput(conf.Input); err != nil {
+ return fmt.Errorf("Failed to add Input: %w", err)
+ }
+ } else if conf.Processor != nil {
+ if err = s.AddStreamingProcessor(conf.Processor); err != nil {
+ return fmt.Errorf("Failed to add Processor: %w", err)
+ }
+ } else if conf.Output != nil {
+ if err = s.AddOutput(conf.Output); err != nil {
+ return fmt.Errorf("Failed to add Output: %w", err)
+ }
+ }
+ return nil
+}
+
+// LoadConfig loads the config and returns inputs that later need to be loaded.
+func LoadConfig(filePath *string) (loaded loadedConfig, err error) {
+ var data string
+ conf := config{}
+ if filePath != nil && *filePath != "" {
+
+ b, err := ioutil.ReadFile(*filePath)
+ if err != nil {
+ return loadedConfig{}, err
+ }
+
+ data = expandEnvVars(b)
+
+ } else {
+ conf, err = DefaultImportedPlugins()
+ if err != nil {
+ return loadedConfig{}, err
+ }
+ }
+
+ md, err := toml.Decode(data, &conf)
+ if err != nil {
+ return loadedConfig{}, err
+ }
+
+ return createPluginsWithTomlConfig(md, conf)
+}
+
+func expandEnvVars(contents []byte) string {
+ return os.Expand(string(contents), getEnv)
+}
+
+func getEnv(key string) string {
+ v := os.Getenv(key)
+
+ return envVarEscaper.Replace(v)
+}
+
+func createPluginsWithTomlConfig(md toml.MetaData, conf config) (loadedConfig, error) {
+ loadedConf := loadedConfig{}
+
+ for name, primitives := range conf.Inputs {
+ creator, ok := inputs.Inputs[name]
+ if !ok {
+ return loadedConf, errors.New("unknown input " + name)
+ }
+
+ plugin := creator()
+ if len(primitives) > 0 {
+ primitive := primitives[0]
+ if err := md.PrimitiveDecode(primitive, plugin); err != nil {
+ return loadedConf, err
+ }
+ }
+
+ loadedConf.Input = plugin
+ break
+ }
+
+ for name, primitives := range conf.Processors {
+ creator, ok := processors.Processors[name]
+ if !ok {
+ return loadedConf, errors.New("unknown processor " + name)
+ }
+
+ plugin := creator()
+ if len(primitives) > 0 {
+ primitive := primitives[0]
+ if err := md.PrimitiveDecode(primitive, plugin); err != nil {
+ return loadedConf, err
+ }
+ }
+ loadedConf.Processor = plugin
+ break
+ }
+
+ for name, primitives := range conf.Outputs {
+ creator, ok := outputs.Outputs[name]
+ if !ok {
+ return loadedConf, errors.New("unknown output " + name)
+ }
+
+ plugin := creator()
+ if len(primitives) > 0 {
+ primitive := primitives[0]
+ if err := md.PrimitiveDecode(primitive, plugin); err != nil {
+ return loadedConf, err
+ }
+ }
+ loadedConf.Output = plugin
+ break
+ }
+ return loadedConf, nil
+}
+
+// DefaultImportedPlugins defaults to whatever plugins happen to be loaded and
+// have registered themselves with the registry. This makes loading plugins
+// without having to define a config dead easy.
+func DefaultImportedPlugins() (config, error) {
+ conf := config{
+ Inputs: map[string][]toml.Primitive{},
+ Processors: map[string][]toml.Primitive{},
+ Outputs: map[string][]toml.Primitive{},
+ }
+ for name := range inputs.Inputs {
+ log.Println("No config found. Loading default config for plugin", name)
+ conf.Inputs[name] = []toml.Primitive{}
+ return conf, nil
+ }
+ for name := range processors.Processors {
+ log.Println("No config found. Loading default config for plugin", name)
+ conf.Processors[name] = []toml.Primitive{}
+ return conf, nil
+ }
+ for name := range outputs.Outputs {
+ log.Println("No config found. Loading default config for plugin", name)
+ conf.Outputs[name] = []toml.Primitive{}
+ return conf, nil
+ }
+ return conf, nil
+}
diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go
new file mode 100644
index 0000000000000..be4ee4140feb5
--- /dev/null
+++ b/plugins/common/shim/config_test.go
@@ -0,0 +1,72 @@
+package shim
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ tgConfig "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/stretchr/testify/require"
+)
+
+func TestLoadConfig(t *testing.T) {
+ os.Setenv("SECRET_TOKEN", "xxxxxxxxxx")
+ os.Setenv("SECRET_VALUE", `test"\test`)
+
+ inputs.Add("test", func() telegraf.Input {
+ return &serviceInput{}
+ })
+
+ c := "./testdata/plugin.conf"
+ conf, err := LoadConfig(&c)
+ require.NoError(t, err)
+
+ inp := conf.Input.(*serviceInput)
+
+ require.Equal(t, "awesome name", inp.ServiceName)
+ require.Equal(t, "xxxxxxxxxx", inp.SecretToken)
+ require.Equal(t, `test"\test`, inp.SecretValue)
+}
+
+func TestDefaultImportedPluginsSelfRegisters(t *testing.T) {
+ inputs.Add("test", func() telegraf.Input {
+ return &testInput{}
+ })
+
+ cfg, err := LoadConfig(nil)
+ require.NoError(t, err)
+ require.Equal(t, "test", cfg.Input.Description())
+}
+
+func TestLoadingSpecialTypes(t *testing.T) {
+ inputs.Add("test", func() telegraf.Input {
+ return &testDurationInput{}
+ })
+
+ c := "./testdata/special.conf"
+ conf, err := LoadConfig(&c)
+ require.NoError(t, err)
+
+ inp := conf.Input.(*testDurationInput)
+
+ require.EqualValues(t, 3*time.Second, inp.Duration)
+ require.EqualValues(t, 3*1000*1000, inp.Size)
+}
+
+type testDurationInput struct {
+ Duration tgConfig.Duration `toml:"duration"`
+ Size tgConfig.Size `toml:"size"`
+}
+
+func (i *testDurationInput) SampleConfig() string {
+ return ""
+}
+
+func (i *testDurationInput) Description() string {
+ return ""
+}
+func (i *testDurationInput) Gather(acc telegraf.Accumulator) error {
+ return nil
+}
diff --git a/plugins/common/shim/example/cmd/main.go b/plugins/common/shim/example/cmd/main.go
new file mode 100644
index 0000000000000..4f51f7f878fb3
--- /dev/null
+++ b/plugins/common/shim/example/cmd/main.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "time"
+
+ // TODO: import your plugins
+ // _ "github.com/my_github_user/my_plugin_repo/plugins/inputs/mypluginname"
+
+ "github.com/influxdata/telegraf/plugins/common/shim"
+)
+
+var pollInterval = flag.Duration("poll_interval", 1*time.Second, "how often to send metrics")
+var pollIntervalDisabled = flag.Bool("poll_interval_disabled", false, "how often to send metrics")
+var configFile = flag.String("config", "", "path to the config file for this plugin")
+var err error
+
+// This is designed to be simple; Just change the import above and you're good.
+//
+// However, if you want to do all your config in code, you can like so:
+//
+// // initialize your plugin with any settngs you want
+// myInput := &mypluginname.MyPlugin{
+// DefaultSettingHere: 3,
+// }
+//
+// shim := shim.New()
+//
+// shim.AddInput(myInput)
+//
+// // now the shim.Run() call as below.
+//
+func main() {
+ // parse command line options
+ flag.Parse()
+ if *pollIntervalDisabled {
+ *pollInterval = shim.PollIntervalDisabled
+ }
+
+ // create the shim. This is what will run your plugins.
+ shim := shim.New()
+
+ // If no config is specified, all imported plugins are loaded.
+ // otherwise follow what the config asks for.
+ // Check for settings from a config toml file,
+ // (or just use whatever plugins were imported above)
+ err = shim.LoadConfig(configFile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Err loading input: %s\n", err)
+ os.Exit(1)
+ }
+
+ // run the input plugin(s) until stdin closes or we receive a termination signal
+ if err := shim.Run(*pollInterval); err != nil {
+ fmt.Fprintf(os.Stderr, "Err: %s\n", err)
+ os.Exit(1)
+ }
+}
diff --git a/plugins/common/shim/example/cmd/plugin.conf b/plugins/common/shim/example/cmd/plugin.conf
new file mode 100644
index 0000000000000..53f89a55946ca
--- /dev/null
+++ b/plugins/common/shim/example/cmd/plugin.conf
@@ -0,0 +1,2 @@
+[[inputs.my_plugin_name]]
+ value_name = "value"
diff --git a/plugins/common/shim/goshim.go b/plugins/common/shim/goshim.go
new file mode 100644
index 0000000000000..7be139194520f
--- /dev/null
+++ b/plugins/common/shim/goshim.go
@@ -0,0 +1,134 @@
+package shim
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/serializers/influx"
+)
+
+type empty struct{}
+
+var (
+ forever = 100 * 365 * 24 * time.Hour
+ envVarEscaper = strings.NewReplacer(
+ `"`, `\"`,
+ `\`, `\\`,
+ )
+)
+
+const (
+ // PollIntervalDisabled is used to indicate that you want to disable polling,
+ // as opposed to duration 0 meaning poll constantly.
+ PollIntervalDisabled = time.Duration(0)
+)
+
+// Shim allows you to wrap your inputs and run them as if they were part of Telegraf,
+// except built externally.
+type Shim struct {
+ Input telegraf.Input
+ Processor telegraf.StreamingProcessor
+ Output telegraf.Output
+
+ log *Logger
+
+ // streams
+ stdin io.Reader
+ stdout io.Writer
+ stderr io.Writer
+
+ // outgoing metric channel
+ metricCh chan telegraf.Metric
+
+ // input only
+ gatherPromptCh chan empty
+}
+
+// New creates a new shim interface
+func New() *Shim {
+ return &Shim{
+ metricCh: make(chan telegraf.Metric, 1),
+ stdin: os.Stdin,
+ stdout: os.Stdout,
+ stderr: os.Stderr,
+ log: NewLogger(),
+ }
+}
+
+func (s *Shim) watchForShutdown(cancel context.CancelFunc) {
+ quit := make(chan os.Signal, 1)
+ signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+ go func() {
+ <-quit // user-triggered quit
+ // cancel, but keep looping until the metric channel closes.
+ cancel()
+ }()
+}
+
+// Run the input plugins..
+func (s *Shim) Run(pollInterval time.Duration) error {
+ if s.Input != nil {
+ err := s.RunInput(pollInterval)
+ if err != nil {
+ return fmt.Errorf("RunInput error: %w", err)
+ }
+ } else if s.Processor != nil {
+ err := s.RunProcessor()
+ if err != nil {
+ return fmt.Errorf("RunProcessor error: %w", err)
+ }
+ } else if s.Output != nil {
+ err := s.RunOutput()
+ if err != nil {
+ return fmt.Errorf("RunOutput error: %w", err)
+ }
+ } else {
+ return fmt.Errorf("Nothing to run")
+ }
+
+ return nil
+}
+
+func hasQuit(ctx context.Context) bool {
+ return ctx.Err() != nil
+}
+
+func (s *Shim) writeProcessedMetrics() error {
+ serializer := influx.NewSerializer()
+ for {
+ select {
+ case m, open := <-s.metricCh:
+ if !open {
+ return nil
+ }
+ b, err := serializer.Serialize(m)
+ if err != nil {
+ return fmt.Errorf("failed to serialize metric: %s", err)
+ }
+ // Write this to stdout
+ fmt.Fprint(s.stdout, string(b))
+ }
+ }
+}
+
+// LogName satisfies the MetricMaker interface
+func (s *Shim) LogName() string {
+ return ""
+}
+
+// MakeMetric satisfies the MetricMaker interface
+func (s *Shim) MakeMetric(m telegraf.Metric) telegraf.Metric {
+ return m // don't need to do anything to it.
+}
+
+// Log satisfies the MetricMaker interface
+func (s *Shim) Log() telegraf.Logger {
+ return s.log
+}
diff --git a/plugins/common/shim/goshim_test.go b/plugins/common/shim/goshim_test.go
new file mode 100644
index 0000000000000..080a513ade250
--- /dev/null
+++ b/plugins/common/shim/goshim_test.go
@@ -0,0 +1,79 @@
+package shim
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "log"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/stretchr/testify/require"
+)
+
+func TestShimSetsUpLogger(t *testing.T) {
+ stderrReader, stderrWriter := io.Pipe()
+ stdinReader, stdinWriter := io.Pipe()
+
+ runErroringInputPlugin(t, 40*time.Second, stdinReader, nil, stderrWriter)
+
+ stdinWriter.Write([]byte("\n"))
+
+ // <-metricProcessed
+
+ r := bufio.NewReader(stderrReader)
+ out, err := r.ReadString('\n')
+ require.NoError(t, err)
+ require.Contains(t, out, "Error in plugin: intentional")
+
+ stdinWriter.Close()
+}
+
+func runErroringInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdout, stderr io.Writer) (metricProcessed chan bool, exited chan bool) {
+ metricProcessed = make(chan bool, 1)
+ exited = make(chan bool, 1)
+ inp := &erroringInput{}
+
+ shim := New()
+ if stdin != nil {
+ shim.stdin = stdin
+ }
+ if stdout != nil {
+ shim.stdout = stdout
+ }
+ if stderr != nil {
+ shim.stderr = stderr
+ log.SetOutput(stderr)
+ }
+ shim.AddInput(inp)
+ go func() {
+ err := shim.Run(interval)
+ require.NoError(t, err)
+ exited <- true
+ }()
+ return metricProcessed, exited
+}
+
+type erroringInput struct {
+}
+
+func (i *erroringInput) SampleConfig() string {
+ return ""
+}
+
+func (i *erroringInput) Description() string {
+ return ""
+}
+
+func (i *erroringInput) Gather(acc telegraf.Accumulator) error {
+ acc.AddError(errors.New("intentional"))
+ return nil
+}
+
+func (i *erroringInput) Start(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func (i *erroringInput) Stop() {
+}
diff --git a/plugins/common/shim/input.go b/plugins/common/shim/input.go
new file mode 100644
index 0000000000000..a2956c3e1f1e1
--- /dev/null
+++ b/plugins/common/shim/input.go
@@ -0,0 +1,112 @@
+package shim
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/agent"
+)
+
+// AddInput adds the input to the shim. Later calls to Run() will run this input.
+func (s *Shim) AddInput(input telegraf.Input) error {
+ setLoggerOnPlugin(input, s.Log())
+ if p, ok := input.(telegraf.Initializer); ok {
+ err := p.Init()
+ if err != nil {
+ return fmt.Errorf("failed to init input: %s", err)
+ }
+ }
+
+ s.Input = input
+ return nil
+}
+
+func (s *Shim) RunInput(pollInterval time.Duration) error {
+ // context is used only to close the stdin reader. everything else cascades
+ // from that point and closes cleanly when it's done.
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ s.watchForShutdown(cancel)
+
+ acc := agent.NewAccumulator(s, s.metricCh)
+ acc.SetPrecision(time.Nanosecond)
+
+ if serviceInput, ok := s.Input.(telegraf.ServiceInput); ok {
+ if err := serviceInput.Start(acc); err != nil {
+ return fmt.Errorf("failed to start input: %s", err)
+ }
+ }
+ s.gatherPromptCh = make(chan empty, 1)
+ go func() {
+ s.startGathering(ctx, s.Input, acc, pollInterval)
+ if serviceInput, ok := s.Input.(telegraf.ServiceInput); ok {
+ serviceInput.Stop()
+ }
+ // closing the metric channel gracefully stops writing to stdout
+ close(s.metricCh)
+ }()
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ s.writeProcessedMetrics()
+ wg.Done()
+ }()
+
+ go func() {
+ scanner := bufio.NewScanner(s.stdin)
+ for scanner.Scan() {
+ // push a non-blocking message to trigger metric collection.
+ s.pushCollectMetricsRequest()
+ }
+
+ cancel() // cancel gracefully stops gathering
+ }()
+
+ wg.Wait() // wait for writing to stdout to finish
+ return nil
+}
+
+func (s *Shim) startGathering(ctx context.Context, input telegraf.Input, acc telegraf.Accumulator, pollInterval time.Duration) {
+ if pollInterval == PollIntervalDisabled {
+ pollInterval = forever
+ }
+ t := time.NewTicker(pollInterval)
+ defer t.Stop()
+ for {
+ // give priority to stopping.
+ if hasQuit(ctx) {
+ return
+ }
+ // see what's up
+ select {
+ case <-ctx.Done():
+ return
+ case <-s.gatherPromptCh:
+ if err := input.Gather(acc); err != nil {
+ fmt.Fprintf(s.stderr, "failed to gather metrics: %s\n", err)
+ }
+ case <-t.C:
+ if err := input.Gather(acc); err != nil {
+ fmt.Fprintf(s.stderr, "failed to gather metrics: %s\n", err)
+ }
+ }
+ }
+}
+
+// pushCollectMetricsRequest pushes a non-blocking (nil) message to the
+// gatherPromptCh channel to trigger metric collection.
+// The channel is defined with a buffer of 1, so while it's full, subsequent
+// requests are discarded.
+func (s *Shim) pushCollectMetricsRequest() {
+ // push a message out to each channel to collect metrics. don't block.
+ select {
+ case s.gatherPromptCh <- empty{}:
+ default:
+ }
+}
diff --git a/plugins/common/shim/input_test.go b/plugins/common/shim/input_test.go
new file mode 100644
index 0000000000000..32f97d5924bc5
--- /dev/null
+++ b/plugins/common/shim/input_test.go
@@ -0,0 +1,141 @@
+package shim
+
+import (
+ "bufio"
+ "io"
+ "io/ioutil"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/influxdata/telegraf"
+)
+
+func TestInputShimTimer(t *testing.T) {
+ stdoutReader, stdoutWriter := io.Pipe()
+
+ stdin, _ := io.Pipe() // hold the stdin pipe open
+
+ metricProcessed, _ := runInputPlugin(t, 10*time.Millisecond, stdin, stdoutWriter, nil)
+
+ <-metricProcessed
+ r := bufio.NewReader(stdoutReader)
+ out, err := r.ReadString('\n')
+ require.NoError(t, err)
+ require.Contains(t, out, "\n")
+ metricLine := strings.Split(out, "\n")[0]
+ require.Equal(t, "measurement,tag=tag field=1i 1234000005678", metricLine)
+}
+
+func TestInputShimStdinSignalingWorks(t *testing.T) {
+ stdinReader, stdinWriter := io.Pipe()
+ stdoutReader, stdoutWriter := io.Pipe()
+
+ metricProcessed, exited := runInputPlugin(t, 40*time.Second, stdinReader, stdoutWriter, nil)
+
+ stdinWriter.Write([]byte("\n"))
+
+ <-metricProcessed
+
+ r := bufio.NewReader(stdoutReader)
+ out, err := r.ReadString('\n')
+ require.NoError(t, err)
+ require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out)
+
+ stdinWriter.Close()
+ go ioutil.ReadAll(r)
+ // check that it exits cleanly
+ <-exited
+}
+
+func runInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdout, stderr io.Writer) (metricProcessed chan bool, exited chan bool) {
+ metricProcessed = make(chan bool, 1)
+ exited = make(chan bool, 1)
+ inp := &testInput{
+ metricProcessed: metricProcessed,
+ }
+
+ shim := New()
+ if stdin != nil {
+ shim.stdin = stdin
+ }
+ if stdout != nil {
+ shim.stdout = stdout
+ }
+ if stderr != nil {
+ shim.stderr = stderr
+ }
+ shim.AddInput(inp)
+ go func() {
+ err := shim.Run(interval)
+ require.NoError(t, err)
+ exited <- true
+ }()
+ return metricProcessed, exited
+}
+
+type testInput struct {
+ metricProcessed chan bool
+}
+
+func (i *testInput) SampleConfig() string {
+ return ""
+}
+
+func (i *testInput) Description() string {
+ return "test"
+}
+
+func (i *testInput) Gather(acc telegraf.Accumulator) error {
+ acc.AddFields("measurement",
+ map[string]interface{}{
+ "field": 1,
+ },
+ map[string]string{
+ "tag": "tag",
+ }, time.Unix(1234, 5678))
+ i.metricProcessed <- true
+ return nil
+}
+
+func (i *testInput) Start(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func (i *testInput) Stop() {
+}
+
+type serviceInput struct {
+ ServiceName string `toml:"service_name"`
+ SecretToken string `toml:"secret_token"`
+ SecretValue string `toml:"secret_value"`
+}
+
+func (i *serviceInput) SampleConfig() string {
+ return ""
+}
+
+func (i *serviceInput) Description() string {
+ return ""
+}
+
+func (i *serviceInput) Gather(acc telegraf.Accumulator) error {
+ acc.AddFields("measurement",
+ map[string]interface{}{
+ "field": 1,
+ },
+ map[string]string{
+ "tag": "tag",
+ }, time.Unix(1234, 5678))
+
+ return nil
+}
+
+func (i *serviceInput) Start(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func (i *serviceInput) Stop() {
+}
diff --git a/plugins/common/shim/logger.go b/plugins/common/shim/logger.go
new file mode 100644
index 0000000000000..88db63ab7d58c
--- /dev/null
+++ b/plugins/common/shim/logger.go
@@ -0,0 +1,89 @@
+package shim
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+
+ "github.com/influxdata/telegraf"
+)
+
+func init() {
+ log.SetOutput(os.Stderr)
+}
+
+// Logger defines a logging structure for plugins.
+// external plugins can only ever write to stderr and writing to stdout
+// would interfere with input/processor writing out of metrics.
+type Logger struct{}
+
+// NewLogger creates a new logger instance
+func NewLogger() *Logger {
+ return &Logger{}
+}
+
+// Errorf logs an error message, patterned after log.Printf.
+func (l *Logger) Errorf(format string, args ...interface{}) {
+ log.Printf("E! "+format, args...)
+}
+
+// Error logs an error message, patterned after log.Print.
+func (l *Logger) Error(args ...interface{}) {
+ log.Print("E! ", fmt.Sprint(args...))
+}
+
+// Debugf logs a debug message, patterned after log.Printf.
+func (l *Logger) Debugf(format string, args ...interface{}) {
+ log.Printf("D! "+format, args...)
+}
+
+// Debug logs a debug message, patterned after log.Print.
+func (l *Logger) Debug(args ...interface{}) {
+ log.Print("D! ", fmt.Sprint(args...))
+}
+
+// Warnf logs a warning message, patterned after log.Printf.
+func (l *Logger) Warnf(format string, args ...interface{}) {
+ log.Printf("W! "+format, args...)
+}
+
+// Warn logs a warning message, patterned after log.Print.
+func (l *Logger) Warn(args ...interface{}) {
+ log.Print("W! ", fmt.Sprint(args...))
+}
+
+// Infof logs an information message, patterned after log.Printf.
+func (l *Logger) Infof(format string, args ...interface{}) {
+ log.Printf("I! "+format, args...)
+}
+
+// Info logs an information message, patterned after log.Print.
+func (l *Logger) Info(args ...interface{}) {
+ log.Print("I! ", fmt.Sprint(args...))
+}
+
+// setLoggerOnPlugin injects the logger into the plugin,
+// if it defines Log telegraf.Logger. This is sort of like SetLogger but using
+// reflection instead of forcing the plugin author to define the function for it
+func setLoggerOnPlugin(i interface{}, log telegraf.Logger) {
+ valI := reflect.ValueOf(i)
+
+ if valI.Type().Kind() != reflect.Ptr {
+ valI = reflect.New(reflect.TypeOf(i))
+ }
+
+ field := valI.Elem().FieldByName("Log")
+ if !field.IsValid() {
+ return
+ }
+
+ switch field.Type().String() {
+ case "telegraf.Logger":
+ if field.CanSet() {
+ field.Set(reflect.ValueOf(log))
+ }
+ }
+
+ return
+}
diff --git a/plugins/common/shim/output.go b/plugins/common/shim/output.go
new file mode 100644
index 0000000000000..c5ce46da7f0f8
--- /dev/null
+++ b/plugins/common/shim/output.go
@@ -0,0 +1,52 @@
+package shim
+
+import (
+ "bufio"
+ "fmt"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/parsers"
+)
+
+// AddOutput adds the input to the shim. Later calls to Run() will run this.
+func (s *Shim) AddOutput(output telegraf.Output) error {
+ setLoggerOnPlugin(output, s.Log())
+ if p, ok := output.(telegraf.Initializer); ok {
+ err := p.Init()
+ if err != nil {
+ return fmt.Errorf("failed to init input: %s", err)
+ }
+ }
+
+ s.Output = output
+ return nil
+}
+
+func (s *Shim) RunOutput() error {
+ parser, err := parsers.NewInfluxParser()
+ if err != nil {
+ return fmt.Errorf("Failed to create new parser: %w", err)
+ }
+
+ err = s.Output.Connect()
+ if err != nil {
+ return fmt.Errorf("failed to start processor: %w", err)
+ }
+ defer s.Output.Close()
+
+ var m telegraf.Metric
+
+ scanner := bufio.NewScanner(s.stdin)
+ for scanner.Scan() {
+ m, err = parser.ParseLine(scanner.Text())
+ if err != nil {
+ fmt.Fprintf(s.stderr, "Failed to parse metric: %s\n", err)
+ continue
+ }
+ if err = s.Output.Write([]telegraf.Metric{m}); err != nil {
+ fmt.Fprintf(s.stderr, "Failed to write metric: %s\n", err)
+ }
+ }
+
+ return nil
+}
diff --git a/plugins/common/shim/output_test.go b/plugins/common/shim/output_test.go
new file mode 100644
index 0000000000000..5a74d59edb240
--- /dev/null
+++ b/plugins/common/shim/output_test.go
@@ -0,0 +1,82 @@
+package shim
+
+import (
+ "io"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/serializers"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOutputShim(t *testing.T) {
+ o := &testOutput{}
+
+ stdinReader, stdinWriter := io.Pipe()
+
+ s := New()
+ s.stdin = stdinReader
+ err := s.AddOutput(o)
+ require.NoError(t, err)
+
+ wg := sync.WaitGroup{}
+
+ wg.Add(1)
+ go func() {
+ err := s.RunOutput()
+ require.NoError(t, err)
+ wg.Done()
+ }()
+
+ serializer, _ := serializers.NewInfluxSerializer()
+
+ m, _ := metric.New("thing",
+ map[string]string{
+ "a": "b",
+ },
+ map[string]interface{}{
+ "v": 1,
+ },
+ time.Now(),
+ )
+ b, err := serializer.Serialize(m)
+ require.NoError(t, err)
+ _, err = stdinWriter.Write(b)
+ require.NoError(t, err)
+ err = stdinWriter.Close()
+ require.NoError(t, err)
+
+ wg.Wait()
+
+ require.Len(t, o.MetricsWritten, 1)
+ mOut := o.MetricsWritten[0]
+
+ testutil.RequireMetricEqual(t, m, mOut)
+}
+
+type testOutput struct {
+ MetricsWritten []telegraf.Metric
+}
+
+func (o *testOutput) Connect() error {
+ return nil
+}
+func (o *testOutput) Close() error {
+ return nil
+}
+func (o *testOutput) Write(metrics []telegraf.Metric) error {
+ o.MetricsWritten = append(o.MetricsWritten, metrics...)
+ return nil
+}
+
+func (o *testOutput) SampleConfig() string {
+ return ""
+}
+
+func (o *testOutput) Description() string {
+ return ""
+}
diff --git a/plugins/common/shim/processor.go b/plugins/common/shim/processor.go
new file mode 100644
index 0000000000000..33dceba872759
--- /dev/null
+++ b/plugins/common/shim/processor.go
@@ -0,0 +1,71 @@
+package shim
+
+import (
+ "bufio"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/agent"
+ "github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+// AddProcessor adds the processor to the shim. Later calls to Run() will run this.
+func (s *Shim) AddProcessor(processor telegraf.Processor) error {
+ setLoggerOnPlugin(processor, s.Log())
+ p := processors.NewStreamingProcessorFromProcessor(processor)
+ return s.AddStreamingProcessor(p)
+}
+
+// AddStreamingProcessor adds the processor to the shim. Later calls to Run() will run this.
+func (s *Shim) AddStreamingProcessor(processor telegraf.StreamingProcessor) error {
+ setLoggerOnPlugin(processor, s.Log())
+ if p, ok := processor.(telegraf.Initializer); ok {
+ err := p.Init()
+ if err != nil {
+ return fmt.Errorf("failed to init input: %s", err)
+ }
+ }
+
+ s.Processor = processor
+ return nil
+}
+
+func (s *Shim) RunProcessor() error {
+ acc := agent.NewAccumulator(s, s.metricCh)
+ acc.SetPrecision(time.Nanosecond)
+
+ parser, err := parsers.NewInfluxParser()
+ if err != nil {
+ return fmt.Errorf("Failed to create new parser: %w", err)
+ }
+
+ err = s.Processor.Start(acc)
+ if err != nil {
+ return fmt.Errorf("failed to start processor: %w", err)
+ }
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ s.writeProcessedMetrics()
+ wg.Done()
+ }()
+
+ scanner := bufio.NewScanner(s.stdin)
+ for scanner.Scan() {
+ m, err := parser.ParseLine(scanner.Text())
+ if err != nil {
+ fmt.Fprintf(s.stderr, "Failed to parse metric: %s\b", err)
+ continue
+ }
+ s.Processor.Add(m, acc)
+ }
+
+ close(s.metricCh)
+ s.Processor.Stop()
+ wg.Wait()
+ return nil
+}
diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go
new file mode 100644
index 0000000000000..b4cf01ae0236f
--- /dev/null
+++ b/plugins/common/shim/processor_test.go
@@ -0,0 +1,88 @@
+package shim
+
+import (
+ "bufio"
+ "io"
+ "io/ioutil"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/plugins/serializers"
+ "github.com/stretchr/testify/require"
+)
+
+func TestProcessorShim(t *testing.T) {
+ p := &testProcessor{}
+
+ stdinReader, stdinWriter := io.Pipe()
+ stdoutReader, stdoutWriter := io.Pipe()
+
+ s := New()
+ // inject test into shim
+ s.stdin = stdinReader
+ s.stdout = stdoutWriter
+ err := s.AddProcessor(p)
+ require.NoError(t, err)
+
+ wg := sync.WaitGroup{}
+
+ wg.Add(1)
+ go func() {
+ err := s.RunProcessor()
+ require.NoError(t, err)
+ wg.Done()
+ }()
+
+ serializer, _ := serializers.NewInfluxSerializer()
+ parser, _ := parsers.NewInfluxParser()
+
+ m, _ := metric.New("thing",
+ map[string]string{
+ "a": "b",
+ },
+ map[string]interface{}{
+ "v": 1,
+ },
+ time.Now(),
+ )
+ b, err := serializer.Serialize(m)
+ require.NoError(t, err)
+ _, err = stdinWriter.Write(b)
+ require.NoError(t, err)
+ err = stdinWriter.Close()
+ require.NoError(t, err)
+
+ r := bufio.NewReader(stdoutReader)
+ out, err := r.ReadString('\n')
+ require.NoError(t, err)
+ mOut, err := parser.ParseLine(out)
+ require.NoError(t, err)
+
+ val, ok := mOut.GetTag("hi")
+ require.True(t, ok)
+ require.Equal(t, "mom", val)
+
+ go ioutil.ReadAll(r)
+ wg.Wait()
+}
+
+type testProcessor struct{}
+
+func (p *testProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
+ for _, metric := range in {
+ metric.AddTag("hi", "mom")
+ }
+ return in
+}
+
+func (p *testProcessor) SampleConfig() string {
+ return ""
+}
+
+func (p *testProcessor) Description() string {
+ return ""
+}
diff --git a/plugins/common/shim/testdata/plugin.conf b/plugins/common/shim/testdata/plugin.conf
new file mode 100644
index 0000000000000..78dbb33a90683
--- /dev/null
+++ b/plugins/common/shim/testdata/plugin.conf
@@ -0,0 +1,4 @@
+[[inputs.test]]
+ service_name = "awesome name"
+ secret_token = "${SECRET_TOKEN}"
+ secret_value = "$SECRET_VALUE"
diff --git a/plugins/common/shim/testdata/special.conf b/plugins/common/shim/testdata/special.conf
new file mode 100644
index 0000000000000..c324b638497c5
--- /dev/null
+++ b/plugins/common/shim/testdata/special.conf
@@ -0,0 +1,4 @@
+# testing custom field types
+[[inputs.test]]
+ duration = "3s"
+ size = "3MB"
\ No newline at end of file
diff --git a/plugins/common/tls/common.go b/plugins/common/tls/common.go
new file mode 100644
index 0000000000000..1ceb20c3f4c46
--- /dev/null
+++ b/plugins/common/tls/common.go
@@ -0,0 +1,38 @@
+package tls
+
+import "crypto/tls"
+
+var tlsVersionMap = map[string]uint16{
+ "TLS10": tls.VersionTLS10,
+ "TLS11": tls.VersionTLS11,
+ "TLS12": tls.VersionTLS12,
+ "TLS13": tls.VersionTLS13,
+}
+
+var tlsCipherMap = map[string]uint16{
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
+ "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256,
+ "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384,
+ "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256,
+}
diff --git a/internal/tls/config.go b/plugins/common/tls/config.go
similarity index 74%
rename from internal/tls/config.go
rename to plugins/common/tls/config.go
index ce79583430d3e..59fbc49526745 100644
--- a/internal/tls/config.go
+++ b/plugins/common/tls/config.go
@@ -5,6 +5,7 @@ import (
"crypto/x509"
"fmt"
"io/ioutil"
+ "strings"
)
// ClientConfig represents the standard client TLS config.
@@ -25,6 +26,9 @@ type ServerConfig struct {
TLSCert string `toml:"tls_cert"`
TLSKey string `toml:"tls_key"`
TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"`
+ TLSCipherSuites []string `toml:"tls_cipher_suites"`
+ TLSMinVersion string `toml:"tls_min_version"`
+ TLSMaxVersion string `toml:"tls_max_version"`
}
// TLSConfig returns a tls.Config, may be nil without error if TLS is not
@@ -97,6 +101,38 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) {
}
}
+ if len(c.TLSCipherSuites) != 0 {
+ cipherSuites, err := ParseCiphers(c.TLSCipherSuites)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "could not parse server cipher suites %s: %v", strings.Join(c.TLSCipherSuites, ","), err)
+ }
+ tlsConfig.CipherSuites = cipherSuites
+ }
+
+ if c.TLSMaxVersion != "" {
+ version, err := ParseTLSVersion(c.TLSMaxVersion)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "could not parse tls max version %q: %v", c.TLSMaxVersion, err)
+ }
+ tlsConfig.MaxVersion = version
+ }
+
+ if c.TLSMinVersion != "" {
+ version, err := ParseTLSVersion(c.TLSMinVersion)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "could not parse tls min version %q: %v", c.TLSMinVersion, err)
+ }
+ tlsConfig.MinVersion = version
+ }
+
+ if tlsConfig.MinVersion != 0 && tlsConfig.MaxVersion != 0 && tlsConfig.MinVersion > tlsConfig.MaxVersion {
+ return nil, fmt.Errorf(
+ "tls min version %q can't be greater than tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion)
+ }
+
return tlsConfig, nil
}
diff --git a/internal/tls/config_test.go b/plugins/common/tls/config_test.go
similarity index 60%
rename from internal/tls/config_test.go
rename to plugins/common/tls/config_test.go
index 31a70d9a18ebd..93656087dfd55 100644
--- a/internal/tls/config_test.go
+++ b/plugins/common/tls/config_test.go
@@ -6,12 +6,12 @@ import (
"testing"
"time"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
-var pki = testutil.NewPKI("../../testutil/pki")
+var pki = testutil.NewPKI("../../../testutil/pki")
func TestClientConfig(t *testing.T) {
tests := []struct {
@@ -123,6 +123,47 @@ func TestServerConfig(t *testing.T) {
TLSCert: pki.ServerCertPath(),
TLSKey: pki.ServerKeyPath(),
TLSAllowedCACerts: []string{pki.CACertPath()},
+ TLSCipherSuites: []string{pki.CipherSuite()},
+ TLSMinVersion: pki.TLSMinVersion(),
+ TLSMaxVersion: pki.TLSMaxVersion(),
+ },
+ },
+ {
+ name: "missing tls cipher suites is okay",
+ server: tls.ServerConfig{
+ TLSCert: pki.ServerCertPath(),
+ TLSKey: pki.ServerKeyPath(),
+ TLSAllowedCACerts: []string{pki.CACertPath()},
+ TLSCipherSuites: []string{pki.CipherSuite()},
+ },
+ },
+ {
+ name: "missing tls max version is okay",
+ server: tls.ServerConfig{
+ TLSCert: pki.ServerCertPath(),
+ TLSKey: pki.ServerKeyPath(),
+ TLSAllowedCACerts: []string{pki.CACertPath()},
+ TLSCipherSuites: []string{pki.CipherSuite()},
+ TLSMaxVersion: pki.TLSMaxVersion(),
+ },
+ },
+ {
+ name: "missing tls min version is okay",
+ server: tls.ServerConfig{
+ TLSCert: pki.ServerCertPath(),
+ TLSKey: pki.ServerKeyPath(),
+ TLSAllowedCACerts: []string{pki.CACertPath()},
+ TLSCipherSuites: []string{pki.CipherSuite()},
+ TLSMinVersion: pki.TLSMinVersion(),
+ },
+ },
+ {
+ name: "missing tls min/max versions is okay",
+ server: tls.ServerConfig{
+ TLSCert: pki.ServerCertPath(),
+ TLSKey: pki.ServerKeyPath(),
+ TLSAllowedCACerts: []string{pki.CACertPath()},
+ TLSCipherSuites: []string{pki.CipherSuite()},
},
},
{
@@ -172,6 +213,56 @@ func TestServerConfig(t *testing.T) {
expNil: true,
expErr: true,
},
+ {
+ name: "invalid cipher suites",
+ server: tls.ServerConfig{
+ TLSCert: pki.ServerCertPath(),
+ TLSKey: pki.ServerKeyPath(),
+ TLSAllowedCACerts: []string{pki.CACertPath()},
+ TLSCipherSuites: []string{pki.CACertPath()},
+ },
+ expNil: true,
+ expErr: true,
+ },
+ {
+ name: "TLS Max Version less than TLS Min version",
+ server: tls.ServerConfig{
+ TLSCert: pki.ServerCertPath(),
+ TLSKey: pki.ServerKeyPath(),
+ TLSAllowedCACerts: []string{pki.CACertPath()},
+ TLSCipherSuites: []string{pki.CACertPath()},
+ TLSMinVersion: pki.TLSMaxVersion(),
+ TLSMaxVersion: pki.TLSMinVersion(),
+ },
+ expNil: true,
+ expErr: true,
+ },
+ {
+ name: "invalid tls min version",
+ server: tls.ServerConfig{
+ TLSCert: pki.ServerCertPath(),
+ TLSKey: pki.ServerKeyPath(),
+ TLSAllowedCACerts: []string{pki.CACertPath()},
+ TLSCipherSuites: []string{pki.CipherSuite()},
+ TLSMinVersion: pki.ServerKeyPath(),
+ TLSMaxVersion: pki.TLSMaxVersion(),
+ },
+ expNil: true,
+ expErr: true,
+ },
+ {
+ name: "invalid tls max version",
+ server: tls.ServerConfig{
+ TLSCert: pki.ServerCertPath(),
+ TLSKey: pki.ServerKeyPath(),
+ TLSAllowedCACerts: []string{pki.CACertPath()},
+ TLSCipherSuites: []string{pki.CACertPath()},
+ TLSMinVersion: pki.TLSMinVersion(),
+ TLSMaxVersion: pki.ServerCertPath(),
+ },
+ expNil: true,
+ expErr: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
diff --git a/plugins/common/tls/utils.go b/plugins/common/tls/utils.go
new file mode 100644
index 0000000000000..ddc12d2c1e5e3
--- /dev/null
+++ b/plugins/common/tls/utils.go
@@ -0,0 +1,30 @@
+package tls
+
+import (
+ "fmt"
+)
+
+// ParseCiphers returns a `[]uint16` by received `[]string` key that represents ciphers from crypto/tls.
+// If some of ciphers in received list doesn't exists ParseCiphers returns nil with error
+func ParseCiphers(ciphers []string) ([]uint16, error) {
+ suites := []uint16{}
+
+ for _, cipher := range ciphers {
+ if v, ok := tlsCipherMap[cipher]; ok {
+ suites = append(suites, v)
+ } else {
+ return nil, fmt.Errorf("unsupported cipher %q", cipher)
+ }
+ }
+
+ return suites, nil
+}
+
+// ParseTLSVersion returns a `uint16` by received version string key that represents tls version from crypto/tls.
+// If version isn't supported ParseTLSVersion returns 0 with error
+func ParseTLSVersion(version string) (uint16, error) {
+ if v, ok := tlsVersionMap[version]; ok {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unsupported version %q", version)
+}
diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md
index b60d48c91daf2..6b86615b0e6a8 100644
--- a/plugins/inputs/EXAMPLE_README.md
+++ b/plugins/inputs/EXAMPLE_README.md
@@ -1,9 +1,12 @@
# Example Input Plugin
-The example plugin gathers metrics about example things. This description
+The `example` plugin gathers metrics about example things. This description
explains at a high level what the plugin does and provides links to where
additional information can be found.
+Telegraf minimum version: Telegraf x.x
+Plugin minimum tested version: x.x
+
### Configuration
This section contains the default TOML to configure the plugin. You can
@@ -36,11 +39,15 @@ mapped to the output.
- field1 (type, unit)
- field2 (float, percent)
-- measurement2
++ measurement2
- tags:
- tag3
- fields:
- field3 (integer, bytes)
+ - field4 (integer, green=1 yellow=2 red=3)
+ - field5 (string)
+ - field6 (float)
+ - field7 (boolean)
### Sample Queries
diff --git a/plugins/inputs/activemq/README.md b/plugins/inputs/activemq/README.md
index b44d12d22f07b..aba5a7f83ec27 100644
--- a/plugins/inputs/activemq/README.md
+++ b/plugins/inputs/activemq/README.md
@@ -1,4 +1,4 @@
-# Telegraf Input Plugin: ActiveMQ
+# ActiveMQ Input Plugin
This plugin gather queues, topics & subscribers metrics using ActiveMQ Console API.
@@ -7,12 +7,14 @@ This plugin gather queues, topics & subscribers metrics using ActiveMQ Console A
```toml
# Description
[[inputs.activemq]]
+ ## ActiveMQ WebConsole URL
+ url = "http://127.0.0.1:8161"
+
## Required ActiveMQ Endpoint
+ ## deprecated in 1.11; use the url option
# server = "192.168.50.10"
-
- ## Required ActiveMQ port
# port = 8161
-
+
## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"
@@ -22,46 +24,41 @@ This plugin gather queues, topics & subscribers metrics using ActiveMQ Console A
## Maximum time to receive response.
# response_timeout = "5s"
-
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
```
-### Measurements & Fields:
+### Metrics
Every effort was made to preserve the names based on the XML response from the ActiveMQ Console API.
-- activemq_queues:
+- activemq_queues
+ - tags:
+ - name
+ - source
+ - port
+ - fields:
- size
- consumer_count
- enqueue_count
- dequeue_count
- - activemq_topics:
++ activemq_topics
+ - tags:
+ - name
+ - source
+ - port
+ - fields:
- size
- consumer_count
- enqueue_count
- dequeue_count
- - subscribers_metrics:
- - pending_queue_size
- - dispatched_queue_size
- - dispatched_counter
- - enqueue_counter
- - dequeue_counter
-
-### Tags:
-
-- activemq_queues:
- - name
- - source
- - port
-- activemq_topics:
- - name
- - source
- - port
-- activemq_subscribers:
+- activemq_subscribers
+ - tags:
- client_id
- subscription_name
- connection_id
@@ -70,11 +67,16 @@ Every effort was made to preserve the names based on the XML response from the A
- active
- source
- port
+ - fields:
+ - pending_queue_size
+ - dispatched_queue_size
+ - dispatched_counter
+ - enqueue_counter
+ - dequeue_counter
-### Example Output:
+### Example Output
```
-$ ./telegraf -config telegraf.conf -input-filter activemq -test
activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000
activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000
activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000
diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go
index 9cc9037ed04b4..f7847f83d8d04 100644
--- a/plugins/inputs/activemq/activemq.go
+++ b/plugins/inputs/activemq/activemq.go
@@ -5,27 +5,30 @@ import (
"fmt"
"io/ioutil"
"net/http"
+ "net/url"
+ "path"
"strconv"
- "time"
-
"strings"
+ "time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
type ActiveMQ struct {
- Server string `json:"server"`
- Port int `json:"port"`
- Username string `json:"username"`
- Password string `json:"password"`
- Webadmin string `json:"webadmin"`
- ResponseTimeout internal.Duration
+ Server string `toml:"server"`
+ Port int `toml:"port"`
+ URL string `toml:"url"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ Webadmin string `toml:"webadmin"`
+ ResponseTimeout internal.Duration `toml:"response_timeout"`
tls.ClientConfig
- client *http.Client
+ client *http.Client
+ baseURL *url.URL
}
type Topics struct {
@@ -79,17 +82,13 @@ type Stats struct {
DequeueCounter int `xml:"dequeueCounter,attr"`
}
-const (
- QUEUES_STATS = "queues"
- TOPICS_STATS = "topics"
- SUBSCRIBERS_STATS = "subscribers"
-)
-
var sampleConfig = `
- ## Required ActiveMQ Endpoint
- # server = "192.168.50.10"
+ ## ActiveMQ WebConsole URL
+ url = "http://127.0.0.1:8161"
- ## Required ActiveMQ port
+ ## Required ActiveMQ Endpoint
+ ## deprecated in 1.11; use the url option
+ # server = "127.0.0.1"
# port = 8161
## Credentials for basic HTTP authentication
@@ -107,6 +106,7 @@ var sampleConfig = `
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
`
func (a *ActiveMQ) Description() string {
@@ -133,32 +133,57 @@ func (a *ActiveMQ) createHttpClient() (*http.Client, error) {
return client, nil
}
-func (a *ActiveMQ) GetMetrics(keyword string) ([]byte, error) {
+func (a *ActiveMQ) Init() error {
if a.ResponseTimeout.Duration < time.Second {
a.ResponseTimeout.Duration = time.Second * 5
}
- if a.client == nil {
- client, err := a.createHttpClient()
+ var err error
+ u := &url.URL{Scheme: "http", Host: a.Server + ":" + strconv.Itoa(a.Port)}
+ if a.URL != "" {
+ u, err = url.Parse(a.URL)
if err != nil {
- return nil, err
+ return err
}
- a.client = client
}
- url := fmt.Sprintf("http://%s:%d/%s/xml/%s.jsp", a.Server, a.Port, a.Webadmin, keyword)
- req, err := http.NewRequest("GET", url, nil)
+ if !strings.HasPrefix(u.Scheme, "http") {
+ return fmt.Errorf("invalid scheme %q", u.Scheme)
+ }
+
+ if u.Hostname() == "" {
+ return fmt.Errorf("invalid hostname %q", u.Hostname())
+ }
+
+ a.baseURL = u
+
+ a.client, err = a.createHttpClient()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (a *ActiveMQ) GetMetrics(u string) ([]byte, error) {
+ req, err := http.NewRequest("GET", u, nil)
if err != nil {
return nil, err
}
- req.SetBasicAuth(a.Username, a.Password)
+ if a.Username != "" || a.Password != "" {
+ req.SetBasicAuth(a.Username, a.Password)
+ }
+
resp, err := a.client.Do(req)
if err != nil {
return nil, err
}
-
defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("GET %s returned status %q", u, resp.Status)
+ }
+
return ioutil.ReadAll(resp.Body)
}
@@ -168,8 +193,8 @@ func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues)
tags := make(map[string]string)
tags["name"] = strings.TrimSpace(queue.Name)
- tags["source"] = a.Server
- tags["port"] = strconv.Itoa(a.Port)
+ tags["source"] = a.baseURL.Hostname()
+ tags["port"] = a.baseURL.Port()
records["size"] = queue.Stats.Size
records["consumer_count"] = queue.Stats.ConsumerCount
@@ -186,8 +211,8 @@ func (a *ActiveMQ) GatherTopicsMetrics(acc telegraf.Accumulator, topics Topics)
tags := make(map[string]string)
tags["name"] = topic.Name
- tags["source"] = a.Server
- tags["port"] = strconv.Itoa(a.Port)
+ tags["source"] = a.baseURL.Hostname()
+ tags["port"] = a.baseURL.Port()
records["size"] = topic.Stats.Size
records["consumer_count"] = topic.Stats.ConsumerCount
@@ -209,8 +234,8 @@ func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscriber
tags["destination_name"] = subscriber.DestinationName
tags["selector"] = subscriber.Selector
tags["active"] = subscriber.Active
- tags["source"] = a.Server
- tags["port"] = strconv.Itoa(a.Port)
+ tags["source"] = a.baseURL.Hostname()
+ tags["port"] = a.baseURL.Port()
records["pending_queue_size"] = subscriber.Stats.PendingQueueSize
records["dispatched_queue_size"] = subscriber.Stats.DispatchedQueueSize
@@ -223,25 +248,34 @@ func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscriber
}
func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error {
- dataQueues, err := a.GetMetrics(QUEUES_STATS)
+ dataQueues, err := a.GetMetrics(a.QueuesURL())
+ if err != nil {
+ return err
+ }
queues := Queues{}
err = xml.Unmarshal(dataQueues, &queues)
if err != nil {
- return err
+ return fmt.Errorf("queues XML unmarshal error: %v", err)
}
- dataTopics, err := a.GetMetrics(TOPICS_STATS)
+ dataTopics, err := a.GetMetrics(a.TopicsURL())
+ if err != nil {
+ return err
+ }
topics := Topics{}
err = xml.Unmarshal(dataTopics, &topics)
if err != nil {
- return err
+ return fmt.Errorf("topics XML unmarshal error: %v", err)
}
- dataSubscribers, err := a.GetMetrics(SUBSCRIBERS_STATS)
+ dataSubscribers, err := a.GetMetrics(a.SubscribersURL())
+ if err != nil {
+ return err
+ }
subscribers := Subscribers{}
err = xml.Unmarshal(dataSubscribers, &subscribers)
if err != nil {
- return err
+ return fmt.Errorf("subscribers XML unmarshal error: %v", err)
}
a.GatherQueuesMetrics(acc, queues)
@@ -251,11 +285,27 @@ func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error {
return nil
}
+func (a *ActiveMQ) QueuesURL() string {
+ ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/queues.jsp")}
+ return a.baseURL.ResolveReference(&ref).String()
+}
+
+func (a *ActiveMQ) TopicsURL() string {
+ ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/topics.jsp")}
+ return a.baseURL.ResolveReference(&ref).String()
+}
+
+func (a *ActiveMQ) SubscribersURL() string {
+ ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/subscribers.jsp")}
+ return a.baseURL.ResolveReference(&ref).String()
+}
+
func init() {
inputs.Add("activemq", func() telegraf.Input {
return &ActiveMQ{
- Server: "localhost",
- Port: 8161,
+ Server: "localhost",
+ Port: 8161,
+ Webadmin: "admin",
}
})
}
diff --git a/plugins/inputs/activemq/activemq_test.go b/plugins/inputs/activemq/activemq_test.go
index c277af3c5e72c..407a381775adc 100644
--- a/plugins/inputs/activemq/activemq_test.go
+++ b/plugins/inputs/activemq/activemq_test.go
@@ -2,9 +2,12 @@ package activemq
import (
"encoding/xml"
+ "net/http"
+ "net/http/httptest"
"testing"
"github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
)
func TestGatherQueuesMetrics(t *testing.T) {
@@ -47,6 +50,7 @@ func TestGatherQueuesMetrics(t *testing.T) {
activeMQ := new(ActiveMQ)
activeMQ.Server = "localhost"
activeMQ.Port = 8161
+ activeMQ.Init()
activeMQ.GatherQueuesMetrics(&acc, queues)
acc.AssertContainsTaggedFields(t, "activemq_queues", records, tags)
@@ -93,6 +97,7 @@ func TestGatherTopicsMetrics(t *testing.T) {
activeMQ := new(ActiveMQ)
activeMQ.Server = "localhost"
activeMQ.Port = 8161
+ activeMQ.Init()
activeMQ.GatherTopicsMetrics(&acc, topics)
acc.AssertContainsTaggedFields(t, "activemq_topics", records, tags)
@@ -133,7 +138,43 @@ func TestGatherSubscribersMetrics(t *testing.T) {
activeMQ := new(ActiveMQ)
activeMQ.Server = "localhost"
activeMQ.Port = 8161
+ activeMQ.Init()
activeMQ.GatherSubscribersMetrics(&acc, subscribers)
acc.AssertContainsTaggedFields(t, "activemq_subscribers", records, tags)
}
+
+func TestURLs(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/admin/xml/queues.jsp":
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(" "))
+ case "/admin/xml/topics.jsp":
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(" "))
+ case "/admin/xml/subscribers.jsp":
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(" "))
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ t.Fatalf("unexpected path: " + r.URL.Path)
+ }
+ })
+
+ plugin := ActiveMQ{
+ URL: "http://" + ts.Listener.Addr().String(),
+ Webadmin: "admin",
+ }
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ err = plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ require.Len(t, acc.GetTelegrafMetrics(), 0)
+}
diff --git a/plugins/inputs/aerospike/README.md b/plugins/inputs/aerospike/README.md
index 56775d908b926..66fbbe12ec8f0 100644
--- a/plugins/inputs/aerospike/README.md
+++ b/plugins/inputs/aerospike/README.md
@@ -28,11 +28,37 @@ All metrics are attempted to be cast to integers, then booleans, then strings.
# tls_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
+
+ # Feature Options
+ # Add namespace variable to limit the namespaces executed on
+ # Leave blank to do all
+ # disable_query_namespaces = true # default false
+ # namespaces = ["namespace1", "namespace2"]
+
+ # Enable set level telmetry
+ # query_sets = true # default: false
+ # Add namespace set combinations to limit sets executed on
+ # Leave blank to do all
+ # sets = ["namespace1/set1", "namespace1/set2"]
+ # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
+
+ # Histograms
+ # enable_ttl_histogram = true # default: false
+ # enable_object_size_linear_histogram = true # default: false
+
+ # by default, aerospike produces a 100 bucket histogram
+ # this is not great for most graphing tools, this will allow
+ # the ability to squash this to a smaller number of buckets
+ # To have a balanced histogram, the number of buckets chosen
+ # should divide evenly into 100.
+ # num_histogram_buckets = 100 # default: 10
+
+
```
### Measurements:
-The aerospike metrics are under two measurement names:
+The aerospike metrics are under a few measurement names:
***aerospike_node***: These are the aerospike **node** measurements, which are
available from the aerospike `statistics` command.
@@ -55,6 +81,36 @@ are available from the aerospike `namespace/` command.
namespace/
...
```
+***aerospike_set***: These are aerospike set measurements, which
+are available from the aerospike `sets//` command.
+
+ ie,
+ ```
+ telnet localhost 3003
+ sets
+ sets/
+ sets//
+ ...
+ ```
+***aerospike_histogram_ttl***: These are aerospike ttl hisogram measurements, which
+is available from the aerospike `histogram:namespace=;[set=;]type=ttl` command.
+
+ ie,
+ ```
+ telnet localhost 3003
+ histogram:namespace=;type=ttl
+ histogram:namespace=;[set=;]type=ttl
+ ...
+ ```
+***aerospike_histogram_object_size_linear***: These are aerospike object size linear histogram measurements, which is available from the aerospike `histogram:namespace=;[set=;]type=object_size_linear` command.
+
+ ie,
+ ```
+ telnet localhost 3003
+ histogram:namespace=;type=object_size_linear
+ histogram:namespace=;[set=;]type=object_size_linear
+ ...
+ ```
### Tags:
@@ -67,10 +123,23 @@ Namespace metrics have tags:
- namespace_name
+Set metrics have tags:
+
+- namespace_name
+- set_name
+
+Histogram metrics have tags:
+- namespace_name
+- set_name (optional)
+- type
+
### Example Output:
```
% telegraf --input-filter aerospike --test
> aerospike_node,aerospike_host=localhost:3000,node_name="BB9020011AC4202" batch_error=0i,batch_index_complete=0i,batch_index_created_buffers=0i,batch_index_destroyed_buffers=0i,batch_index_error=0i,batch_index_huge_buffers=0i,batch_index_initiate=0i,batch_index_queue="0:0,0:0,0:0,0:0",batch_index_timeout=0i,batch_index_unused_buffers=0i,batch_initiate=0i,batch_queue=0i,batch_timeout=0i,client_connections=6i,cluster_integrity=true,cluster_key="8AF422E05281249E",cluster_size=1i,delete_queue=0i,demarshal_error=0i,early_tsvc_batch_sub_error=0i,early_tsvc_client_error=0i,early_tsvc_udf_sub_error=0i,fabric_connections=16i,fabric_msgs_rcvd=0i,fabric_msgs_sent=0i,heartbeat_connections=0i,heartbeat_received_foreign=0i,heartbeat_received_self=0i,info_complete=47i,info_queue=0i,migrate_allowed=true,migrate_partitions_remaining=0i,migrate_progress_recv=0i,migrate_progress_send=0i,objects=0i,paxos_principal="BB9020011AC4202",proxy_in_progress=0i,proxy_retry=0i,query_long_running=0i,query_short_running=0i,reaped_fds=0i,record_refs=0i,rw_in_progress=0i,scans_active=0i,sindex_gc_activity_dur=0i,sindex_gc_garbage_cleaned=0i,sindex_gc_garbage_found=0i,sindex_gc_inactivity_dur=0i,sindex_gc_list_creation_time=0i,sindex_gc_list_deletion_time=0i,sindex_gc_locktimedout=0i,sindex_gc_objects_validated=0i,sindex_ucgarbage_found=0i,sub_objects=0i,system_free_mem_pct=92i,system_swapping=false,tsvc_queue=0i,uptime=1457i 1468923222000000000
> aerospike_namespace,aerospike_host=localhost:3000,namespace=test,node_name="BB9020011AC4202" allow_nonxdr_writes=true,allow_xdr_writes=true,available_bin_names=32768i,batch_sub_proxy_complete=0i,batch_sub_proxy_error=0i,batch_sub_proxy_timeout=0i,batch_sub_read_error=0i,batch_sub_read_not_found=0i,batch_sub_read_success=0i,batch_sub_read_timeout=0i,batch_sub_tsvc_error=0i,batch_sub_tsvc_timeout=0i,client_delete_error=0i,client_delete_not_found=0i,client_delete_success=0i,client_delete_timeout=0i,client_lang_delete_success=0i,client_lang_error=0i,client_lang_read_success=0i,client_lang_write_success=0i,client_proxy_complete=0i,client_proxy_error=0i,client_proxy_timeout=0i,client_read_error=0i,client_read_not_found=0i,client_read_success=0i,client_read_timeout=0i,client_tsvc_error=0i,client_tsvc_timeout=0i,client_udf_complete=0i,client_udf_error=0i,client_udf_timeout=0i,client_write_error=0i,client_write_success=0i,client_write_timeout=0i,cold_start_evict_ttl=4294967295i,conflict_resolution_policy="generation",current_time=206619222i,data_in_index=false,default_ttl=432000i,device_available_pct=99i,device_free_pct=100i,device_total_bytes=4294967296i,device_used_bytes=0i,disallow_null_setname=false,enable_benchmarks_batch_sub=false,enable_benchmarks_read=false,enable_benchmarks_storage=false,enable_benchmarks_udf=false,enable_benchmarks_udf_sub=false,enable_benchmarks_write=false,enable_hist_proxy=false,enable_xdr=false,evict_hist_buckets=10000i,evict_tenths_pct=5i,evict_ttl=0i,evicted_objects=0i,expired_objects=0i,fail_generation=0i,fail_key_busy=0i,fail_record_too_big=0i,fail_xdr_forbidden=0i,geo2dsphere_within.earth_radius_meters=6371000i,geo2dsphere_within.level_mod=1i,geo2dsphere_within.max_cells=12i,geo2dsphere_within.max_level=30i,geo2dsphere_within.min_level=1i,geo2dsphere_within.strict=true,geo_region_query_cells=0i,geo_region_query_falsepos=0i,geo_region_query_points=0i,geo_region_query_reqs=0i,high_water_disk_pct=50i,high_water_memory_pct=60i,hwm_breached=false,ldt_enabled=false,ldt_gc_rate=0i,ldt_page_size=8192i,master_objects=0i,master_sub_objects=0i,max_ttl=315360000i,max_void_time=0i,memory_free_pct=100i,memory_size=1073741824i,memory_used_bytes=0i,memory_used_data_bytes=0i,memory_used_index_bytes=0i,memory_used_sindex_bytes=0i,migrate_order=5i,migrate_record_receives=0i,migrate_record_retransmits=0i,migrate_records_skipped=0i,migrate_records_transmitted=0i,migrate_rx_instances=0i,migrate_rx_partitions_active=0i,migrate_rx_partitions_initial=0i,migrate_rx_partitions_remaining=0i,migrate_sleep=1i,migrate_tx_instances=0i,migrate_tx_partitions_active=0i,migrate_tx_partitions_imbalance=0i,migrate_tx_partitions_initial=0i,migrate_tx_partitions_remaining=0i,non_expirable_objects=0i,ns_forward_xdr_writes=false,nsup_cycle_duration=0i,nsup_cycle_sleep_pct=0i,objects=0i,prole_objects=0i,prole_sub_objects=0i,query_agg=0i,query_agg_abort=0i,query_agg_avg_rec_count=0i,query_agg_error=0i,query_agg_success=0i,query_fail=0i,query_long_queue_full=0i,query_long_reqs=0i,query_lookup_abort=0i,query_lookup_avg_rec_count=0i,query_lookup_error=0i,query_lookup_success=0i,query_lookups=0i,query_reqs=0i,query_short_queue_full=0i,query_short_reqs=0i,query_udf_bg_failure=0i,query_udf_bg_success=0i,read_consistency_level_override="off",repl_factor=1i,scan_aggr_abort=0i,scan_aggr_complete=0i,scan_aggr_error=0i,scan_basic_abort=0i,scan_basic_complete=0i,scan_basic_error=0i,scan_udf_bg_abort=0i,scan_udf_bg_complete=0i,scan_udf_bg_error=0i,set_deleted_objects=0i,sets_enable_xdr=true,sindex.data_max_memory="ULONG_MAX",sindex.num_partitions=32i,single_bin=false,stop_writes=false,stop_writes_pct=90i,storage_engine="device",storage_engine.cold_start_empty=false,storage_engine.data_in_memory=true,storage_engine.defrag_lwm_pct=50i,storage_engine.defrag_queue_min=0i,storage_engine.defrag_sleep=1000i,storage_engine.defrag_startup_minimum=10i,storage_engine.disable_odirect=false,storage_engine.enable_osync=false,storage_engine.file="/opt/aerospike/data/test.dat",storage_engine.filesize=4294967296i,storage_engine.flush_max_ms=1000i,storage_engine.fsync_max_sec=0i,storage_engine.max_write_cache=67108864i,storage_engine.min_avail_pct=5i,storage_engine.post_write_queue=0i,storage_engine.scheduler_mode="null",storage_engine.write_block_size=1048576i,storage_engine.write_threads=1i,sub_objects=0i,udf_sub_lang_delete_success=0i,udf_sub_lang_error=0i,udf_sub_lang_read_success=0i,udf_sub_lang_write_success=0i,udf_sub_tsvc_error=0i,udf_sub_tsvc_timeout=0i,udf_sub_udf_complete=0i,udf_sub_udf_error=0i,udf_sub_udf_timeout=0i,write_commit_level_override="off",xdr_write_error=0i,xdr_write_success=0i,xdr_write_timeout=0i,{test}_query_hist_track_back=300i,{test}_query_hist_track_slice=10i,{test}_query_hist_track_thresholds="1,8,64",{test}_read_hist_track_back=300i,{test}_read_hist_track_slice=10i,{test}_read_hist_track_thresholds="1,8,64",{test}_udf_hist_track_back=300i,{test}_udf_hist_track_slice=10i,{test}_udf_hist_track_thresholds="1,8,64",{test}_write_hist_track_back=300i,{test}_write_hist_track_slice=10i,{test}_write_hist_track_thresholds="1,8,64" 1468923222000000000
+> aerospike_set,aerospike_host=localhost:3000,node_name=BB99458B42826B0,set=test/test disable_eviction=false,memory_data_bytes=0i,objects=0i,set_enable_xdr="use-default",stop_writes_count=0i,tombstones=0i,truncate_lut=0i 1598033805000000000
+>> aerospike_histogram_ttl,aerospike_host=localhost:3000,namespace=test,node_name=BB98EE5B42826B0,set=test 0=0i,1=0i,10=0i,11=0i,12=0i,13=0i,14=0i,15=0i,16=0i,17=0i,18=0i,19=0i,2=0i,20=0i,21=0i,22=0i,23=0i,24=0i,25=0i,26=0i,27=0i,28=0i,29=0i,3=0i,30=0i,31=0i,32=0i,33=0i,34=0i,35=0i,36=0i,37=0i,38=0i,39=0i,4=0i,40=0i,41=0i,42=0i,43=0i,44=0i,45=0i,46=0i,47=0i,48=0i,49=0i,5=0i,50=0i,51=0i,52=0i,53=0i,54=0i,55=0i,56=0i,57=0i,58=0i,59=0i,6=0i,60=0i,61=0i,62=0i,63=0i,64=0i,65=0i,66=0i,67=0i,68=0i,69=0i,7=0i,70=0i,71=0i,72=0i,73=0i,74=0i,75=0i,76=0i,77=0i,78=0i,79=0i,8=0i,80=0i,81=0i,82=0i,83=0i,84=0i,85=0i,86=0i,87=0i,88=0i,89=0i,9=0i,90=0i,91=0i,92=0i,93=0i,94=0i,95=0i,96=0i,97=0i,98=0i,99=0i 1598034191000000000
+
```
diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go
index d4c4fce85f2bf..7ab15d18168f7 100644
--- a/plugins/inputs/aerospike/aerospike.go
+++ b/plugins/inputs/aerospike/aerospike.go
@@ -2,6 +2,8 @@ package aerospike
import (
"crypto/tls"
+ "fmt"
+ "math"
"net"
"strconv"
"strings"
@@ -9,7 +11,7 @@ import (
"time"
"github.com/influxdata/telegraf"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
as "github.com/aerospike/aerospike-client-go"
@@ -27,6 +29,17 @@ type Aerospike struct {
initialized bool
tlsConfig *tls.Config
+
+ DisableQueryNamespaces bool `toml:"disable_query_namespaces"`
+ Namespaces []string `toml:"namespaces"`
+
+ QuerySets bool `toml:"query_sets"`
+ Sets []string `toml:"sets"`
+
+ EnableTTLHistogram bool `toml:"enable_ttl_histogram"`
+ EnableObjectSizeLinearHistogram bool `toml:"enable_object_size_linear_histogram"`
+
+ NumberHistogramBuckets int `toml:"num_histogram_buckets"`
}
var sampleConfig = `
@@ -45,7 +58,28 @@ var sampleConfig = `
# tls_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
- `
+
+ # Feature Options
+ # Add namespace variable to limit the namespaces executed on
+ # Leave blank to do all
+ # disable_query_namespaces = true # default false
+ # namespaces = ["namespace1", "namespace2"]
+
+ # Enable set level telmetry
+ # query_sets = true # default: false
+ # Add namespace set combinations to limit sets executed on
+ # Leave blank to do all sets
+ # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
+
+ # Histograms
+ # enable_ttl_histogram = true # default: false
+ # enable_object_size_linear_histogram = true # default: false
+
+ # by default, aerospike produces a 100 bucket histogram
+ # this is not great for most graphing tools, this will allow
+ # the ability to squash this to a smaller number of buckets
+ # num_histogram_buckets = 100 # default: 10
+`
func (a *Aerospike) SampleConfig() string {
return sampleConfig
@@ -68,6 +102,14 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
a.initialized = true
}
+ if a.NumberHistogramBuckets == 0 {
+ a.NumberHistogramBuckets = 10
+ } else if a.NumberHistogramBuckets > 100 {
+ a.NumberHistogramBuckets = 100
+ } else if a.NumberHistogramBuckets < 1 {
+ a.NumberHistogramBuckets = 10
+ }
+
if len(a.Servers) == 0 {
return a.gatherServer("127.0.0.1:3000", acc)
}
@@ -85,8 +127,8 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
return nil
}
-func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
- host, port, err := net.SplitHostPort(hostport)
+func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) error {
+ host, port, err := net.SplitHostPort(hostPort)
if err != nil {
return err
}
@@ -108,51 +150,323 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
nodes := c.GetNodes()
for _, n := range nodes {
- tags := map[string]string{
- "aerospike_host": hostport,
- "node_name": n.GetName(),
+ stats, err := a.getNodeInfo(n)
+ if err != nil {
+ return err
}
- fields := make(map[string]interface{})
- stats, err := as.RequestNodeStats(n)
+ a.parseNodeInfo(stats, hostPort, n.GetName(), acc)
+
+ namespaces, err := a.getNamespaces(n)
if err != nil {
return err
}
- for k, v := range stats {
- val := parseValue(v)
- fields[strings.Replace(k, "-", "_", -1)] = val
+
+ if !a.DisableQueryNamespaces {
+ // Query Namespaces
+ for _, namespace := range namespaces {
+ stats, err = a.getNamespaceInfo(namespace, n)
+
+ if err != nil {
+ continue
+ } else {
+ a.parseNamespaceInfo(stats, hostPort, namespace, n.GetName(), acc)
+ }
+
+ if a.EnableTTLHistogram {
+ err = a.getTTLHistogram(hostPort, namespace, "", n, acc)
+ if err != nil {
+ continue
+ }
+ }
+ if a.EnableObjectSizeLinearHistogram {
+ err = a.getObjectSizeLinearHistogram(hostPort, namespace, "", n, acc)
+ if err != nil {
+ continue
+ }
+ }
+ }
}
- acc.AddFields("aerospike_node", fields, tags, time.Now())
+ if a.QuerySets {
+ namespaceSets, err := a.getSets(n)
+ if err == nil {
+ for _, namespaceSet := range namespaceSets {
+ namespace, set := splitNamespaceSet(namespaceSet)
+
+ stats, err := a.getSetInfo(namespaceSet, n)
+
+ if err != nil {
+ continue
+ } else {
+ a.parseSetInfo(stats, hostPort, namespaceSet, n.GetName(), acc)
+ }
+
+ if a.EnableTTLHistogram {
+ err = a.getTTLHistogram(hostPort, namespace, set, n, acc)
+ if err != nil {
+ continue
+ }
+ }
+
+ if a.EnableObjectSizeLinearHistogram {
+ err = a.getObjectSizeLinearHistogram(hostPort, namespace, set, n, acc)
+ if err != nil {
+ continue
+ }
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (a *Aerospike) getNodeInfo(n *as.Node) (map[string]string, error) {
+ stats, err := as.RequestNodeStats(n)
+ if err != nil {
+ return nil, err
+ }
+
+ return stats, nil
+}
+
+func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, nodeName string, acc telegraf.Accumulator) {
+ tags := map[string]string{
+ "aerospike_host": hostPort,
+ "node_name": nodeName,
+ }
+ fields := make(map[string]interface{})
+
+ for k, v := range stats {
+ val := parseValue(v)
+ fields[strings.Replace(k, "-", "_", -1)] = val
+ }
+ acc.AddFields("aerospike_node", fields, tags, time.Now())
+
+ return
+}
+
+func (a *Aerospike) getNamespaces(n *as.Node) ([]string, error) {
+ var namespaces []string
+ if len(a.Namespaces) <= 0 {
info, err := as.RequestNodeInfo(n, "namespaces")
if err != nil {
- return err
+ return namespaces, err
+ }
+ namespaces = strings.Split(info["namespaces"], ";")
+ } else {
+ namespaces = a.Namespaces
+ }
+
+ return namespaces, nil
+}
+
+func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node) (map[string]string, error) {
+ stats, err := as.RequestNodeInfo(n, "namespace/"+namespace)
+ if err != nil {
+ return nil, err
+ }
+
+ return stats, err
+}
+func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, namespace string, nodeName string, acc telegraf.Accumulator) {
+
+ nTags := map[string]string{
+ "aerospike_host": hostPort,
+ "node_name": nodeName,
+ }
+ nTags["namespace"] = namespace
+ nFields := make(map[string]interface{})
+
+ stat := strings.Split(stats["namespace/"+namespace], ";")
+ for _, pair := range stat {
+ parts := strings.Split(pair, "=")
+ if len(parts) < 2 {
+ continue
+ }
+ val := parseValue(parts[1])
+ nFields[strings.Replace(parts[0], "-", "_", -1)] = val
+ }
+ acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
+
+ return
+}
+
+func (a *Aerospike) getSets(n *as.Node) ([]string, error) {
+ var namespaceSets []string
+ // Gather all sets
+ if len(a.Sets) <= 0 {
+ stats, err := as.RequestNodeInfo(n, "sets")
+ if err != nil {
+ return namespaceSets, err
}
- namespaces := strings.Split(info["namespaces"], ";")
- for _, namespace := range namespaces {
- nTags := map[string]string{
- "aerospike_host": hostport,
- "node_name": n.GetName(),
+ stat := strings.Split(stats["sets"], ";")
+ for _, setStats := range stat {
+ // setInfo is "ns=test:set=foo:objects=1:tombstones=0"
+ if len(setStats) > 0 {
+ pairs := strings.Split(setStats, ":")
+ var ns, set string
+ for _, pair := range pairs {
+ parts := strings.Split(pair, "=")
+ if len(parts) == 2 {
+ if parts[0] == "ns" {
+ ns = parts[1]
+ }
+ if parts[0] == "set" {
+ set = parts[1]
+ }
+ }
+ }
+ if len(ns) > 0 && len(set) > 0 {
+ namespaceSets = append(namespaceSets, fmt.Sprintf("%s/%s", ns, set))
+ }
}
- nTags["namespace"] = namespace
- nFields := make(map[string]interface{})
- info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
- if err != nil {
+ }
+ } else { // User has passed in sets
+ namespaceSets = a.Sets
+ }
+
+ return namespaceSets, nil
+}
+
+func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node) (map[string]string, error) {
+ stats, err := as.RequestNodeInfo(n, "sets/"+namespaceSet)
+ if err != nil {
+ return nil, err
+ }
+ return stats, nil
+}
+
+func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, namespaceSet string, nodeName string, acc telegraf.Accumulator) {
+
+ stat := strings.Split(
+ strings.TrimSuffix(
+ stats[fmt.Sprintf("sets/%s", namespaceSet)], ";"), ":")
+ nTags := map[string]string{
+ "aerospike_host": hostPort,
+ "node_name": nodeName,
+ "set": namespaceSet,
+ }
+ nFields := make(map[string]interface{})
+ for _, part := range stat {
+ pieces := strings.Split(part, "=")
+ if len(pieces) < 2 {
+ continue
+ }
+
+ val := parseValue(pieces[1])
+ nFields[strings.Replace(pieces[0], "-", "_", -1)] = val
+ }
+ acc.AddFields("aerospike_set", nFields, nTags, time.Now())
+
+ return
+}
+
+func (a *Aerospike) getTTLHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error {
+ stats, err := a.getHistogram(namespace, set, "ttl", n)
+ if err != nil {
+ return err
+ }
+ a.parseHistogram(stats, hostPort, namespace, set, "ttl", n.GetName(), acc)
+
+ return nil
+}
+
+func (a *Aerospike) getObjectSizeLinearHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error {
+
+ stats, err := a.getHistogram(namespace, set, "object-size-linear", n)
+ if err != nil {
+ return err
+ }
+ a.parseHistogram(stats, hostPort, namespace, set, "object-size-linear", n.GetName(), acc)
+
+ return nil
+}
+
+func (a *Aerospike) getHistogram(namespace string, set string, histogramType string, n *as.Node) (map[string]string, error) {
+ var queryArg string
+ if len(set) > 0 {
+ queryArg = fmt.Sprintf("histogram:type=%s;namespace=%v;set=%v", histogramType, namespace, set)
+ } else {
+ queryArg = fmt.Sprintf("histogram:type=%s;namespace=%v", histogramType, namespace)
+ }
+
+ stats, err := as.RequestNodeInfo(n, queryArg)
+ if err != nil {
+ return nil, err
+ }
+ return stats, nil
+
+}
+
+func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, namespace string, set string, histogramType string, nodeName string, acc telegraf.Accumulator) {
+
+ nTags := map[string]string{
+ "aerospike_host": hostPort,
+ "node_name": nodeName,
+ "namespace": namespace,
+ }
+
+ if len(set) > 0 {
+ nTags["set"] = set
+ }
+
+ nFields := make(map[string]interface{})
+
+ for _, stat := range stats {
+ for _, part := range strings.Split(stat, ":") {
+ pieces := strings.Split(part, "=")
+ if len(pieces) < 2 {
continue
}
- stats := strings.Split(info["namespace/"+namespace], ";")
- for _, stat := range stats {
- parts := strings.Split(stat, "=")
- if len(parts) < 2 {
- continue
+
+ if pieces[0] == "buckets" {
+ buckets := strings.Split(pieces[1], ",")
+
+ // Normalize incase of less buckets than expected
+ numRecordsPerBucket := 1
+ if len(buckets) > a.NumberHistogramBuckets {
+ numRecordsPerBucket = int(math.Ceil((float64(len(buckets)) / float64(a.NumberHistogramBuckets))))
}
- val := parseValue(parts[1])
- nFields[strings.Replace(parts[0], "-", "_", -1)] = val
+
+ bucketCount := 0
+ bucketSum := int64(0) // cast to int64, as can have large object sums
+ bucketName := 0
+ for i, bucket := range buckets {
+ // Sum records and increment bucket collection counter
+ if bucketCount < numRecordsPerBucket {
+ bucketSum = bucketSum + parseValue(bucket).(int64)
+ bucketCount++
+ }
+
+ // Store records and reset counters
+ // increment bucket name
+ if bucketCount == numRecordsPerBucket {
+ nFields[strconv.Itoa(bucketName)] = bucketSum
+
+ bucketCount = 0
+ bucketSum = 0
+ bucketName++
+ } else if i == (len(buckets) - 1) {
+ // base/edge case where final bucket does not fully
+ // fill number of records per bucket
+ nFields[strconv.Itoa(bucketName)] = bucketSum
+ }
+ }
+
}
- acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
}
}
- return nil
+
+ acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.Replace(histogramType, "-", "_", -1)), nFields, nTags, time.Now())
+
+ return
+}
+
+func splitNamespaceSet(namespaceSet string) (string, string) {
+ split := strings.Split(namespaceSet, "/")
+ return split[0], split[1]
}
func parseValue(v string) interface{} {
diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go
index 724102195128e..ee69f0049f401 100644
--- a/plugins/inputs/aerospike/aerospike_test.go
+++ b/plugins/inputs/aerospike/aerospike_test.go
@@ -3,6 +3,7 @@ package aerospike
import (
"testing"
+ as "github.com/aerospike/aerospike-client-go"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -26,7 +27,11 @@ func TestAerospikeStatistics(t *testing.T) {
assert.True(t, acc.HasTag("aerospike_node", "node_name"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasTag("aerospike_namespace", "node_name"))
- assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
+ assert.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error"))
+
+ namespaceName := acc.TagValue("aerospike_namespace", "namespace")
+ assert.Equal(t, namespaceName, "test")
+
}
func TestAerospikeStatisticsPartialErr(t *testing.T) {
@@ -42,19 +47,419 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {
}
var acc testutil.Accumulator
+ err := acc.GatherError(a.Gather)
+
+ require.Error(t, err)
+
+ assert.True(t, acc.HasMeasurement("aerospike_node"))
+ assert.True(t, acc.HasMeasurement("aerospike_namespace"))
+ assert.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error"))
+ namespaceName := acc.TagSetValue("aerospike_namespace", "namespace")
+ assert.Equal(t, namespaceName, "test")
+}
+
+func TestSelectNamepsaces(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping aerospike integration tests.")
+ }
+
+ // Select nonexistent namespace
+ a := &Aerospike{
+ Servers: []string{testutil.GetLocalHost() + ":3000"},
+ Namespaces: []string{"notTest"},
+ }
+
+ var acc testutil.Accumulator
+
+ err := acc.GatherError(a.Gather)
+ require.NoError(t, err)
+
+ assert.True(t, acc.HasMeasurement("aerospike_node"))
+ assert.True(t, acc.HasTag("aerospike_node", "node_name"))
+ assert.True(t, acc.HasMeasurement("aerospike_namespace"))
+ assert.True(t, acc.HasTag("aerospike_namespace", "node_name"))
+
+ // Expect only 1 namespace
+ count := 0
+ for _, p := range acc.Metrics {
+ if p.Measurement == "aerospike_namespace" {
+ count += 1
+ }
+ }
+ assert.Equal(t, count, 1)
+
+ // expect namespace to have no fields as nonexistent
+ assert.False(t, acc.HasInt64Field("aerospke_namespace", "appeals_tx_remaining"))
+}
+
+func TestDisableQueryNamespaces(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping aerospike integration tests.")
+ }
+
+ a := &Aerospike{
+ Servers: []string{
+ testutil.GetLocalHost() + ":3000",
+ },
+ DisableQueryNamespaces: true,
+ }
+
+ var acc testutil.Accumulator
+ err := acc.GatherError(a.Gather)
+ require.NoError(t, err)
- require.Error(t, acc.GatherError(a.Gather))
+ assert.True(t, acc.HasMeasurement("aerospike_node"))
+ assert.False(t, acc.HasMeasurement("aerospike_namespace"))
+
+ a.DisableQueryNamespaces = false
+ err = acc.GatherError(a.Gather)
+ require.NoError(t, err)
assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
- assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}
+func TestQuerySets(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping aerospike integration tests.")
+ }
+
+ // create a set
+ // test is the default namespace from aerospike
+ policy := as.NewClientPolicy()
+ client, err := as.NewClientWithPolicy(policy, testutil.GetLocalHost(), 3000)
+
+ key, err := as.NewKey("test", "foo", 123)
+ require.NoError(t, err)
+ bins := as.BinMap{
+ "e": 2,
+ "pi": 3,
+ }
+ err = client.Add(nil, key, bins)
+ require.NoError(t, err)
+
+ key, err = as.NewKey("test", "bar", 1234)
+ require.NoError(t, err)
+ bins = as.BinMap{
+ "e": 2,
+ "pi": 3,
+ }
+ err = client.Add(nil, key, bins)
+ require.NoError(t, err)
+
+ a := &Aerospike{
+ Servers: []string{
+ testutil.GetLocalHost() + ":3000",
+ },
+ QuerySets: true,
+ DisableQueryNamespaces: true,
+ }
+
+ var acc testutil.Accumulator
+ err = acc.GatherError(a.Gather)
+ require.NoError(t, err)
+
+ assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo"))
+ assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar"))
+
+ assert.True(t, acc.HasMeasurement("aerospike_set"))
+ assert.True(t, acc.HasTag("aerospike_set", "set"))
+ assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
+
+}
+
+func TestSelectQuerySets(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping aerospike integration tests.")
+ }
+
+ // create a set
+ // test is the default namespace from aerospike
+ policy := as.NewClientPolicy()
+ client, err := as.NewClientWithPolicy(policy, testutil.GetLocalHost(), 3000)
+
+ key, err := as.NewKey("test", "foo", 123)
+ require.NoError(t, err)
+ bins := as.BinMap{
+ "e": 2,
+ "pi": 3,
+ }
+ err = client.Add(nil, key, bins)
+ require.NoError(t, err)
+
+ key, err = as.NewKey("test", "bar", 1234)
+ require.NoError(t, err)
+ bins = as.BinMap{
+ "e": 2,
+ "pi": 3,
+ }
+ err = client.Add(nil, key, bins)
+ require.NoError(t, err)
+
+ a := &Aerospike{
+ Servers: []string{
+ testutil.GetLocalHost() + ":3000",
+ },
+ QuerySets: true,
+ Sets: []string{"test/foo"},
+ DisableQueryNamespaces: true,
+ }
+
+ var acc testutil.Accumulator
+ err = acc.GatherError(a.Gather)
+ require.NoError(t, err)
+
+ assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo"))
+ assert.False(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar"))
+
+ assert.True(t, acc.HasMeasurement("aerospike_set"))
+ assert.True(t, acc.HasTag("aerospike_set", "set"))
+ assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
+
+}
+
+func TestDisableTTLHistogram(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping aerospike integration tests.")
+ }
+ a := &Aerospike{
+ Servers: []string{
+ testutil.GetLocalHost() + ":3000",
+ },
+ QuerySets: true,
+ EnableTTLHistogram: false,
+ }
+ /*
+ No measurement exists
+ */
+ var acc testutil.Accumulator
+ err := acc.GatherError(a.Gather)
+ require.NoError(t, err)
+
+ assert.False(t, acc.HasMeasurement("aerospike_histogram_ttl"))
+}
+func TestTTLHistogram(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping aerospike integration tests.")
+ } else {
+ t.Skip("Skipping, only passes if the aerospike db has been running for at least 1 hour")
+ }
+ a := &Aerospike{
+ Servers: []string{
+ testutil.GetLocalHost() + ":3000",
+ },
+ QuerySets: true,
+ EnableTTLHistogram: true,
+ }
+ /*
+ Produces histogram
+ Measurment exists
+ Has appropriate tags (node name etc)
+ Has appropriate keys (time:value)
+ may be able to leverage histogram plugin
+ */
+ var acc testutil.Accumulator
+ err := acc.GatherError(a.Gather)
+ require.NoError(t, err)
+
+ assert.True(t, acc.HasMeasurement("aerospike_histogram_ttl"))
+ assert.True(t, FindTagValue(&acc, "aerospike_histogram_ttl", "namespace", "test"))
+
+}
+func TestDisableObjectSizeLinearHistogram(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping aerospike integration tests.")
+ }
+ a := &Aerospike{
+ Servers: []string{
+ testutil.GetLocalHost() + ":3000",
+ },
+ QuerySets: true,
+ EnableObjectSizeLinearHistogram: false,
+ }
+ /*
+ No Measurement
+ */
+ var acc testutil.Accumulator
+ err := acc.GatherError(a.Gather)
+ require.NoError(t, err)
+
+ assert.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear"))
+}
+func TestObjectSizeLinearHistogram(t *testing.T) {
+
+ if testing.Short() {
+ t.Skip("Skipping aerospike integration tests.")
+ } else {
+ t.Skip("Skipping, only passes if the aerospike db has been running for at least 1 hour")
+ }
+ a := &Aerospike{
+ Servers: []string{
+ testutil.GetLocalHost() + ":3000",
+ },
+ QuerySets: true,
+ EnableObjectSizeLinearHistogram: true,
+ }
+ /*
+ Produces histogram
+ Measurment exists
+ Has appropriate tags (node name etc)
+ Has appropriate keys (time:value)
+
+ */
+ var acc testutil.Accumulator
+ err := acc.GatherError(a.Gather)
+ require.NoError(t, err)
+ assert.True(t, acc.HasMeasurement("aerospike_histogram_object_size_linear"))
+ assert.True(t, FindTagValue(&acc, "aerospike_histogram_object_size_linear", "namespace", "test"))
+}
+
+func TestParseNodeInfo(t *testing.T) {
+ a := &Aerospike{}
+ var acc testutil.Accumulator
+
+ stats := map[string]string{
+ "early_tsvc_from_proxy_error": "0",
+ "cluster_principal": "BB9020012AC4202",
+ "cluster_is_member": "true",
+ }
+
+ expectedFields := map[string]interface{}{
+ "early_tsvc_from_proxy_error": int64(0),
+ "cluster_principal": "BB9020012AC4202",
+ "cluster_is_member": true,
+ }
+
+ expectedTags := map[string]string{
+ "aerospike_host": "127.0.0.1:3000",
+ "node_name": "TestNodeName",
+ }
+
+ a.parseNodeInfo(stats, "127.0.0.1:3000", "TestNodeName", &acc)
+ acc.AssertContainsTaggedFields(t, "aerospike_node", expectedFields, expectedTags)
+}
+
+func TestParseNamespaceInfo(t *testing.T) {
+ a := &Aerospike{}
+ var acc testutil.Accumulator
+
+ stats := map[string]string{
+ "namespace/test": "ns_cluster_size=1;effective_replication_factor=1;objects=2;tombstones=0;master_objects=2",
+ }
+
+ expectedFields := map[string]interface{}{
+ "ns_cluster_size": int64(1),
+ "effective_replication_factor": int64(1),
+ "tombstones": int64(0),
+ "objects": int64(2),
+ "master_objects": int64(2),
+ }
+
+ expectedTags := map[string]string{
+ "aerospike_host": "127.0.0.1:3000",
+ "node_name": "TestNodeName",
+ "namespace": "test",
+ }
+
+ a.parseNamespaceInfo(stats, "127.0.0.1:3000", "test", "TestNodeName", &acc)
+ acc.AssertContainsTaggedFields(t, "aerospike_namespace", expectedFields, expectedTags)
+}
+
+func TestParseSetInfo(t *testing.T) {
+ a := &Aerospike{}
+
+ var acc testutil.Accumulator
+
+ stats := map[string]string{
+ "sets/test/foo": "objects=1:tombstones=0:memory_data_bytes=26;",
+ }
+
+ expectedFields := map[string]interface{}{
+ "objects": int64(1),
+ "tombstones": int64(0),
+ "memory_data_bytes": int64(26),
+ }
+
+ expectedTags := map[string]string{
+ "aerospike_host": "127.0.0.1:3000",
+ "node_name": "TestNodeName",
+ "set": "test/foo",
+ }
+ a.parseSetInfo(stats, "127.0.0.1:3000", "test/foo", "TestNodeName", &acc)
+ acc.AssertContainsTaggedFields(t, "aerospike_set", expectedFields, expectedTags)
+}
+
+func TestParseHistogramSet(t *testing.T) {
+ a := &Aerospike{
+ NumberHistogramBuckets: 10,
+ }
+
+ var acc testutil.Accumulator
+
+ stats := map[string]string{
+ "histogram:type=object-size-linear;namespace=test;set=foo": "units=bytes:hist-width=1048576:bucket-width=1024:buckets=0,1,3,1,6,1,9,1,12,1,15,1,18",
+ }
+
+ expectedFields := map[string]interface{}{
+ "0": int64(1),
+ "1": int64(4),
+ "2": int64(7),
+ "3": int64(10),
+ "4": int64(13),
+ "5": int64(16),
+ "6": int64(18),
+ }
+
+ expectedTags := map[string]string{
+ "aerospike_host": "127.0.0.1:3000",
+ "node_name": "TestNodeName",
+ "namespace": "test",
+ "set": "foo",
+ }
+
+ a.parseHistogram(stats, "127.0.0.1:3000", "test", "foo", "object-size-linear", "TestNodeName", &acc)
+ acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags)
+
+}
+func TestParseHistogramNamespace(t *testing.T) {
+ a := &Aerospike{
+ NumberHistogramBuckets: 10,
+ }
+
+ var acc testutil.Accumulator
+
+ stats := map[string]string{
+ "histogram:type=object-size-linear;namespace=test;set=foo": " units=bytes:hist-width=1048576:bucket-width=1024:buckets=0,1,3,1,6,1,9,1,12,1,15,1,18",
+ }
+
+ expectedFields := map[string]interface{}{
+ "0": int64(1),
+ "1": int64(4),
+ "2": int64(7),
+ "3": int64(10),
+ "4": int64(13),
+ "5": int64(16),
+ "6": int64(18),
+ }
+
+ expectedTags := map[string]string{
+ "aerospike_host": "127.0.0.1:3000",
+ "node_name": "TestNodeName",
+ "namespace": "test",
+ }
+
+ a.parseHistogram(stats, "127.0.0.1:3000", "test", "", "object-size-linear", "TestNodeName", &acc)
+ acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags)
+
+}
func TestAerospikeParseValue(t *testing.T) {
// uint64 with value bigger than int64 max
val := parseValue("18446744041841121751")
require.Equal(t, uint64(18446744041841121751), val)
+ val = parseValue("true")
+ require.Equal(t, true, val)
+
// int values
val = parseValue("42")
require.Equal(t, val, int64(42), "must be parsed as int")
@@ -63,3 +468,16 @@ func TestAerospikeParseValue(t *testing.T) {
val = parseValue("BB977942A2CA502")
require.Equal(t, val, `BB977942A2CA502`, "must be left as string")
}
+
+func FindTagValue(acc *testutil.Accumulator, measurement string, key string, value string) bool {
+ for _, p := range acc.Metrics {
+ if p.Measurement == measurement {
+ v, ok := p.Tags[key]
+ if ok && v == value {
+ return true
+ }
+
+ }
+ }
+ return false
+}
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 7c592e925b0c5..986c501ed7aa9 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -5,7 +5,9 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
+ _ "github.com/influxdata/telegraf/plugins/inputs/apcupsd"
_ "github.com/influxdata/telegraf/plugins/inputs/aurora"
+ _ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd"
_ "github.com/influxdata/telegraf/plugins/inputs/bind"
@@ -15,6 +17,8 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
_ "github.com/influxdata/telegraf/plugins/inputs/chrony"
+ _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_mdt"
+ _ "github.com/influxdata/telegraf/plugins/inputs/clickhouse"
_ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub"
_ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push"
_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
@@ -30,16 +34,23 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
_ "github.com/influxdata/telegraf/plugins/inputs/docker"
+ _ "github.com/influxdata/telegraf/plugins/inputs/docker_log"
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
+ _ "github.com/influxdata/telegraf/plugins/inputs/ecs"
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
+ _ "github.com/influxdata/telegraf/plugins/inputs/ethtool"
+ _ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
+ _ "github.com/influxdata/telegraf/plugins/inputs/execd"
_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
_ "github.com/influxdata/telegraf/plugins/inputs/fibaro"
_ "github.com/influxdata/telegraf/plugins/inputs/file"
_ "github.com/influxdata/telegraf/plugins/inputs/filecount"
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
+ _ "github.com/influxdata/telegraf/plugins/inputs/fireboard"
_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
_ "github.com/influxdata/telegraf/plugins/inputs/github"
+ _ "github.com/influxdata/telegraf/plugins/inputs/gnmi"
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
@@ -48,8 +59,10 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/icinga2"
+ _ "github.com/influxdata/telegraf/plugins/inputs/infiniband"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener"
+ _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/internal"
_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
@@ -69,17 +82,22 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory"
_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
+ _ "github.com/influxdata/telegraf/plugins/inputs/lanz"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
_ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs"
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
+ _ "github.com/influxdata/telegraf/plugins/inputs/logstash"
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
+ _ "github.com/influxdata/telegraf/plugins/inputs/marklogic"
_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter"
_ "github.com/influxdata/telegraf/plugins/inputs/mem"
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
+ _ "github.com/influxdata/telegraf/plugins/inputs/modbus"
_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
+ _ "github.com/influxdata/telegraf/plugins/inputs/monit"
_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/multifile"
_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
@@ -91,15 +109,20 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api"
+ _ "github.com/influxdata/telegraf/plugins/inputs/nginx_sts"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx_upstream_check"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts"
+ _ "github.com/influxdata/telegraf/plugins/inputs/nsd"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi"
+ _ "github.com/influxdata/telegraf/plugins/inputs/opcua"
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
+ _ "github.com/influxdata/telegraf/plugins/inputs/openntpd"
_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
+ _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/pf"
_ "github.com/influxdata/telegraf/plugins/inputs/pgbouncer"
@@ -109,29 +132,37 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
+ _ "github.com/influxdata/telegraf/plugins/inputs/powerdns_recursor"
_ "github.com/influxdata/telegraf/plugins/inputs/processes"
_ "github.com/influxdata/telegraf/plugins/inputs/procstat"
_ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
+ _ "github.com/influxdata/telegraf/plugins/inputs/proxmox"
_ "github.com/influxdata/telegraf/plugins/inputs/puppetagent"
_ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq"
_ "github.com/influxdata/telegraf/plugins/inputs/raindrops"
+ _ "github.com/influxdata/telegraf/plugins/inputs/redfish"
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
+ _ "github.com/influxdata/telegraf/plugins/inputs/sflow"
_ "github.com/influxdata/telegraf/plugins/inputs/smart"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
+ _ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap"
_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/solr"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/stackdriver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
+ _ "github.com/influxdata/telegraf/plugins/inputs/suricata"
_ "github.com/influxdata/telegraf/plugins/inputs/swap"
+ _ "github.com/influxdata/telegraf/plugins/inputs/synproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/syslog"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
_ "github.com/influxdata/telegraf/plugins/inputs/system"
+ _ "github.com/influxdata/telegraf/plugins/inputs/systemd_units"
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
@@ -142,11 +173,13 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/unbound"
+ _ "github.com/influxdata/telegraf/plugins/inputs/uwsgi"
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
_ "github.com/influxdata/telegraf/plugins/inputs/vsphere"
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
_ "github.com/influxdata/telegraf/plugins/inputs/win_services"
+ _ "github.com/influxdata/telegraf/plugins/inputs/wireguard"
_ "github.com/influxdata/telegraf/plugins/inputs/wireless"
_ "github.com/influxdata/telegraf/plugins/inputs/x509_cert"
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md
index ca1af800cd7b3..8ef6d6fe2a8e9 100644
--- a/plugins/inputs/amqp_consumer/README.md
+++ b/plugins/inputs/amqp_consumer/README.md
@@ -1,6 +1,6 @@
# AMQP Consumer Input Plugin
-This plugin provides a consumer for use with AMQP 0-9-1, a promenent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).
+This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).
Metrics are read from a topic exchange using the configured queue and binding_key.
@@ -27,7 +27,7 @@ The following defaults are known to work with RabbitMQ:
# username = ""
# password = ""
- ## Exchange to declare and consume from.
+ ## Name of the exchange to declare. If unset, no exchange will be declared.
exchange = "telegraf"
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
@@ -41,7 +41,7 @@ The following defaults are known to work with RabbitMQ:
## Additional exchange arguments.
# exchange_arguments = { }
- # exchange_arguments = {"hash_propery" = "timestamp"}
+ # exchange_arguments = {"hash_property" = "timestamp"}
## AMQP queue name
queue = "telegraf"
@@ -49,7 +49,11 @@ The following defaults are known to work with RabbitMQ:
## AMQP queue durability can be "transient" or "durable".
queue_durability = "durable"
- ## Binding Key
+ ## If true, queue will be passively declared.
+ # queue_passive = false
+
+ ## A binding between the exchange and queue using this binding key is
+ ## created. If unset, no binding is created.
binding_key = "#"
## Maximum number of messages server should give to the worker.
@@ -77,6 +81,10 @@ The following defaults are known to work with RabbitMQ:
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
+ ## Content encoding for message payloads, can be set to "gzip" to or
+ ## "identity" to apply no encoding.
+ # content_encoding = "identity"
+
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go
index d80a3683bab09..d98b1c19f4ab3 100644
--- a/plugins/inputs/amqp_consumer/amqp_consumer.go
+++ b/plugins/inputs/amqp_consumer/amqp_consumer.go
@@ -4,14 +4,14 @@ import (
"context"
"errors"
"fmt"
- "log"
"math/rand"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/streadway/amqp"
@@ -40,6 +40,7 @@ type AMQPConsumer struct {
// Queue Name
Queue string `toml:"queue"`
QueueDurability string `toml:"queue_durability"`
+ QueuePassive bool `toml:"queue_passive"`
// Binding Key
BindingKey string `toml:"binding_key"`
@@ -52,12 +53,16 @@ type AMQPConsumer struct {
AuthMethod string
tls.ClientConfig
+ ContentEncoding string `toml:"content_encoding"`
+ Log telegraf.Logger
+
deliveries map[telegraf.TrackingID]amqp.Delivery
- parser parsers.Parser
- conn *amqp.Connection
- wg *sync.WaitGroup
- cancel context.CancelFunc
+ parser parsers.Parser
+ conn *amqp.Connection
+ wg *sync.WaitGroup
+ cancel context.CancelFunc
+ decoder internal.ContentDecoder
}
type externalAuth struct{}
@@ -97,7 +102,7 @@ func (a *AMQPConsumer) SampleConfig() string {
# username = ""
# password = ""
- ## Exchange to declare and consume from.
+ ## Name of the exchange to declare. If unset, no exchange will be declared.
exchange = "telegraf"
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
@@ -111,7 +116,7 @@ func (a *AMQPConsumer) SampleConfig() string {
## Additional exchange arguments.
# exchange_arguments = { }
- # exchange_arguments = {"hash_propery" = "timestamp"}
+ # exchange_arguments = {"hash_property" = "timestamp"}
## AMQP queue name.
queue = "telegraf"
@@ -119,7 +124,11 @@ func (a *AMQPConsumer) SampleConfig() string {
## AMQP queue durability can be "transient" or "durable".
queue_durability = "durable"
- ## Binding Key.
+ ## If true, queue will be passively declared.
+ # queue_passive = false
+
+ ## A binding between the exchange and queue using this binding key is
+ ## created. If unset, no binding is created.
binding_key = "#"
## Maximum number of messages server should give to the worker.
@@ -147,6 +156,10 @@ func (a *AMQPConsumer) SampleConfig() string {
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
+ ## Content encoding for message payloads, can be set to "gzip" to or
+ ## "identity" to apply no encoding.
+ # content_encoding = "identity"
+
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -201,6 +214,11 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
return err
}
+ a.decoder, err = internal.NewContentDecoder(a.ContentEncoding)
+ if err != nil {
+ return err
+ }
+
msgs, err := a.connect(amqpConf)
if err != nil {
return err
@@ -223,11 +241,11 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
break
}
- log.Printf("I! [inputs.amqp_consumer] connection closed: %s; trying to reconnect", err)
+ a.Log.Infof("Connection closed: %s; trying to reconnect", err)
for {
msgs, err := a.connect(amqpConf)
if err != nil {
- log.Printf("E! AMQP connection failed: %s", err)
+ a.Log.Errorf("AMQP connection failed: %s", err)
time.Sleep(10 * time.Second)
continue
}
@@ -254,14 +272,14 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err
p := rand.Perm(len(brokers))
for _, n := range p {
broker := brokers[n]
- log.Printf("D! [inputs.amqp_consumer] connecting to %q", broker)
+ a.Log.Debugf("Connecting to %q", broker)
conn, err := amqp.DialConfig(broker, *amqpConf)
if err == nil {
a.conn = conn
- log.Printf("D! [inputs.amqp_consumer] connected to %q", broker)
+ a.Log.Debugf("Connected to %q", broker)
break
}
- log.Printf("D! [inputs.amqp_consumer] error connecting to %q", broker)
+ a.Log.Debugf("Error connecting to %q", broker)
}
if a.conn == nil {
@@ -270,62 +288,55 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err
ch, err := a.conn.Channel()
if err != nil {
- return nil, fmt.Errorf("Failed to open a channel: %s", err)
+ return nil, fmt.Errorf("Failed to open a channel: %s", err.Error())
}
- var exchangeDurable = true
- switch a.ExchangeDurability {
- case "transient":
- exchangeDurable = false
- default:
- exchangeDurable = true
- }
+ if a.Exchange != "" {
+ var exchangeDurable = true
+ switch a.ExchangeDurability {
+ case "transient":
+ exchangeDurable = false
+ default:
+ exchangeDurable = true
+ }
- exchangeArgs := make(amqp.Table, len(a.ExchangeArguments))
- for k, v := range a.ExchangeArguments {
- exchangeArgs[k] = v
+ exchangeArgs := make(amqp.Table, len(a.ExchangeArguments))
+ for k, v := range a.ExchangeArguments {
+ exchangeArgs[k] = v
+ }
+
+ err = declareExchange(
+ ch,
+ a.Exchange,
+ a.ExchangeType,
+ a.ExchangePassive,
+ exchangeDurable,
+ exchangeArgs)
+ if err != nil {
+ return nil, err
+ }
}
- err = declareExchange(
+ q, err := declareQueue(
ch,
- a.Exchange,
- a.ExchangeType,
- a.ExchangePassive,
- exchangeDurable,
- exchangeArgs)
+ a.Queue,
+ a.QueueDurability,
+ a.QueuePassive)
if err != nil {
return nil, err
}
- var queueDurable = true
- switch a.QueueDurability {
- case "transient":
- queueDurable = false
- default:
- queueDurable = true
- }
-
- q, err := ch.QueueDeclare(
- a.Queue, // queue
- queueDurable, // durable
- false, // delete when unused
- false, // exclusive
- false, // no-wait
- nil, // arguments
- )
- if err != nil {
- return nil, fmt.Errorf("Failed to declare a queue: %s", err)
- }
-
- err = ch.QueueBind(
- q.Name, // queue
- a.BindingKey, // binding-key
- a.Exchange, // exchange
- false,
- nil,
- )
- if err != nil {
- return nil, fmt.Errorf("Failed to bind a queue: %s", err)
+ if a.BindingKey != "" {
+ err = ch.QueueBind(
+ q.Name, // queue
+ a.BindingKey, // binding-key
+ a.Exchange, // exchange
+ false,
+ nil,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("Failed to bind a queue: %s", err)
+ }
}
err = ch.Qos(
@@ -384,11 +395,53 @@ func declareExchange(
)
}
if err != nil {
- return fmt.Errorf("error declaring exchange: %v", err)
+ return fmt.Errorf("Error declaring exchange: %v", err)
}
return nil
}
+func declareQueue(
+ channel *amqp.Channel,
+ queueName string,
+ queueDurability string,
+ queuePassive bool,
+) (*amqp.Queue, error) {
+ var queue amqp.Queue
+ var err error
+
+ var queueDurable = true
+ switch queueDurability {
+ case "transient":
+ queueDurable = false
+ default:
+ queueDurable = true
+ }
+
+ if queuePassive {
+ queue, err = channel.QueueDeclarePassive(
+ queueName, // queue
+ queueDurable, // durable
+ false, // delete when unused
+ false, // exclusive
+ false, // no-wait
+ nil, // arguments
+ )
+ } else {
+ queue, err = channel.QueueDeclare(
+ queueName, // queue
+ queueDurable, // durable
+ false, // delete when unused
+ false, // exclusive
+ false, // no-wait
+ nil, // arguments
+ )
+ }
+ if err != nil {
+ return nil, fmt.Errorf("Error declaring queue: %v", err)
+ }
+ return &queue, nil
+}
+
// Read messages from queue and add them to the Accumulator
func (a *AMQPConsumer) process(ctx context.Context, msgs <-chan amqp.Delivery, ac telegraf.Accumulator) {
a.deliveries = make(map[telegraf.TrackingID]amqp.Delivery)
@@ -428,16 +481,25 @@ func (a *AMQPConsumer) process(ctx context.Context, msgs <-chan amqp.Delivery, a
}
func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delivery) error {
- metrics, err := a.parser.Parse(d.Body)
- if err != nil {
+ onError := func() {
// Discard the message from the queue; will never be able to process
// this message.
rejErr := d.Ack(false)
if rejErr != nil {
- log.Printf("E! [inputs.amqp_consumer] Unable to reject message: %d: %v",
- d.DeliveryTag, rejErr)
+ a.Log.Errorf("Unable to reject message: %d: %v", d.DeliveryTag, rejErr)
a.conn.Close()
}
+ }
+
+ body, err := a.decoder.Decode(d.Body)
+ if err != nil {
+ onError()
+ return err
+ }
+
+ metrics, err := a.parser.Parse(body)
+ if err != nil {
+ onError()
return err
}
@@ -456,15 +518,13 @@ func (a *AMQPConsumer) onDelivery(track telegraf.DeliveryInfo) bool {
if track.Delivered() {
err := delivery.Ack(false)
if err != nil {
- log.Printf("E! [inputs.amqp_consumer] Unable to ack written delivery: %d: %v",
- delivery.DeliveryTag, err)
+ a.Log.Errorf("Unable to ack written delivery: %d: %v", delivery.DeliveryTag, err)
a.conn.Close()
}
} else {
err := delivery.Reject(false)
if err != nil {
- log.Printf("E! [inputs.amqp_consumer] Unable to reject failed delivery: %d: %v",
- delivery.DeliveryTag, err)
+ a.Log.Errorf("Unable to reject failed delivery: %d: %v", delivery.DeliveryTag, err)
a.conn.Close()
}
}
@@ -478,7 +538,7 @@ func (a *AMQPConsumer) Stop() {
a.wg.Wait()
err := a.conn.Close()
if err != nil && err != amqp.ErrClosed {
- log.Printf("E! [inputs.amqp_consumer] Error closing AMQP connection: %s", err)
+ a.Log.Errorf("Error closing AMQP connection: %s", err)
return
}
}
diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go
index 1302e2d59e714..ff7341b838f75 100644
--- a/plugins/inputs/apache/apache.go
+++ b/plugins/inputs/apache/apache.go
@@ -13,7 +13,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
diff --git a/plugins/inputs/apcupsd/README.md b/plugins/inputs/apcupsd/README.md
new file mode 100644
index 0000000000000..97526d7ec3847
--- /dev/null
+++ b/plugins/inputs/apcupsd/README.md
@@ -0,0 +1,54 @@
+# APCUPSD Input Plugin
+
+This plugin reads data from an apcupsd daemon over its NIS network protocol.
+
+### Requirements
+
+apcupsd should be installed and it's daemon should be running.
+
+### Configuration
+
+```toml
+[[inputs.apcupsd]]
+ # A list of running apcupsd server to connect to.
+ # If not provided will default to tcp://127.0.0.1:3551
+ servers = ["tcp://127.0.0.1:3551"]
+
+ ## Timeout for dialing server.
+ timeout = "5s"
+```
+
+### Metrics
+
+- apcupsd
+ - tags:
+ - serial
+ - status (string representing the set status_flags)
+ - ups_name
+ - model
+ - fields:
+ - status_flags ([status-bits][])
+ - input_voltage
+ - load_percent
+ - battery_charge_percent
+ - time_left_ns
+ - output_voltage
+ - internal_temp
+ - battery_voltage
+ - input_frequency
+ - time_on_battery_ns
+ - battery_date
+ - nominal_input_voltage
+ - nominal_battery_voltage
+ - nominal_power
+ - firmware
+
+
+
+### Example output
+
+```
+apcupsd,serial=AS1231515,status=ONLINE,ups_name=name1 time_on_battery=0,load_percent=9.7,time_left_minutes=98,output_voltage=230.4,internal_temp=32.4,battery_voltage=27.4,input_frequency=50.2,input_voltage=230.4,battery_charge_percent=100,status_flags=8i 1490035922000000000
+```
+
+[status-bits]: http://www.apcupsd.org/manual/manual.html#status-bits
diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go
new file mode 100644
index 0000000000000..a862bbfc881f8
--- /dev/null
+++ b/plugins/inputs/apcupsd/apcupsd.go
@@ -0,0 +1,114 @@
+package apcupsd
+
+import (
+ "context"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/mdlayher/apcupsd"
+)
+
+const defaultAddress = "tcp://127.0.0.1:3551"
+
+var defaultTimeout = internal.Duration{Duration: time.Duration(time.Second * 5)}
+
+type ApcUpsd struct {
+ Servers []string
+ Timeout internal.Duration
+}
+
+func (*ApcUpsd) Description() string {
+ return "Monitor APC UPSes connected to apcupsd"
+}
+
+var sampleConfig = `
+ # A list of running apcupsd server to connect to.
+ # If not provided will default to tcp://127.0.0.1:3551
+ servers = ["tcp://127.0.0.1:3551"]
+
+ ## Timeout for dialing server.
+ timeout = "5s"
+`
+
+func (*ApcUpsd) SampleConfig() string {
+ return sampleConfig
+}
+
+func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error {
+ ctx := context.Background()
+
+ for _, addr := range h.Servers {
+ addrBits, err := url.Parse(addr)
+ if err != nil {
+ return err
+ }
+ if addrBits.Scheme == "" {
+ addrBits.Scheme = "tcp"
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, h.Timeout.Duration)
+ defer cancel()
+
+ status, err := fetchStatus(ctx, addrBits)
+ if err != nil {
+ return err
+ }
+
+ tags := map[string]string{
+ "serial": status.SerialNumber,
+ "ups_name": status.UPSName,
+ "status": status.Status,
+ "model": status.Model,
+ }
+
+ flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64)
+ if err != nil {
+ return err
+ }
+
+ fields := map[string]interface{}{
+ "status_flags": flags,
+ "input_voltage": status.LineVoltage,
+ "load_percent": status.LoadPercent,
+ "battery_charge_percent": status.BatteryChargePercent,
+ "time_left_ns": status.TimeLeft.Nanoseconds(),
+ "output_voltage": status.OutputVoltage,
+ "internal_temp": status.InternalTemp,
+ "battery_voltage": status.BatteryVoltage,
+ "input_frequency": status.LineFrequency,
+ "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(),
+ "nominal_input_voltage": status.NominalInputVoltage,
+ "nominal_battery_voltage": status.NominalBatteryVoltage,
+ "nominal_power": status.NominalPower,
+ "firmware": status.Firmware,
+ "battery_date": status.BatteryDate,
+ }
+
+ acc.AddFields("apcupsd", fields, tags)
+ }
+ return nil
+}
+
+func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsd.Status, error) {
+ client, err := apcupsd.DialContext(ctx, addr.Scheme, addr.Host)
+ if err != nil {
+ return nil, err
+ }
+ defer client.Close()
+
+ return client.Status()
+}
+
+func init() {
+ inputs.Add("apcupsd", func() telegraf.Input {
+ return &ApcUpsd{
+ Servers: []string{defaultAddress},
+ Timeout: defaultTimeout,
+ }
+ })
+}
diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go
new file mode 100644
index 0000000000000..e749d5137daba
--- /dev/null
+++ b/plugins/inputs/apcupsd/apcupsd_test.go
@@ -0,0 +1,235 @@
+package apcupsd
+
+import (
+ "context"
+ "encoding/binary"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestApcupsdDocs(t *testing.T) {
+ apc := &ApcUpsd{}
+ apc.Description()
+ apc.SampleConfig()
+}
+
+func TestApcupsdInit(t *testing.T) {
+ input, ok := inputs.Inputs["apcupsd"]
+ if !ok {
+ t.Fatal("Input not defined")
+ }
+
+ _ = input().(*ApcUpsd)
+}
+
+func listen(ctx context.Context, t *testing.T, out [][]byte) (string, error) {
+ lc := net.ListenConfig{}
+ ln, err := lc.Listen(ctx, "tcp4", "127.0.0.1:0")
+ if err != nil {
+ return "", err
+ }
+
+ go func() {
+ for ctx.Err() == nil {
+ defer ln.Close()
+
+ conn, err := ln.Accept()
+ if err != nil {
+ continue
+ }
+ defer conn.Close()
+ conn.SetReadDeadline(time.Now().Add(time.Second))
+
+ in := make([]byte, 128)
+ n, err := conn.Read(in)
+ require.NoError(t, err, "failed to read from connection")
+
+ status := []byte{0, 6, 's', 't', 'a', 't', 'u', 's'}
+ want, got := status, in[:n]
+ require.Equal(t, want, got)
+
+ // Run against test function and append EOF to end of output bytes
+ out = append(out, []byte{0, 0})
+
+ for _, o := range out {
+ _, err := conn.Write(o)
+ require.NoError(t, err, "failed to write to connection")
+ }
+ }
+ }()
+
+ return ln.Addr().String(), nil
+}
+
+func TestConfig(t *testing.T) {
+ apc := &ApcUpsd{Timeout: defaultTimeout}
+
+ var (
+ tests = []struct {
+ name string
+ servers []string
+ err bool
+ }{
+ {
+ name: "test listen address no scheme",
+ servers: []string{"127.0.0.1:1234"},
+ err: true,
+ },
+ {
+ name: "test no port",
+ servers: []string{"127.0.0.3"},
+ err: true,
+ },
+ }
+
+ acc testutil.Accumulator
+ )
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ apc.Servers = tt.servers
+
+ err := apc.Gather(&acc)
+ if tt.err {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+
+}
+
+func TestApcupsdGather(t *testing.T) {
+ apc := &ApcUpsd{Timeout: defaultTimeout}
+
+ var (
+ tests = []struct {
+ name string
+ err bool
+ tags map[string]string
+ fields map[string]interface{}
+ out func() [][]byte
+ }{
+ {
+ name: "test listening server with output",
+ err: false,
+ tags: map[string]string{
+ "serial": "ABC123",
+ "status": "ONLINE",
+ "ups_name": "BERTHA",
+ "model": "Model 12345",
+ },
+ fields: map[string]interface{}{
+ "status_flags": uint64(8),
+ "battery_charge_percent": float64(0),
+ "battery_voltage": float64(0),
+ "input_frequency": float64(0),
+ "input_voltage": float64(0),
+ "internal_temp": float64(0),
+ "load_percent": float64(13),
+ "output_voltage": float64(0),
+ "time_left_ns": int64(2790000000000),
+ "time_on_battery_ns": int64(0),
+ "nominal_input_voltage": float64(230),
+ "nominal_battery_voltage": float64(12),
+ "nominal_power": int(865),
+ "firmware": string("857.L3 .I USB FW:L3"),
+ "battery_date": time.Date(2016, time.September, 06, 0, 0, 0, 0, time.UTC),
+ },
+ out: genOutput,
+ },
+ {
+ name: "test with bad output",
+ err: true,
+ out: genBadOutput,
+ },
+ }
+
+ acc testutil.Accumulator
+ )
+
+ for _, tt := range tests {
+
+ t.Run(tt.name, func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ lAddr, err := listen(ctx, t, tt.out())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ apc.Servers = []string{"tcp://" + lAddr}
+
+ err = apc.Gather(&acc)
+ if tt.err {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ acc.AssertContainsTaggedFields(t, "apcupsd", tt.fields, tt.tags)
+ }
+ cancel()
+ })
+ }
+}
+
+// The following functionality is straight from apcupsd tests.
+
+// kvBytes is a helper to generate length and key/value byte buffers.
+func kvBytes(kv string) ([]byte, []byte) {
+ lenb := make([]byte, 2)
+ binary.BigEndian.PutUint16(lenb, uint16(len(kv)))
+
+ return lenb, []byte(kv)
+}
+
+func genOutput() [][]byte {
+ kvs := []string{
+ "SERIALNO : ABC123",
+ "STATUS : ONLINE",
+ "STATFLAG : 0x08 Status Flag",
+ "UPSNAME : BERTHA",
+ "MODEL : Model 12345",
+ "DATE : 2016-09-06 22:13:28 -0400",
+ "HOSTNAME : example",
+ "LOADPCT : 13.0 Percent Load Capacity",
+ "BATTDATE : 2016-09-06",
+ "TIMELEFT : 46.5 Minutes",
+ "TONBATT : 0 seconds",
+ "NUMXFERS : 0",
+ "SELFTEST : NO",
+ "NOMINV : 230 Volts",
+ "NOMBATTV : 12.0 Volts",
+ "NOMPOWER : 865 Watts",
+ "FIRMWARE : 857.L3 .I USB FW:L3",
+ }
+
+ var out [][]byte
+ for _, kv := range kvs {
+ lenb, kvb := kvBytes(kv)
+ out = append(out, lenb)
+ out = append(out, kvb)
+ }
+
+ return out
+}
+
+func genBadOutput() [][]byte {
+ kvs := []string{
+ "STATFLAG : 0x08Status Flag",
+ }
+
+ var out [][]byte
+ for _, kv := range kvs {
+ lenb, kvb := kvBytes(kv)
+ out = append(out, lenb)
+ out = append(out, kvb)
+ }
+
+ return out
+}
diff --git a/plugins/inputs/aurora/aurora.go b/plugins/inputs/aurora/aurora.go
index 9a5cafa537a0c..fc6f82aadda17 100644
--- a/plugins/inputs/aurora/aurora.go
+++ b/plugins/inputs/aurora/aurora.go
@@ -12,7 +12,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
diff --git a/plugins/inputs/azure_storage_queue/README.md b/plugins/inputs/azure_storage_queue/README.md
new file mode 100644
index 0000000000000..905e85e4cdea6
--- /dev/null
+++ b/plugins/inputs/azure_storage_queue/README.md
@@ -0,0 +1,35 @@
+# Azure Storage Queue Input Plugin
+
+This plugin gathers sizes of Azure Storage Queues.
+
+### Configuration:
+
+```toml
+# Description
+[[inputs.azure_storage_queue]]
+ ## Required Azure Storage Account name
+ account_name = "mystorageaccount"
+
+ ## Required Azure Storage Account access key
+ account_key = "storageaccountaccesskey"
+
+ ## Set to false to disable peeking age of oldest message (executes faster)
+ # peek_oldest_message_age = true
+```
+
+### Metrics
+- azure_storage_queues
+ - tags:
+ - queue
+ - account
+ - fields:
+ - size (integer, count)
+ - oldest_message_age_ns (integer, nanoseconds) Age of message at the head of the queue.
+ Requires `peek_oldest_message_age` to be configured to `true`.
+
+### Example Output
+
+```
+azure_storage_queues,queue=myqueue,account=mystorageaccount oldest_message_age=799714900i,size=7i 1565970503000000000
+azure_storage_queues,queue=myemptyqueue,account=mystorageaccount size=0i 1565970502000000000
+```
\ No newline at end of file
diff --git a/plugins/inputs/azure_storage_queue/azure_storage_queue.go b/plugins/inputs/azure_storage_queue/azure_storage_queue.go
new file mode 100644
index 0000000000000..6d132a5ef0171
--- /dev/null
+++ b/plugins/inputs/azure_storage_queue/azure_storage_queue.go
@@ -0,0 +1,134 @@
+package azure_storage_queue
+
+import (
+ "context"
+ "errors"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-storage-queue-go/azqueue"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type AzureStorageQueue struct {
+ StorageAccountName string `toml:"account_name"`
+ StorageAccountKey string `toml:"account_key"`
+ PeekOldestMessageAge bool `toml:"peek_oldest_message_age"`
+ Log telegraf.Logger
+
+ serviceURL *azqueue.ServiceURL
+}
+
+var sampleConfig = `
+ ## Required Azure Storage Account name
+ account_name = "mystorageaccount"
+
+ ## Required Azure Storage Account access key
+ account_key = "storageaccountaccesskey"
+
+ ## Set to false to disable peeking age of oldest message (executes faster)
+ # peek_oldest_message_age = true
+ `
+
+func (a *AzureStorageQueue) Description() string {
+ return "Gather Azure Storage Queue metrics"
+}
+
+func (a *AzureStorageQueue) SampleConfig() string {
+ return sampleConfig
+}
+
+func (a *AzureStorageQueue) Init() error {
+ if a.StorageAccountName == "" {
+ return errors.New("account_name must be configured")
+ }
+
+ if a.StorageAccountKey == "" {
+ return errors.New("account_key must be configured")
+ }
+ return nil
+}
+
+func (a *AzureStorageQueue) GetServiceURL() (azqueue.ServiceURL, error) {
+ if a.serviceURL == nil {
+ _url, err := url.Parse("https://" + a.StorageAccountName + ".queue.core.windows.net")
+ if err != nil {
+ return azqueue.ServiceURL{}, err
+ }
+
+ credential, err := azqueue.NewSharedKeyCredential(a.StorageAccountName, a.StorageAccountKey)
+ if err != nil {
+ return azqueue.ServiceURL{}, err
+ }
+
+ pipeline := azqueue.NewPipeline(credential, azqueue.PipelineOptions{})
+
+ serviceURL := azqueue.NewServiceURL(*_url, pipeline)
+ a.serviceURL = &serviceURL
+ }
+ return *a.serviceURL, nil
+}
+
+func (a *AzureStorageQueue) GatherQueueMetrics(acc telegraf.Accumulator, queueItem azqueue.QueueItem, properties *azqueue.QueueGetPropertiesResponse, peekedMessage *azqueue.PeekedMessage) {
+ fields := make(map[string]interface{})
+ tags := make(map[string]string)
+ tags["queue"] = strings.TrimSpace(queueItem.Name)
+ tags["account"] = a.StorageAccountName
+ fields["size"] = properties.ApproximateMessagesCount()
+ if peekedMessage != nil {
+ fields["oldest_message_age_ns"] = time.Now().UnixNano() - peekedMessage.InsertionTime.UnixNano()
+ }
+ acc.AddFields("azure_storage_queues", fields, tags)
+}
+
+func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error {
+ serviceURL, err := a.GetServiceURL()
+ if err != nil {
+ return err
+ }
+
+ ctx := context.TODO()
+
+ for marker := (azqueue.Marker{}); marker.NotDone(); {
+ a.Log.Debugf("Listing queues of storage account '%s'", a.StorageAccountName)
+ queuesSegment, err := serviceURL.ListQueuesSegment(ctx, marker,
+ azqueue.ListQueuesSegmentOptions{
+ Detail: azqueue.ListQueuesSegmentDetails{Metadata: false},
+ })
+ if err != nil {
+ return err
+ }
+ marker = queuesSegment.NextMarker
+
+ for _, queueItem := range queuesSegment.QueueItems {
+ a.Log.Debugf("Processing queue '%s' of storage account '%s'", queueItem.Name, a.StorageAccountName)
+ queueURL := serviceURL.NewQueueURL(queueItem.Name)
+ properties, err := queueURL.GetProperties(ctx)
+ if err != nil {
+ a.Log.Errorf("Error getting properties for queue %s: %s", queueItem.Name, err.Error())
+ continue
+ }
+ var peekedMessage *azqueue.PeekedMessage
+ if a.PeekOldestMessageAge {
+ messagesURL := queueURL.NewMessagesURL()
+ messagesResponse, err := messagesURL.Peek(ctx, 1)
+ if err != nil {
+ a.Log.Errorf("Error peeking queue %s: %s", queueItem.Name, err.Error())
+ } else if messagesResponse.NumMessages() > 0 {
+ peekedMessage = messagesResponse.Message(0)
+ }
+ }
+
+ a.GatherQueueMetrics(acc, queueItem, properties, peekedMessage)
+ }
+ }
+ return nil
+}
+
+func init() {
+ inputs.Add("azure_storage_queue", func() telegraf.Input {
+ return &AzureStorageQueue{PeekOldestMessageAge: true}
+ })
+}
diff --git a/plugins/inputs/bcache/README.md b/plugins/inputs/bcache/README.md
index bda8b02574438..11d567ec5616b 100644
--- a/plugins/inputs/bcache/README.md
+++ b/plugins/inputs/bcache/README.md
@@ -1,4 +1,4 @@
-# Telegraf plugin: bcache
+# bcache Input Plugin
Get bcache stat from stats_total directory and dirty_data file.
@@ -55,7 +55,7 @@ cache_readaheads
Using this configuration:
-```
+```toml
[bcache]
# Bcache sets path
# If not specified, then default is:
diff --git a/plugins/inputs/bind/README.md b/plugins/inputs/bind/README.md
index 34d419d3a1809..e3bcf6a75b252 100644
--- a/plugins/inputs/bind/README.md
+++ b/plugins/inputs/bind/README.md
@@ -77,7 +77,7 @@ for more information.
These are some useful queries (to generate dashboards or other) to run against data from this
plugin:
-```
+```sql
SELECT non_negative_derivative(mean(/^A$|^PTR$/), 5m) FROM bind_counter \
WHERE "url" = 'localhost:8053' AND "type" = 'qtype' AND time > now() - 1h \
GROUP BY time(5m), "type"
diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go
index b961d549db051..6ed953b691dd3 100644
--- a/plugins/inputs/bind/bind_test.go
+++ b/plugins/inputs/bind/bind_test.go
@@ -7,7 +7,6 @@ import (
"testing"
"github.com/influxdata/telegraf/testutil"
-
"github.com/stretchr/testify/assert"
)
@@ -26,7 +25,7 @@ func TestBindJsonStats(t *testing.T) {
var acc testutil.Accumulator
err := acc.GatherError(b.Gather)
- assert.Nil(t, err)
+ assert.NoError(t, err)
// Use subtests for counters, since they are similar structure
type fieldSet struct {
@@ -48,6 +47,36 @@ func TestBindJsonStats(t *testing.T) {
{"STATUS", 0},
},
},
+ {
+ "rcode",
+ []fieldSet{
+ {"NOERROR", 1732},
+ {"FORMERR", 0},
+ {"SERVFAIL", 6},
+ {"NXDOMAIN", 200},
+ {"NOTIMP", 0},
+ {"REFUSED", 6},
+ {"REFUSED", 0},
+ {"YXDOMAIN", 0},
+ {"YXRRSET", 0},
+ {"NXRRSET", 0},
+ {"NOTAUTH", 0},
+ {"NOTZONE", 0},
+ {"RESERVED11", 0},
+ {"RESERVED12", 0},
+ {"RESERVED13", 0},
+ {"RESERVED14", 0},
+ {"RESERVED15", 0},
+ {"BADVERS", 0},
+ {"17", 0},
+ {"18", 0},
+ {"19", 0},
+ {"20", 0},
+ {"21", 0},
+ {"22", 0},
+ {"BADCOOKIE", 0},
+ },
+ },
{
"qtype",
[]fieldSet{
@@ -97,6 +126,14 @@ func TestBindJsonStats(t *testing.T) {
{"TCP6Open", 2},
},
},
+ {
+ "zonestat",
+ []fieldSet{
+ {"NotifyOutv4", 8},
+ {"NotifyInv4", 5},
+ {"SOAOutv4", 5},
+ },
+ },
}
for _, tc := range testCases {
@@ -127,20 +164,19 @@ func TestBindJsonStats(t *testing.T) {
}
fields := map[string]interface{}{
- "block_size": 13893632,
- "context_size": 3685480,
- "in_use": 3064368,
- "lost": 0,
- "total_use": 18206566,
+ "block_size": int64(13893632),
+ "context_size": int64(3685480),
+ "in_use": int64(3064368),
+ "lost": int64(0),
+ "total_use": int64(18206566),
}
-
acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags)
})
// Subtest for per-context memory stats
t.Run("memory_context", func(t *testing.T) {
- assert.True(t, acc.HasIntField("bind_memory_context", "total"))
- assert.True(t, acc.HasIntField("bind_memory_context", "in_use"))
+ assert.True(t, acc.HasInt64Field("bind_memory_context", "total"))
+ assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use"))
})
}
@@ -159,7 +195,7 @@ func TestBindXmlStatsV2(t *testing.T) {
var acc testutil.Accumulator
err := acc.GatherError(b.Gather)
- assert.Nil(t, err)
+ assert.NoError(t, err)
// Use subtests for counters, since they are similar structure
type fieldSet struct {
@@ -329,11 +365,11 @@ func TestBindXmlStatsV2(t *testing.T) {
}
fields := map[string]interface{}{
- "block_size": 77070336,
- "context_size": 6663840,
- "in_use": 20772579,
- "lost": 0,
- "total_use": 81804609,
+ "block_size": int64(77070336),
+ "context_size": int64(6663840),
+ "in_use": int64(20772579),
+ "lost": int64(0),
+ "total_use": int64(81804609),
}
acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags)
@@ -341,8 +377,8 @@ func TestBindXmlStatsV2(t *testing.T) {
// Subtest for per-context memory stats
t.Run("memory_context", func(t *testing.T) {
- assert.True(t, acc.HasIntField("bind_memory_context", "total"))
- assert.True(t, acc.HasIntField("bind_memory_context", "in_use"))
+ assert.True(t, acc.HasInt64Field("bind_memory_context", "total"))
+ assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use"))
})
}
@@ -361,7 +397,7 @@ func TestBindXmlStatsV3(t *testing.T) {
var acc testutil.Accumulator
err := acc.GatherError(b.Gather)
- assert.Nil(t, err)
+ assert.NoError(t, err)
// Use subtests for counters, since they are similar structure
type fieldSet struct {
@@ -553,11 +589,11 @@ func TestBindXmlStatsV3(t *testing.T) {
}
fields := map[string]interface{}{
- "block_size": 45875200,
- "context_size": 10037400,
- "in_use": 6000232,
- "lost": 0,
- "total_use": 777821909,
+ "block_size": int64(45875200),
+ "context_size": int64(10037400),
+ "in_use": int64(6000232),
+ "lost": int64(0),
+ "total_use": int64(777821909),
}
acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags)
@@ -565,8 +601,8 @@ func TestBindXmlStatsV3(t *testing.T) {
// Subtest for per-context memory stats
t.Run("memory_context", func(t *testing.T) {
- assert.True(t, acc.HasIntField("bind_memory_context", "total"))
- assert.True(t, acc.HasIntField("bind_memory_context", "in_use"))
+ assert.True(t, acc.HasInt64Field("bind_memory_context", "total"))
+ assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use"))
})
}
diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go
index 95c7e6fe893bf..87b6065e2eb1c 100644
--- a/plugins/inputs/bind/json_stats.go
+++ b/plugins/inputs/bind/json_stats.go
@@ -16,6 +16,8 @@ import (
type jsonStats struct {
OpCodes map[string]int
QTypes map[string]int
+ RCodes map[string]int
+ ZoneStats map[string]int
NSStats map[string]int
SockStats map[string]int
Views map[string]jsonView
@@ -23,16 +25,16 @@ type jsonStats struct {
}
type jsonMemory struct {
- TotalUse int
- InUse int
- BlockSize int
- ContextSize int
- Lost int
+ TotalUse int64
+ InUse int64
+ BlockSize int64
+ ContextSize int64
+ Lost int64
Contexts []struct {
Id string
Name string
- Total int
- InUse int
+ Total int64
+ InUse int64
}
}
@@ -78,6 +80,10 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st
tags["type"] = "opcode"
addJSONCounter(acc, tags, stats.OpCodes)
+ // RCodes stats
+ tags["type"] = "rcode"
+ addJSONCounter(acc, tags, stats.RCodes)
+
// Query RDATA types
tags["type"] = "qtype"
addJSONCounter(acc, tags, stats.QTypes)
@@ -90,6 +96,10 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st
tags["type"] = "sockstat"
addJSONCounter(acc, tags, stats.SockStats)
+ // Zonestats
+ tags["type"] = "zonestat"
+ addJSONCounter(acc, tags, stats.ZoneStats)
+
// Memory stats
fields := map[string]interface{}{
"total_use": stats.Memory.TotalUse,
diff --git a/plugins/inputs/bind/testdata/json/v1/server b/plugins/inputs/bind/testdata/json/v1/server
index 53acd90672558..060fab6b1768b 100644
--- a/plugins/inputs/bind/testdata/json/v1/server
+++ b/plugins/inputs/bind/testdata/json/v1/server
@@ -21,6 +21,32 @@
"RESERVED14":0,
"RESERVED15":0
},
+ "rcodes":{
+ "NOERROR":1732,
+ "FORMERR":0,
+ "SERVFAIL":6,
+ "NXDOMAIN":200,
+ "NOTIMP":0,
+ "REFUSED":0,
+ "YXDOMAIN":0,
+ "YXRRSET":0,
+ "NXRRSET":0,
+ "NOTAUTH":0,
+ "NOTZONE":0,
+ "RESERVED11":0,
+ "RESERVED12":0,
+ "RESERVED13":0,
+ "RESERVED14":0,
+ "RESERVED15":0,
+ "BADVERS":0,
+ "17":0,
+ "18":0,
+ "19":0,
+ "20":0,
+ "21":0,
+ "22":0,
+ "BADCOOKIE":0
+ },
"qtypes":{
"A":2,
"PTR":7,
@@ -40,6 +66,11 @@
"QryDuplicate":1,
"QryUDP":13
},
+ "zonestats":{
+ "NotifyOutv4":8,
+ "NotifyInv4":5,
+ "SOAOutv4":5
+ },
"views":{
"_default":{
"resolver":{
@@ -138,4 +169,4 @@
}
}
}
-}
\ No newline at end of file
+}
diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go
index 45071bdc005f0..5e17851fb671c 100644
--- a/plugins/inputs/bind/xml_stats_v2.go
+++ b/plugins/inputs/bind/xml_stats_v2.go
@@ -44,15 +44,15 @@ type v2Statistics struct {
// Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater
Id string `xml:"id"`
Name string `xml:"name"`
- Total int `xml:"total"`
- InUse int `xml:"inuse"`
+ Total int64 `xml:"total"`
+ InUse int64 `xml:"inuse"`
} `xml:"contexts>context"`
Summary struct {
- TotalUse int
- InUse int
- BlockSize int
- ContextSize int
- Lost int
+ TotalUse int64
+ InUse int64
+ BlockSize int64
+ ContextSize int64
+ Lost int64
} `xml:"summary"`
} `xml:"memory"`
}
diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go
index ed2cc1b7faf9c..89e4ea0b8fcb6 100644
--- a/plugins/inputs/bind/xml_stats_v3.go
+++ b/plugins/inputs/bind/xml_stats_v3.go
@@ -27,15 +27,15 @@ type v3Memory struct {
// Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater
Id string `xml:"id"`
Name string `xml:"name"`
- Total int `xml:"total"`
- InUse int `xml:"inuse"`
+ Total int64 `xml:"total"`
+ InUse int64 `xml:"inuse"`
} `xml:"contexts>context"`
Summary struct {
- TotalUse int
- InUse int
- BlockSize int
- ContextSize int
- Lost int
+ TotalUse int64
+ InUse int64
+ BlockSize int64
+ ContextSize int64
+ Lost int64
} `xml:"summary"`
}
@@ -53,7 +53,7 @@ type v3View struct {
Name string `xml:"name,attr"`
RRSets []struct {
Name string `xml:"name"`
- Value int `xml:"counter"`
+ Value int64 `xml:"counter"`
} `xml:"rrset"`
} `xml:"cache"`
}
@@ -63,7 +63,7 @@ type v3CounterGroup struct {
Type string `xml:"type,attr"`
Counters []struct {
Name string `xml:"name,attr"`
- Value int `xml:",chardata"`
+ Value int64 `xml:",chardata"`
} `xml:"counter"`
}
diff --git a/plugins/inputs/burrow/README.md b/plugins/inputs/burrow/README.md
index d30a054d6a379..1d763a430455f 100644
--- a/plugins/inputs/burrow/README.md
+++ b/plugins/inputs/burrow/README.md
@@ -1,4 +1,4 @@
-# Telegraf Plugin: Burrow
+# Burrow Kafka Consumer Lag Checking Input Plugin
Collect Kafka topic, consumer and partition status
via [Burrow](https://github.com/linkedin/Burrow) HTTP [API](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint).
@@ -7,7 +7,7 @@ Supported Burrow version: `1.x`
### Configuration
-```
+```toml
[[inputs.burrow]]
## Burrow API endpoints in format "schema://host:port".
## Default is "http://localhost:8000".
diff --git a/plugins/inputs/burrow/burrow.go b/plugins/inputs/burrow/burrow.go
index 9c532e3be3e32..501fddf16ad77 100644
--- a/plugins/inputs/burrow/burrow.go
+++ b/plugins/inputs/burrow/burrow.go
@@ -13,7 +13,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -432,6 +432,9 @@ func (b *burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, ac
func (b *burrow) genGroupLagMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) {
for _, partition := range r.Status.Partitions {
+ if !b.filterTopics.Match(partition.Topic) {
+ continue
+ }
acc.AddFields(
"burrow_partition",
map[string]interface{}{
diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go
index 3847a5d7c2d73..cafbcb9408775 100644
--- a/plugins/inputs/burrow/burrow_test.go
+++ b/plugins/inputs/burrow/burrow_test.go
@@ -262,7 +262,7 @@ func TestFilterGroups(t *testing.T) {
acc := &testutil.Accumulator{}
plugin.Gather(acc)
- require.Exactly(t, 4, len(acc.Metrics))
+ require.Exactly(t, 1, len(acc.Metrics))
require.Empty(t, acc.Errors)
}
diff --git a/plugins/inputs/cassandra/README.md b/plugins/inputs/cassandra/README.md
index 86c6a65a36019..d89459533f55e 100644
--- a/plugins/inputs/cassandra/README.md
+++ b/plugins/inputs/cassandra/README.md
@@ -1,5 +1,4 @@
-
-# Telegraf plugin: Cassandra
+# Cassandra Input Plugin
### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration.
@@ -39,19 +38,19 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster
- [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics)
- [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
-####measurement = javaGarbageCollector
+#### measurement = javaGarbageCollector
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount
- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime
- /java.lang:type=GarbageCollector,name=ParNew/CollectionCount
-####measurement = javaMemory
+#### measurement = javaMemory
- /java.lang:type=Memory/HeapMemoryUsage
- /java.lang:type=Memory/NonHeapMemoryUsage
-####measurement = cassandraCache
+#### measurement = cassandraCache
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests
@@ -64,11 +63,11 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity
-####measurement = cassandraClient
+#### measurement = cassandraClient
- /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients
-####measurement = cassandraClientRequest
+#### measurement = cassandraClientRequest
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency
@@ -81,24 +80,24 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures
-####measurement = cassandraCommitLog
+#### measurement = cassandraCommitLog
- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks
- /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize
-####measurement = cassandraCompaction
+#### measurement = cassandraCompaction
- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks
- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
- /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted
- /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted
-####measurement = cassandraStorage
+#### measurement = cassandraStorage
- /org.apache.cassandra.metrics:type=Storage,name=Load
-- /org.apache.cassandra.metrics:type=Storage,name=Exceptions
+- /org.apache.cassandra.metrics:type=Storage,name=Exceptions
-####measurement = cassandraTable
+#### measurement = cassandraTable
Using wildcards for "keyspace" and "scope" can create a lot of series as metrics will be reported for every table and keyspace including internal system tables. Specify a keyspace name and/or a table name to limit them.
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed
@@ -110,7 +109,7 @@ Using wildcards for "keyspace" and "scope" can create a lot of series as metrics
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency
-####measurement = cassandraThreadPools
+#### measurement = cassandraThreadPools
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks
diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go
index 1a7e5a657816e..43a9a0c1eb105 100644
--- a/plugins/inputs/cassandra/cassandra_test.go
+++ b/plugins/inputs/cassandra/cassandra_test.go
@@ -153,7 +153,7 @@ func TestHttpJsonJavaMultiValue(t *testing.T) {
acc.SetDebug(true)
err := acc.GatherError(cassandra.Gather)
- assert.Nil(t, err)
+ assert.NoError(t, err)
assert.Equal(t, 2, len(acc.Metrics))
fields := map[string]interface{}{
@@ -182,7 +182,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
acc.SetDebug(true)
err := acc.GatherError(cassandra.Gather)
- assert.Nil(t, err)
+ assert.NoError(t, err)
assert.Equal(t, 2, len(acc.Metrics))
fields := map[string]interface{}{
@@ -217,7 +217,7 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) {
var acc testutil.Accumulator
err := acc.GatherError(cassandra.Gather)
- assert.Nil(t, err)
+ assert.NoError(t, err)
assert.Equal(t, 1, len(acc.Metrics))
fields := map[string]interface{}{
@@ -249,7 +249,7 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {
acc.SetDebug(true)
err := acc.GatherError(cassandra.Gather)
- assert.Nil(t, err)
+ assert.NoError(t, err)
assert.Equal(t, 2, len(acc.Metrics))
fields1 := map[string]interface{}{
diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md
index 33585b079562a..171b64760654f 100644
--- a/plugins/inputs/ceph/README.md
+++ b/plugins/inputs/ceph/README.md
@@ -6,13 +6,13 @@ Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The
*Admin Socket Stats*
-This gatherer works by scanning the configured SocketDir for OSD and MON socket files. When it finds
+This gatherer works by scanning the configured SocketDir for OSD, MON, MDS and RGW socket files. When it finds
a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump**
The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are
used as collection tags, and all sub-keys are flattened. For example:
-```
+```json
{
"paxos": {
"refresh": 9363435,
@@ -44,7 +44,7 @@ the cluster. The currently supported commands are:
### Configuration:
-```
+```toml
# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
[[inputs.ceph]]
## This is the recommended interval to poll. Too frequent and you will lose
@@ -62,6 +62,8 @@ the cluster. The currently supported commands are:
## prefix of MON and OSD socket files, used to determine socket type
mon_prefix = "ceph-mon"
osd_prefix = "ceph-osd"
+ mds_prefix = "ceph-mds"
+ rgw_prefix = "ceph-client"
## suffix used to identify socket files
socket_suffix = "asok"
@@ -95,7 +97,7 @@ All fields are collected under the **ceph** measurement and stored as float64s.
All admin measurements will have the following tags:
-- type: either 'osd' or 'mon' to indicate which type of node was queried
+- type: either 'osd', 'mon', 'mds' or 'rgw' to indicate which type of node was queried
- id: a unique string identifier, parsed from the socket file name for the node
- collection: the top-level key under which these fields were reported. Possible values are:
- for MON nodes:
@@ -133,6 +135,37 @@ All admin measurements will have the following tags:
- throttle-objecter_ops
- throttle-osd_client_bytes
- throttle-osd_client_messages
+ - for MDS nodes:
+ - AsyncMessenger::Worker-0
+ - AsyncMessenger::Worker-1
+ - AsyncMessenger::Worker-2
+ - finisher-PurgeQueue
+ - mds
+ - mds_cache
+ - mds_log
+ - mds_mem
+ - mds_server
+ - mds_sessions
+ - objecter
+ - purge_queue
+ - throttle-msgr_dispatch_throttler-mds
+ - throttle-objecter_bytes
+ - throttle-objecter_ops
+ - throttle-write_buf_throttle
+ - for RGW nodes:
+ - AsyncMessenger::Worker-0
+ - AsyncMessenger::Worker-1
+ - AsyncMessenger::Worker-2
+ - cct
+ - finisher-radosclient
+ - mempool
+ - objecter
+ - rgw
+ - simple-throttler
+ - throttle-msgr_dispatch_throttler-radosclient
+ - throttle-objecter_bytes
+ - throttle-objecter_ops
+ - throttle-rgw_async_rados_ops
*Cluster Stats*
@@ -209,62 +242,145 @@ All admin measurements will have the following tags:
*Cluster Stats*
```
-ceph_pool_stats,name=telegraf recovering_keys_per_sec=0,read_bytes_sec=0,write_bytes_sec=0,read_op_per_sec=0,write_op_per_sec=0,recovering_objects_per_sec=0,recovering_bytes_per_sec=0 1550658911000000000
-ceph_pool_usage,name=telegraf kb_used=0,bytes_used=0,objects=0 1550658911000000000
-ceph_pgmap_state,state=undersized+peered count=30 1550658910000000000
-ceph_pgmap bytes_total=10733223936,read_op_per_sec=0,write_op_per_sec=0,num_pgs=30,data_bytes=0,bytes_avail=9654697984,read_bytes_sec=0,write_bytes_sec=0,version=0,bytes_used=1078525952 1550658910000000000
-ceph_osdmap num_up_osds=1,num_in_osds=1,full=false,nearfull=false,num_remapped_pgs=0,epoch=34,num_osds=1 1550658910000000000
-ceph_health status="HEALTH_WARN",overall_status="HEALTH_WARN" 1550658910000000000
+ceph_health,host=stefanmon1 overall_status="",status="HEALTH_WARN" 1587118504000000000
+ceph_osdmap,host=stefanmon1 epoch=203,full=false,nearfull=false,num_in_osds=8,num_osds=9,num_remapped_pgs=0,num_up_osds=8 1587118504000000000
+ceph_pgmap,host=stefanmon1 bytes_avail=849879302144,bytes_total=858959904768,bytes_used=9080602624,data_bytes=5055,num_pgs=504,read_bytes_sec=0,read_op_per_sec=0,version=0,write_bytes_sec=0,write_op_per_sec=0 1587118504000000000
+ceph_pgmap_state,host=stefanmon1,state=active+clean count=504 1587118504000000000
+ceph_usage,host=stefanmon1 total_avail_bytes=849879302144,total_bytes=858959904768,total_used_bytes=196018176 1587118505000000000
+ceph_pool_usage,host=stefanmon1,name=cephfs_data bytes_used=0,kb_used=0,max_avail=285804986368,objects=0,percent_used=0 1587118505000000000
+ceph_pool_stats,host=stefanmon1,name=cephfs_data read_bytes_sec=0,read_op_per_sec=0,recovering_bytes_per_sec=0,recovering_keys_per_sec=0,recovering_objects_per_sec=0,write_bytes_sec=0,write_op_per_sec=0 1587118506000000000
```
*Admin Socket Stats*
```
-ceph,collection=recoverystate_perf,id=0,type=osd reprecovering_latency.avgtime=0,repwaitrecoveryreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.sum=0,reset_latency.avgtime=0.000090333,peering_latency.avgtime=0.824434333,stray_latency.avgtime=0.000030502,waitlocalrecoveryreserved_latency.sum=0,backfilling_latency.avgtime=0,reprecovering_latency.avgcount=0,incomplete_latency.avgtime=0,down_latency.avgtime=0,recovered_latency.sum=0.009692406,peering_latency.avgcount=40,notrecovering_latency.sum=0,waitremoterecoveryreserved_latency.sum=0,reprecovering_latency.sum=0,waitlocalbackfillreserved_latency.avgtime=0,started_latency.sum=9066.701648888,backfilling_latency.sum=0,waitactingchange_latency.avgcount=0,start_latency.avgtime=0.000030178,recovering_latency.avgtime=0,notbackfilling_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,incomplete_latency.avgcount=0,replicaactive_latency.sum=0,getinfo_latency.avgtime=0.000025945,down_latency.sum=0,recovered_latency.avgcount=40,waitactingchange_latency.avgtime=0,notrecovering_latency.avgcount=0,waitupthru_latency.sum=32.970965509,waitupthru_latency.avgtime=0.824274137,waitlocalrecoveryreserved_latency.avgcount=0,waitremoterecoveryreserved_latency.avgcount=0,activating_latency.avgcount=40,activating_latency.sum=0.83428466,activating_latency.avgtime=0.020857116,start_latency.avgcount=50,waitremotebackfillreserved_latency.avgcount=0,down_latency.avgcount=0,started_latency.avgcount=10,getlog_latency.avgcount=40,stray_latency.avgcount=10,notbackfilling_latency.sum=0,reset_latency.sum=0.00451665,active_latency.avgtime=906.505839265,repwaitbackfillreserved_latency.sum=0,waitactingchange_latency.sum=0,stray_latency.sum=0.000305022,waitremotebackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgtime=0,replicaactive_latency.avgtime=0,clean_latency.avgcount=10,waitremoterecoveryreserved_latency.avgtime=0,active_latency.avgcount=10,primary_latency.sum=9066.700828729,initial_latency.avgtime=0.000379351,waitlocalbackfillreserved_latency.avgcount=0,getinfo_latency.sum=0.001037815,reset_latency.avgcount=50,getlog_latency.sum=0.003079344,getlog_latency.avgtime=0.000076983,primary_latency.avgcount=10,repnotrecovering_latency.avgcount=0,initial_latency.sum=0.015174072,repwaitrecoveryreserved_latency.sum=0,replicaactive_latency.avgcount=0,clean_latency.avgtime=906.495755946,waitupthru_latency.avgcount=40,repnotrecovering_latency.sum=0,incomplete_latency.sum=0,active_latency.sum=9065.058392651,peering_latency.sum=32.977373355,repnotrecovering_latency.avgtime=0,notrecovering_latency.avgtime=0,waitlocalrecoveryreserved_latency.avgtime=0,repwaitbackfillreserved_latency.avgtime=0,recovering_latency.sum=0,getmissing_latency.sum=0.000902014,getmissing_latency.avgtime=0.00002255,clean_latency.sum=9064.957559467,getinfo_latency.avgcount=40,started_latency.avgtime=906.670164888,getmissing_latency.avgcount=40,notbackfilling_latency.avgtime=0,initial_latency.avgcount=40,recovered_latency.avgtime=0.00024231,repwaitbackfillreserved_latency.avgcount=0,backfilling_latency.avgcount=0,start_latency.sum=0.001508937,primary_latency.avgtime=906.670082872,recovering_latency.avgcount=0 1550658950000000000
-ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,id=0,type=osd put_sum=0,wait.avgtime=0,put=0,get_or_fail_success=0,wait.avgcount=0,val=0,get_sum=0,take=0,take_sum=0,max=104857600,get=0,get_or_fail_fail=0,wait.sum=0,get_started=0 1550658950000000000
-ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,id=0,type=osd wait.sum=0,val=0,take_sum=0,put=0,get_or_fail_success=0,put_sum=0,get=0,get_or_fail_fail=0,get_started=0,get_sum=0,wait.avgcount=0,wait.avgtime=0,max=104857600,take=0 1550658950000000000
-ceph,collection=bluefs,id=0,type=osd slow_used_bytes=0,wal_total_bytes=0,gift_bytes=1048576,log_compactions=0,logged_bytes=221184,files_written_sst=1,slow_total_bytes=0,bytes_written_wal=619403,bytes_written_sst=1517,reclaim_bytes=0,db_total_bytes=1086324736,wal_used_bytes=0,log_bytes=319488,num_files=10,files_written_wal=1,db_used_bytes=12582912 1550658950000000000
-ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,id=0,type=osd val=0,put=0,get=0,take=0,put_sum=0,get_started=0,take_sum=0,get_sum=0,wait.sum=0,wait.avgtime=0,get_or_fail_fail=0,get_or_fail_success=0,wait.avgcount=0,max=104857600 1550658950000000000
-ceph,collection=throttle-msgr_dispatch_throttler-client,id=0,type=osd put=100,max=104857600,wait.sum=0,wait.avgtime=0,get_or_fail_fail=0,take_sum=0,val=0,wait.avgcount=0,get_sum=48561,get_or_fail_success=100,take=0,put_sum=48561,get_started=0,get=100 1550658950000000000
-ceph,collection=mutex-OSDShard.2::sdata_wait_lock,id=0,type=osd wait.sum=0,wait.avgtime=0,wait.avgcount=0 1550658950000000000
-ceph,collection=throttle-objecter_ops,id=0,type=osd get_or_fail_fail=0,max=1024,get_sum=0,take=0,val=0,wait.avgtime=0,get_or_fail_success=0,wait.sum=0,put_sum=0,get=0,take_sum=0,put=0,wait.avgcount=0,get_started=0 1550658950000000000
-ceph,collection=AsyncMessenger::Worker-1,id=0,type=osd msgr_send_messages=266,msgr_recv_bytes=49074,msgr_active_connections=1,msgr_running_recv_time=0.136317251,msgr_running_fast_dispatch_time=0,msgr_created_connections=5,msgr_send_bytes=41569,msgr_running_send_time=0.514432253,msgr_recv_messages=81,msgr_running_total_time=0.766790051 1550658950000000000
-ceph,collection=throttle-bluestore_throttle_deferred_bytes,id=0,type=osd get_started=0,wait.sum=0,wait.avgcount=0,take_sum=0,val=12134038,max=201326592,take=0,get_or_fail_fail=0,put_sum=0,wait.avgtime=0,get_or_fail_success=18,get=18,get_sum=12134038,put=0 1550658950000000000
-ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,id=0,type=osd get=0,put_sum=0,val=0,get_or_fail_fail=0,get_or_fail_success=0,take=0,max=104857600,get_started=0,wait.sum=0,wait.avgtime=0,get_sum=0,take_sum=0,put=0,wait.avgcount=0 1550658950000000000
-ceph,collection=mutex-OSDShard.1::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000
-ceph,collection=finisher-defered_finisher,id=0,type=osd queue_len=0,complete_latency.avgcount=0,complete_latency.sum=0,complete_latency.avgtime=0 1550658950000000000
-ceph,collection=mutex-OSDShard.3::shard_lock,id=0,type=osd wait.avgtime=0,wait.avgcount=0,wait.sum=0 1550658950000000000
-ceph,collection=mutex-OSDShard.0::shard_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000
-ceph,collection=throttle-osd_client_bytes,id=0,type=osd get_or_fail_fail=0,get=22,get_sum=6262,take=0,max=524288000,put=31,wait.sum=0,val=0,get_started=0,put_sum=6262,get_or_fail_success=22,take_sum=0,wait.avgtime=0,wait.avgcount=0 1550658950000000000
-ceph,collection=rocksdb,id=0,type=osd submit_latency.sum=0.019985172,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.avgcount=0,submit_sync_latency.sum=0.559604552,compact=0,compact_queue_len=0,get_latency.avgcount=140,submit_latency.avgtime=0.000095622,submit_transaction=209,compact_range=0,rocksdb_write_wal_time.avgcount=0,submit_sync_latency.avgtime=0.011906479,compact_queue_merge=0,rocksdb_write_memtable_time.avgtime=0,get_latency.sum=0.013135139,submit_latency.avgcount=209,submit_sync_latency.avgcount=47,submit_transaction_sync=47,rocksdb_write_wal_time.sum=0,rocksdb_write_delay_time.avgcount=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.sum=0,get=140,get_latency.avgtime=0.000093822,rocksdb_write_delay_time.sum=0 1550658950000000000
-ceph,collection=mutex-OSDShard.1::shard_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000
-ceph,collection=osd,id=0,type=osd subop_latency.avgtime=0,copyfrom=0,osd_pg_info=140,subop_push_latency.avgtime=0,subop_pull=0,op_rw_process_latency.sum=0,stat_bytes=10733223936,numpg_removing=0,op_latency.avgtime=0,op_w_process_latency.avgtime=0,op_rw_in_bytes=0,osd_map_cache_miss=0,loadavg=144,map_messages=31,op_w_latency.avgtime=0,op_prepare_latency.avgcount=0,op_r=0,op_latency.avgcount=0,osd_map_cache_hit=225,op_w_prepare_latency.sum=0,numpg_primary=30,op_rw_out_bytes=0,subop_w_latency.avgcount=0,subop_push_latency.avgcount=0,op_r_process_latency.avgcount=0,op_w_in_bytes=0,op_rw_latency.avgtime=0,subop_w_latency.avgtime=0,osd_map_cache_miss_low_avg.sum=0,agent_wake=0,op_before_queue_op_lat.avgtime=0.000065043,op_w_prepare_latency.avgcount=0,tier_proxy_write=0,op_rw_prepare_latency.avgtime=0,op_rw_process_latency.avgtime=0,op_in_bytes=0,op_cache_hit=0,tier_whiteout=0,op_w_prepare_latency.avgtime=0,heartbeat_to_peers=0,object_ctx_cache_hit=0,buffer_bytes=0,stat_bytes_avail=9654697984,op_w_latency.avgcount=0,tier_dirty=0,tier_flush_fail=0,op_rw_prepare_latency.avgcount=0,agent_flush=0,osd_tier_promote_lat.sum=0,subop_w_latency.sum=0,tier_promote=0,op_before_dequeue_op_lat.avgcount=22,push=0,tier_flush=0,osd_pg_biginfo=90,tier_try_flush_fail=0,subop_push_in_bytes=0,op_before_dequeue_op_lat.sum=0.00266744,osd_map_cache_miss_low=0,numpg=30,op_prepare_latency.avgtime=0,subop_pull_latency.avgtime=0,op_rw_latency.avgcount=0,subop_latency.avgcount=0,op=0,osd_tier_promote_lat.avgcount=0,cached_crc=0,op_r_prepare_latency.sum=0,subop_pull_latency.sum=0,op_before_dequeue_op_lat.avgtime=0.000121247,history_alloc_Mbytes=0,subop_push_latency.sum=0,subop_in_bytes=0,op_w_process_latency.sum=0,osd_map_cache_miss_low_avg.avgcount=0,subop=0,tier_clean=0,osd_tier_r_lat.avgtime=0,op_r_process_latency.avgtime=0,op_r_prepare_latency.avgcount=0,op_w_process_latency.avgcount=0,numpg_stray=0,op_r_prepare_latency.avgtime=0,object_ctx_cache_total=0,op_process_latency.avgtime=0,op_r_process_latency.sum=0,op_r_latency.sum=0,subop_w_in_bytes=0,op_rw=0,messages_delayed_for_map=4,map_message_epoch_dups=30,osd_map_bl_cache_miss=33,op_r_latency.avgtime=0,op_before_queue_op_lat.sum=0.001430955,map_message_epochs=64,agent_evict=0,op_out_bytes=0,op_process_latency.sum=0,osd_tier_flush_lat.sum=0,stat_bytes_used=1078525952,op_prepare_latency.sum=0,op_wip=0,osd_tier_flush_lat.avgtime=0,missed_crc=0,op_rw_latency.sum=0,op_r_latency.avgcount=0,pull=0,op_w_latency.sum=0,op_before_queue_op_lat.avgcount=22,tier_try_flush=0,numpg_replica=0,subop_push=0,osd_tier_r_lat.sum=0,op_latency.sum=0,push_out_bytes=0,op_w=0,osd_tier_promote_lat.avgtime=0,subop_latency.sum=0,osd_pg_fastinfo=0,tier_delay=0,op_rw_prepare_latency.sum=0,osd_tier_flush_lat.avgcount=0,osd_map_bl_cache_hit=0,op_r_out_bytes=0,subop_pull_latency.avgcount=0,op_process_latency.avgcount=0,tier_evict=0,tier_proxy_read=0,agent_skip=0,subop_w=0,history_alloc_num=0,osd_tier_r_lat.avgcount=0,recovery_ops=0,cached_crc_adjusted=0,op_rw_process_latency.avgcount=0 1550658950000000000
-ceph,collection=finisher-finisher-0,id=0,type=osd complete_latency.sum=0.015491438,complete_latency.avgtime=0.000174061,complete_latency.avgcount=89,queue_len=0 1550658950000000000
-ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,id=0,type=osd wait.avgtime=0,wait.avgcount=0,max=104857600,get_sum=0,take=0,get_or_fail_fail=0,val=0,get=0,get_or_fail_success=0,wait.sum=0,put=0,take_sum=0,get_started=0,put_sum=0 1550658950000000000
-ceph,collection=throttle-msgr_dispatch_throttler-cluster,id=0,type=osd get_sum=0,take=0,val=0,max=104857600,get_or_fail_success=0,put=0,put_sum=0,wait.sum=0,wait.avgtime=0,get_started=0,get_or_fail_fail=0,take_sum=0,wait.avgcount=0,get=0 1550658950000000000
-ceph,collection=mutex-OSDShard.0::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000
-ceph,collection=throttle-bluestore_throttle_bytes,id=0,type=osd get_sum=140287253,put_sum=140287253,get=209,put=47,val=0,get_started=209,wait.sum=0,wait.avgcount=0,wait.avgtime=0,max=67108864,get_or_fail_fail=0,take=0,take_sum=0,get_or_fail_success=0 1550658950000000000
-ceph,collection=objecter,id=0,type=osd map_inc=15,op_w=0,osd_session_close=0,op=0,osdop_writefull=0,osdop_tmap_up=0,command_resend=0,poolstat_resend=0,osdop_setxattr=0,osdop_append=0,osdop_delete=0,op_rmw=0,poolstat_send=0,op_active=0,osdop_tmap_put=0,osdop_clonerange=0,osdop_rmxattr=0,op_send=0,op_resend=0,osdop_resetxattrs=0,osdop_call=0,osdop_pgls=0,poolstat_active=0,linger_resend=0,osdop_stat=0,op_reply=0,op_laggy=0,statfs_send=0,osdop_getxattr=0,osdop_pgls_filter=0,osdop_notify=0,linger_active=0,osdop_other=0,poolop_resend=0,statfs_active=0,command_active=0,map_epoch=34,osdop_create=0,osdop_watch=0,op_r=0,map_full=0,osdop_src_cmpxattr=0,omap_rd=0,osd_session_open=0,osdop_sparse_read=0,osdop_truncate=0,linger_ping=0,osdop_mapext=0,poolop_send=0,osdop_cmpxattr=0,osd_laggy=0,osdop_writesame=0,osd_sessions=0,osdop_tmap_get=0,op_pg=0,command_send=0,osdop_read=0,op_send_bytes=0,statfs_resend=0,omap_del=0,poolop_active=0,osdop_write=0,osdop_zero=0,omap_wr=0,linger_send=0 1550658950000000000
-ceph,collection=mutex-OSDShard.4::shard_lock,id=0,type=osd wait.avgtime=0,wait.avgcount=0,wait.sum=0 1550658950000000000
-ceph,collection=AsyncMessenger::Worker-0,id=0,type=osd msgr_recv_messages=112,msgr_recv_bytes=14550,msgr_created_connections=15,msgr_running_recv_time=0.026754699,msgr_active_connections=11,msgr_send_messages=11,msgr_running_fast_dispatch_time=0.003373472,msgr_send_bytes=2090,msgr_running_total_time=0.041323592,msgr_running_send_time=0.000441856 1550658950000000000
-ceph,collection=mutex-OSDShard.2::shard_lock,id=0,type=osd wait.sum=0,wait.avgtime=0,wait.avgcount=0 1550658950000000000
-ceph,collection=bluestore,id=0,type=osd submit_lat.avgcount=209,kv_flush_lat.avgtime=0.000002175,bluestore_write_big_bytes=0,bluestore_txc=209,kv_commit_lat.avgcount=47,kv_commit_lat.sum=0.585164754,bluestore_buffer_miss_bytes=511,commit_lat.avgcount=209,bluestore_buffer_bytes=0,bluestore_onodes=102,state_kv_queued_lat.sum=1.439223859,deferred_write_bytes=0,bluestore_write_small_bytes=60279,decompress_lat.sum=0,state_kv_done_lat.avgcount=209,submit_lat.sum=0.055637603,state_prepare_lat.avgcount=209,bluestore_write_big=0,read_wait_aio_lat.avgcount=17,bluestore_write_small_deferred=18,kv_lat.sum=0.585267001,kv_flush_lat.sum=0.000102247,bluestore_buffers=0,state_prepare_lat.sum=0.051411998,bluestore_write_small_pre_read=18,state_deferred_queued_lat.sum=0,decompress_lat.avgtime=0,state_kv_done_lat.avgtime=0.000000629,bluestore_write_small_unused=0,read_lat.avgcount=34,bluestore_onode_shard_misses=0,bluestore_blobs=72,bluestore_read_eio=0,bluestore_blob_split=0,bluestore_onode_shard_hits=0,state_kv_commiting_lat.avgcount=209,bluestore_onode_hits=153,state_kv_commiting_lat.sum=2.477385041,read_onode_meta_lat.avgcount=51,state_finishing_lat.avgtime=0.000000489,bluestore_compressed_original=0,state_kv_queued_lat.avgtime=0.006886238,bluestore_gc_merged=0,throttle_lat.avgtime=0.000001247,state_aio_wait_lat.avgtime=0.000001326,bluestore_onode_reshard=0,state_done_lat.avgcount=191,bluestore_compressed_allocated=0,write_penalty_read_ops=0,bluestore_extents=72,compress_lat.avgtime=0,state_aio_wait_lat.avgcount=209,state_io_done_lat.avgtime=0.000000519,bluestore_write_big_blobs=0,state_kv_queued_lat.avgcount=209,kv_flush_lat.avgcount=47,state_finishing_lat.sum=0.000093565,state_io_done_lat.avgcount=209,kv_lat.avgtime=0.012452489,bluestore_buffer_hit_bytes=20750,read_wait_aio_lat.avgtime=0.000038077,bluestore_allocated=4718592,state_deferred_cleanup_lat.avgtime=0,compress_lat.avgcount=0,write_pad_bytes=304265,throttle_lat.sum=0.000260785,read_onode_meta_lat.avgtime=0.000038702,compress_success_count=0,state_deferred_aio_wait_lat.sum=0,decompress_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,bluestore_stored=51133,state_finishing_lat.avgcount=191,bluestore_onode_misses=132,deferred_write_ops=0,read_wait_aio_lat.sum=0.000647315,csum_lat.avgcount=1,state_kv_done_lat.sum=0.000131531,state_prepare_lat.avgtime=0.00024599,state_deferred_cleanup_lat.avgcount=0,state_deferred_queued_lat.avgcount=0,bluestore_reads_with_retries=0,state_kv_commiting_lat.avgtime=0.011853516,kv_commit_lat.avgtime=0.012450313,read_lat.sum=0.003031418,throttle_lat.avgcount=209,bluestore_write_small_new=71,state_deferred_queued_lat.avgtime=0,bluestore_extent_compress=0,bluestore_write_small=89,state_deferred_cleanup_lat.sum=0,submit_lat.avgtime=0.000266208,bluestore_fragmentation_micros=0,state_aio_wait_lat.sum=0.000277323,commit_lat.avgtime=0.018987901,compress_lat.sum=0,bluestore_compressed=0,state_done_lat.sum=0.000206953,csum_lat.avgtime=0.000023281,state_deferred_aio_wait_lat.avgcount=0,compress_rejected_count=0,kv_lat.avgcount=47,read_onode_meta_lat.sum=0.001973812,read_lat.avgtime=0.000089159,csum_lat.sum=0.000023281,state_io_done_lat.sum=0.00010855,state_done_lat.avgtime=0.000001083,commit_lat.sum=3.96847136 1550658950000000000
-ceph,collection=mutex-OSDShard.3::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000
-ceph,collection=AsyncMessenger::Worker-2,id=0,type=osd msgr_running_fast_dispatch_time=0,msgr_recv_bytes=246,msgr_created_connections=5,msgr_active_connections=1,msgr_running_recv_time=0.001392218,msgr_running_total_time=1.934101301,msgr_running_send_time=1.781171967,msgr_recv_messages=3,msgr_send_bytes=26504031,msgr_send_messages=15409 1550658950000000000
-ceph,collection=finisher-objecter-finisher-0,id=0,type=osd complete_latency.avgcount=0,complete_latency.sum=0,complete_latency.avgtime=0,queue_len=0 1550658950000000000
-ceph,collection=mutex-OSDShard.4::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000
-ceph,collection=throttle-objecter_bytes,id=0,type=osd take=0,get_sum=0,put_sum=0,put=0,val=0,get=0,get_or_fail_fail=0,wait.avgcount=0,get_or_fail_success=0,wait.sum=0,wait.avgtime=0,get_started=0,max=104857600,take_sum=0 1550658950000000000
-ceph,collection=throttle-mon_client_bytes,id=test,type=monitor get_or_fail_fail=0,take_sum=0,wait.avgtime=0,wait.avgcount=0,get_sum=64607,take=0,get_started=0,put=950,val=240,wait.sum=0,max=104857600,get_or_fail_success=953,put_sum=64367,get=953 1550658950000000000
-ceph,collection=mon,id=test,type=monitor election_win=1,election_lose=0,num_sessions=3,session_add=199,session_rm=196,session_trim=0,num_elections=1,election_call=0 1550658950000000000
-ceph,collection=cluster,id=test,type=monitor num_pg_active=0,num_mon=1,osd_bytes_avail=9654697984,num_object=0,num_osd_in=1,osd_bytes_used=1078525952,num_bytes=0,num_osd=1,num_pg_peering=0,num_pg_active_clean=0,num_pg=30,num_mon_quorum=1,num_object_degraded=0,osd_bytes=10733223936,num_object_unfound=0,num_osd_up=1,num_pool=1,num_object_misplaced=0,osd_epoch=34 1550658950000000000
-ceph,collection=throttle-msgr_dispatch_throttler-mon-mgrc,id=test,type=monitor get=2,put=2,get_sum=16,take_sum=0,wait.avgtime=0,val=0,wait.avgcount=0,get_or_fail_success=2,put_sum=16,max=104857600,get_started=0,take=0,get_or_fail_fail=0,wait.sum=0 1550658950000000000
-ceph,collection=rocksdb,id=test,type=monitor rocksdb_write_memtable_time.avgtime=0,submit_sync_latency.avgtime=0.013689071,submit_transaction_sync=39173,rocksdb_write_pre_and_post_time.avgtime=0,get_latency.avgcount=724581,submit_latency.avgtime=0,submit_sync_latency.avgcount=39173,rocksdb_write_wal_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,compact_range=231,compact_queue_merge=0,rocksdb_write_memtable_time.avgcount=0,submit_sync_latency.sum=536.242007888,compact=0,rocksdb_write_delay_time.sum=0,get_latency.sum=9.578173532,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,compact_queue_len=0,get_latency.avgtime=0.000013218,submit_latency.sum=0,get=724581,rocksdb_write_wal_time.avgcount=0,submit_transaction=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_memtable_time.sum=0 1550658950000000000
-ceph,collection=finisher-mon_finisher,id=test,type=monitor complete_latency.avgtime=0,complete_latency.avgcount=0,complete_latency.sum=0,queue_len=0 1550658950000000000
-ceph,collection=paxos,id=test,type=monitor share_state_keys.sum=0,collect_keys.avgcount=0,collect=0,store_state_latency.avgtime=0,begin_latency.sum=338.90900364,collect_keys.sum=0,collect_bytes.avgcount=0,accept_timeout=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,commit_keys.sum=116820,share_state_bytes.sum=0,refresh_latency.avgcount=19576,store_state=0,collect_timeout=0,lease_ack_timeout=0,collect_latency.avgcount=0,store_state_keys.avgcount=0,commit_bytes.sum=38478195,refresh_latency.sum=8.341938952,collect_uncommitted=0,commit_latency.avgcount=19576,share_state=0,begin_latency.avgtime=0.017312474,commit_latency.avgtime=0.009926797,begin_keys.sum=58728,start_peon=0,commit_keys.avgcount=19576,begin_latency.avgcount=19576,store_state_latency.avgcount=0,start_leader=1,begin_keys.avgcount=19576,collect_bytes.sum=0,begin_bytes.avgcount=19576,store_state_bytes.sum=0,commit=19576,begin_bytes.sum=41771257,new_pn_latency.avgtime=0,refresh_latency.avgtime=0.00042613,commit_latency.sum=194.326980684,new_pn=0,refresh=19576,collect_latency.sum=0,collect_latency.avgtime=0,lease_timeout=0,begin=19576,share_state_bytes.avgcount=0,share_state_keys.avgcount=0,store_state_keys.sum=0,store_state_bytes.avgcount=0,store_state_latency.sum=0,commit_bytes.avgcount=19576,restart=2 1550658950000000000
-ceph,collection=finisher-monstore,id=test,type=monitor complete_latency.avgcount=19576,complete_latency.sum=208.300976568,complete_latency.avgtime=0.01064063,queue_len=0 1550658950000000000
-ceph,collection=AsyncMessenger::Worker-2,id=test,type=monitor msgr_created_connections=1,msgr_send_bytes=0,msgr_running_send_time=0,msgr_recv_bytes=0,msgr_send_messages=1,msgr_recv_messages=0,msgr_running_total_time=0.003026541,msgr_running_recv_time=0,msgr_running_fast_dispatch_time=0,msgr_active_connections=1 1550658950000000000
-ceph,collection=throttle-msgr_dispatch_throttler-mon,id=test,type=monitor take=0,take_sum=0,put=39933,get=39933,put_sum=56745184,wait.avgtime=0,get_or_fail_success=39933,wait.sum=0,get_sum=56745184,get_or_fail_fail=0,wait.avgcount=0,val=0,max=104857600,get_started=0 1550658950000000000
-ceph,collection=throttle-mon_daemon_bytes,id=test,type=monitor max=419430400,get_started=0,wait.avgtime=0,take_sum=0,get=262,take=0,put_sum=21212,wait.avgcount=0,get_or_fail_success=262,get_or_fail_fail=0,put=262,wait.sum=0,val=0,get_sum=21212 1550658950000000000
-ceph,collection=AsyncMessenger::Worker-1,id=test,type=monitor msgr_send_messages=1071,msgr_running_total_time=0.703589077,msgr_active_connections=146,msgr_send_bytes=3887863,msgr_running_send_time=0.361602994,msgr_running_recv_time=0.328218119,msgr_running_fast_dispatch_time=0,msgr_recv_messages=978,msgr_recv_bytes=142209,msgr_created_connections=197 1550658950000000000
-ceph,collection=AsyncMessenger::Worker-0,id=test,type=monitor msgr_created_connections=54,msgr_recv_messages=38957,msgr_active_connections=47,msgr_running_fast_dispatch_time=0,msgr_send_bytes=25338946,msgr_running_total_time=9.190267622,msgr_running_send_time=3.124663809,msgr_running_recv_time=13.03937269,msgr_send_messages=15973,msgr_recv_bytes=59558181 1550658950000000000
+> ceph,collection=cct,host=stefanmon1,id=stefanmon1,type=monitor total_workers=0,unhealthy_workers=0 1587117563000000000
+> ceph,collection=mempool,host=stefanmon1,id=stefanmon1,type=monitor bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=719152,buffer_anon_items=192,buffer_meta_bytes=352,buffer_meta_items=4,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=15872,osdmap_items=138,osdmap_mapping_bytes=63112,osdmap_mapping_items=7626,pgmap_bytes=38680,pgmap_items=477,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117563000000000
+> ceph,collection=throttle-mon_client_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=1041157,get_or_fail_fail=0,get_or_fail_success=1041157,get_started=0,get_sum=64928901,max=104857600,put=1041157,put_sum=64928901,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-mon,host=stefanmon1,id=stefanmon1,type=monitor get=12695426,get_or_fail_fail=0,get_or_fail_success=12695426,get_started=0,get_sum=42542216884,max=104857600,put=12695426,put_sum=42542216884,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
+> ceph,collection=finisher-mon_finisher,host=stefanmon1,id=stefanmon1,type=monitor complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117563000000000
+> ceph,collection=finisher-monstore,host=stefanmon1,id=stefanmon1,type=monitor complete_latency.avgcount=1609831,complete_latency.avgtime=0.015857621,complete_latency.sum=25528.09131035,queue_len=0 1587117563000000000
+> ceph,collection=mon,host=stefanmon1,id=stefanmon1,type=monitor election_call=25,election_lose=0,election_win=22,num_elections=94,num_sessions=3,session_add=174679,session_rm=439316,session_trim=137 1587117563000000000
+> ceph,collection=throttle-mon_daemon_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=72697,get_or_fail_fail=0,get_or_fail_success=72697,get_started=0,get_sum=32261199,max=419430400,put=72697,put_sum=32261199,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
+> ceph,collection=rocksdb,host=stefanmon1,id=stefanmon1,type=monitor compact=1,compact_queue_len=0,compact_queue_merge=1,compact_range=19126,get=62449211,get_latency.avgcount=62449211,get_latency.avgtime=0.000022216,get_latency.sum=1387.371811726,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=0,submit_latency.avgtime=0,submit_latency.sum=0,submit_sync_latency.avgcount=3219961,submit_sync_latency.avgtime=0.007532173,submit_sync_latency.sum=24253.303584224,submit_transaction=0,submit_transaction_sync=3219961 1587117563000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=148317,msgr_created_connections=162806,msgr_recv_bytes=11557888328,msgr_recv_messages=5113369,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=868.377161686,msgr_running_send_time=1626.525392721,msgr_running_total_time=4222.235694322,msgr_send_bytes=91516226816,msgr_send_messages=6973706 1587117563000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=146396,msgr_created_connections=159788,msgr_recv_bytes=2162802496,msgr_recv_messages=689168,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=164.148550562,msgr_running_send_time=153.462890368,msgr_running_total_time=644.188791379,msgr_send_bytes=7422484152,msgr_send_messages=749381 1587117563000000000
+> ceph,collection=cluster,host=stefanmon1,id=stefanmon1,type=monitor num_bytes=5055,num_mon=3,num_mon_quorum=3,num_object=245,num_object_degraded=0,num_object_misplaced=0,num_object_unfound=0,num_osd=9,num_osd_in=8,num_osd_up=8,num_pg=504,num_pg_active=504,num_pg_active_clean=504,num_pg_peering=0,num_pool=17,osd_bytes=858959904768,osd_bytes_avail=849889787904,osd_bytes_used=9070116864,osd_epoch=203 1587117563000000000
+> ceph,collection=paxos,host=stefanmon1,id=stefanmon1,type=monitor accept_timeout=1,begin=1609847,begin_bytes.avgcount=1609847,begin_bytes.sum=41408662074,begin_keys.avgcount=1609847,begin_keys.sum=4829541,begin_latency.avgcount=1609847,begin_latency.avgtime=0.007213392,begin_latency.sum=11612.457661116,collect=0,collect_bytes.avgcount=0,collect_bytes.sum=0,collect_keys.avgcount=0,collect_keys.sum=0,collect_latency.avgcount=0,collect_latency.avgtime=0,collect_latency.sum=0,collect_timeout=1,collect_uncommitted=17,commit=1609831,commit_bytes.avgcount=1609831,commit_bytes.sum=41087428442,commit_keys.avgcount=1609831,commit_keys.sum=11637931,commit_latency.avgcount=1609831,commit_latency.avgtime=0.006236333,commit_latency.sum=10039.442388355,lease_ack_timeout=0,lease_timeout=0,new_pn=33,new_pn_latency.avgcount=33,new_pn_latency.avgtime=3.844272773,new_pn_latency.sum=126.86100151,refresh=1609856,refresh_latency.avgcount=1609856,refresh_latency.avgtime=0.005900486,refresh_latency.sum=9498.932866761,restart=109,share_state=2,share_state_bytes.avgcount=2,share_state_bytes.sum=39612,share_state_keys.avgcount=2,share_state_keys.sum=2,start_leader=22,start_peon=0,store_state=14,store_state_bytes.avgcount=14,store_state_bytes.sum=51908281,store_state_keys.avgcount=14,store_state_keys.sum=7016,store_state_latency.avgcount=14,store_state_latency.avgtime=11.668377665,store_state_latency.sum=163.357287311 1587117563000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-mon-mgrc,host=stefanmon1,id=stefanmon1,type=monitor get=13225,get_or_fail_fail=0,get_or_fail_success=13225,get_started=0,get_sum=158700,max=104857600,put=13225,put_sum=158700,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=147680,msgr_created_connections=162374,msgr_recv_bytes=29781706740,msgr_recv_messages=7170733,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=1728.559151358,msgr_running_send_time=2086.681244508,msgr_running_total_time=6084.532916585,msgr_send_bytes=94062125718,msgr_send_messages=9161564 1587117563000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=0,type=osd get=281745,get_or_fail_fail=0,get_or_fail_success=281745,get_started=0,get_sum=446024457,max=104857600,put=281745,put_sum=446024457,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=0,type=osd get=275707,get_or_fail_fail=0,get_or_fail_success=0,get_started=275707,get_sum=185073179842,max=67108864,put=268870,put_sum=185073179842,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=0,type=osd get=2606982,get_or_fail_fail=0,get_or_fail_success=2606982,get_started=0,get_sum=5224391928,max=104857600,put=2606982,put_sum=5224391928,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=rocksdb,host=stefanosd1,id=0,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1570,get_latency.avgcount=1570,get_latency.avgtime=0.000051233,get_latency.sum=0.080436788,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=275707,submit_latency.avgtime=0.000174936,submit_latency.sum=48.231345334,submit_sync_latency.avgcount=268870,submit_sync_latency.avgtime=0.006097313,submit_sync_latency.sum=1639.384555624,submit_transaction=275707,submit_transaction_sync=268870 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=0,type=osd get=2606982,get_or_fail_fail=0,get_or_fail_success=2606982,get_started=0,get_sum=5224391928,max=104857600,put=2606982,put_sum=5224391928,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=0,type=osd get=2610285,get_or_fail_fail=0,get_or_fail_success=2610285,get_started=0,get_sum=5231011140,max=104857600,put=2610285,put_sum=5231011140,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=0,type=osd msgr_active_connections=2093,msgr_created_connections=29142,msgr_recv_bytes=7214238199,msgr_recv_messages=3928206,msgr_running_fast_dispatch_time=171.289615064,msgr_running_recv_time=278.531155966,msgr_running_send_time=489.482588813,msgr_running_total_time=1134.004853662,msgr_send_bytes=9814725232,msgr_send_messages=3814927 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=0,type=osd get=488206,get_or_fail_fail=0,get_or_fail_success=488206,get_started=0,get_sum=104085134,max=104857600,put=488206,put_sum=104085134,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=0,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=recoverystate_perf,host=stefanosd1,id=0,type=osd activating_latency.avgcount=87,activating_latency.avgtime=0.114348341,activating_latency.sum=9.948305683,active_latency.avgcount=25,active_latency.avgtime=1790.961574431,active_latency.sum=44774.039360795,backfilling_latency.avgcount=0,backfilling_latency.avgtime=0,backfilling_latency.sum=0,clean_latency.avgcount=25,clean_latency.avgtime=1790.830827794,clean_latency.sum=44770.770694867,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=141,getinfo_latency.avgtime=0.446233476,getinfo_latency.sum=62.918920183,getlog_latency.avgcount=87,getlog_latency.avgtime=0.007708069,getlog_latency.sum=0.670602073,getmissing_latency.avgcount=87,getmissing_latency.avgtime=0.000077594,getmissing_latency.sum=0.006750701,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=166,initial_latency.avgtime=0.001313715,initial_latency.sum=0.218076764,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=141,peering_latency.avgtime=0.948324273,peering_latency.sum=133.713722563,primary_latency.avgcount=79,primary_latency.avgtime=567.706192991,primary_latency.sum=44848.78924634,recovered_latency.avgcount=87,recovered_latency.avgtime=0.000378284,recovered_latency.sum=0.032910791,recovering_latency.avgcount=2,recovering_latency.avgtime=0.338242008,recovering_latency.sum=0.676484017,replicaactive_latency.avgcount=23,replicaactive_latency.avgtime=1790.893991295,replicaactive_latency.sum=41190.561799786,repnotrecovering_latency.avgcount=25,repnotrecovering_latency.avgtime=1647.627024984,repnotrecovering_latency.sum=41190.675624616,reprecovering_latency.avgcount=2,reprecovering_latency.avgtime=0.311884638,reprecovering_latency.sum=0.623769276,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=2,repwaitrecoveryreserved_latency.avgtime=0.000462873,repwaitrecoveryreserved_latency.sum=0.000925746,reset_latency.avgcount=372,reset_latency.avgtime=0.125056393,reset_latency.sum=46.520978537,start_latency.avgcount=372,start_latency.avgtime=0.000109397,start_latency.sum=0.040695881,started_latency.avgcount=206,started_latency.avgtime=418.299777245,started_latency.sum=86169.754112641,stray_latency.avgcount=231,stray_latency.avgtime=0.98203205,stray_latency.sum=226.849403565,waitactingchange_latency.avgcount=0,waitactingchange_latency.avgtime=0,waitactingchange_latency.sum=0,waitlocalbackfillreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.avgtime=0,waitlocalbackfillreserved_latency.sum=0,waitlocalrecoveryreserved_latency.avgcount=2,waitlocalrecoveryreserved_latency.avgtime=0.002802377,waitlocalrecoveryreserved_latency.sum=0.005604755,waitremotebackfillreserved_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,waitremotebackfillreserved_latency.sum=0,waitremoterecoveryreserved_latency.avgcount=2,waitremoterecoveryreserved_latency.avgtime=0.012855439,waitremoterecoveryreserved_latency.sum=0.025710878,waitupthru_latency.avgcount=87,waitupthru_latency.avgtime=0.805727895,waitupthru_latency.sum=70.09832695 1587117698000000000
+> ceph,collection=cct,host=stefanosd1,id=0,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=0,type=osd get=2610285,get_or_fail_fail=0,get_or_fail_success=2610285,get_started=0,get_sum=5231011140,max=104857600,put=2610285,put_sum=5231011140,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=bluefs,host=stefanosd1,id=0,type=osd bytes_written_slow=0,bytes_written_sst=9018781,bytes_written_wal=831081573,db_total_bytes=4294967296,db_used_bytes=434110464,files_written_sst=3,files_written_wal=2,gift_bytes=0,log_bytes=134291456,log_compactions=1,logged_bytes=1101668352,max_bytes_db=1234173952,max_bytes_slow=0,max_bytes_wal=0,num_files=11,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000
+> ceph,collection=mempool,host=stefanosd1,id=0,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=10600,bluefs_items=458,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=622592,bluestore_cache_data_items=43,bluestore_cache_onode_bytes=249280,bluestore_cache_onode_items=380,bluestore_cache_other_bytes=192678,bluestore_cache_other_items=20199,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=8272,bluestore_txc_items=11,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=670130,bluestore_writing_deferred_items=176,bluestore_writing_items=0,buffer_anon_bytes=2412465,buffer_anon_items=297,buffer_meta_bytes=5896,buffer_meta_items=67,mds_co_bytes=0,mds_co_items=0,osd_bytes=2124800,osd_items=166,osd_mapbl_bytes=155152,osd_mapbl_items=10,osd_pglog_bytes=3214704,osd_pglog_items=6288,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000
+> ceph,collection=osd,host=stefanosd1,id=0,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=21,map_message_epochs=40,map_messages=31,messages_delayed_for_map=0,missed_crc=0,numpg=166,numpg_primary=62,numpg_removing=0,numpg_replica=104,numpg_stray=0,object_ctx_cache_hit=476529,object_ctx_cache_total=476536,op=476525,op_before_dequeue_op_lat.avgcount=755708,op_before_dequeue_op_lat.avgtime=0.000205759,op_before_dequeue_op_lat.sum=155.493843473,op_before_queue_op_lat.avgcount=755702,op_before_queue_op_lat.avgtime=0.000047877,op_before_queue_op_lat.sum=36.181069552,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=476525,op_latency.avgtime=0.000365956,op_latency.sum=174.387387878,op_out_bytes=10882,op_prepare_latency.avgcount=476527,op_prepare_latency.avgtime=0.000205307,op_prepare_latency.sum=97.834380034,op_process_latency.avgcount=476525,op_process_latency.avgtime=0.000139616,op_process_latency.sum=66.530847665,op_r=476521,op_r_latency.avgcount=476521,op_r_latency.avgtime=0.00036559,op_r_latency.sum=174.21148267,op_r_out_bytes=10882,op_r_prepare_latency.avgcount=476523,op_r_prepare_latency.avgtime=0.000205302,op_r_prepare_latency.sum=97.831473175,op_r_process_latency.avgcount=476521,op_r_process_latency.avgtime=0.000139396,op_r_process_latency.sum=66.425498624,op_rw=2,op_rw_in_bytes=0,op_rw_latency.avgcount=2,op_rw_latency.avgtime=0.048818975,op_rw_latency.sum=0.097637951,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=2,op_rw_prepare_latency.avgtime=0.000467887,op_rw_prepare_latency.sum=0.000935775,op_rw_process_latency.avgcount=2,op_rw_process_latency.avgtime=0.013741256,op_rw_process_latency.sum=0.027482512,op_w=2,op_w_in_bytes=0,op_w_latency.avgcount=2,op_w_latency.avgtime=0.039133628,op_w_latency.sum=0.078267257,op_w_prepare_latency.avgcount=2,op_w_prepare_latency.avgtime=0.000985542,op_w_prepare_latency.sum=0.001971084,op_w_process_latency.avgcount=2,op_w_process_latency.avgtime=0.038933264,op_w_process_latency.sum=0.077866529,op_wip=0,osd_map_bl_cache_hit=22,osd_map_bl_cache_miss=40,osd_map_cache_hit=4570,osd_map_cache_miss=15,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=2050,osd_pg_fastinfo=265780,osd_pg_info=274542,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=2,push_out_bytes=10,recovery_bytes=10,recovery_ops=2,stat_bytes=107369988096,stat_bytes_avail=106271539200,stat_bytes_used=1098448896,subop=253554,subop_in_bytes=168644225,subop_latency.avgcount=253554,subop_latency.avgtime=0.0073036,subop_latency.sum=1851.857230388,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=253554,subop_w_in_bytes=168644225,subop_w_latency.avgcount=253554,subop_w_latency.avgtime=0.0073036,subop_w_latency.sum=1851.857230388,tier_clean=0,tier_delay=0,tier_dirty=0,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=0,type=osd msgr_active_connections=2055,msgr_created_connections=27411,msgr_recv_bytes=6431950009,msgr_recv_messages=3552443,msgr_running_fast_dispatch_time=162.271664213,msgr_running_recv_time=254.307853033,msgr_running_send_time=503.037285799,msgr_running_total_time=1130.21070681,msgr_send_bytes=10865436237,msgr_send_messages=3523374 1587117698000000000
+> ceph,collection=bluestore,host=stefanosd1,id=0,type=osd bluestore_allocated=24641536,bluestore_blob_split=0,bluestore_blobs=88,bluestore_buffer_bytes=622592,bluestore_buffer_hit_bytes=160578,bluestore_buffer_miss_bytes=540236,bluestore_buffers=43,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=88,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=532102,bluestore_onode_misses=388,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=380,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1987856,bluestore_txc=275707,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=60,bluestore_write_small_bytes=343843,bluestore_write_small_deferred=22,bluestore_write_small_new=38,bluestore_write_small_pre_read=22,bluestore_write_small_unused=0,commit_lat.avgcount=275707,commit_lat.avgtime=0.00699778,commit_lat.sum=1929.337103334,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=67,csum_lat.avgtime=0.000032601,csum_lat.sum=0.002184323,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=268870,kv_commit_lat.avgtime=0.006365428,kv_commit_lat.sum=1711.472749866,kv_final_lat.avgcount=268867,kv_final_lat.avgtime=0.000043227,kv_final_lat.sum=11.622427109,kv_flush_lat.avgcount=268870,kv_flush_lat.avgtime=0.000000223,kv_flush_lat.sum=0.060141588,kv_sync_lat.avgcount=268870,kv_sync_lat.avgtime=0.006365652,kv_sync_lat.sum=1711.532891454,omap_lower_bound_lat.avgcount=2,omap_lower_bound_lat.avgtime=0.000006524,omap_lower_bound_lat.sum=0.000013048,omap_next_lat.avgcount=6704,omap_next_lat.avgtime=0.000004721,omap_next_lat.sum=0.031654097,omap_seek_to_first_lat.avgcount=323,omap_seek_to_first_lat.avgtime=0.00000522,omap_seek_to_first_lat.sum=0.00168614,omap_upper_bound_lat.avgcount=4,omap_upper_bound_lat.avgtime=0.000013086,omap_upper_bound_lat.sum=0.000052344,read_lat.avgcount=227,read_lat.avgtime=0.000699457,read_lat.sum=0.158776879,read_onode_meta_lat.avgcount=311,read_onode_meta_lat.avgtime=0.000072207,read_onode_meta_lat.sum=0.022456667,read_wait_aio_lat.avgcount=84,read_wait_aio_lat.avgtime=0.001556141,read_wait_aio_lat.sum=0.130715885,state_aio_wait_lat.avgcount=275707,state_aio_wait_lat.avgtime=0.000000345,state_aio_wait_lat.sum=0.095246457,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=275696,state_done_lat.avgtime=0.00000286,state_done_lat.sum=0.788700007,state_finishing_lat.avgcount=275696,state_finishing_lat.avgtime=0.000000302,state_finishing_lat.sum=0.083437168,state_io_done_lat.avgcount=275707,state_io_done_lat.avgtime=0.000001041,state_io_done_lat.sum=0.287025147,state_kv_commiting_lat.avgcount=275707,state_kv_commiting_lat.avgtime=0.006424459,state_kv_commiting_lat.sum=1771.268407864,state_kv_done_lat.avgcount=275707,state_kv_done_lat.avgtime=0.000001627,state_kv_done_lat.sum=0.448805853,state_kv_queued_lat.avgcount=275707,state_kv_queued_lat.avgtime=0.000488565,state_kv_queued_lat.sum=134.7009424,state_prepare_lat.avgcount=275707,state_prepare_lat.avgtime=0.000082464,state_prepare_lat.sum=22.736065534,submit_lat.avgcount=275707,submit_lat.avgtime=0.000120236,submit_lat.sum=33.149934412,throttle_lat.avgcount=275707,throttle_lat.avgtime=0.000001571,throttle_lat.sum=0.433185935,write_pad_bytes=151773,write_penalty_read_ops=0 1587117698000000000
+> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=0,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=objecter,host=stefanosd1,id=0,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000
+> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=0,type=osd complete_latency.avgcount=11,complete_latency.avgtime=0.003447516,complete_latency.sum=0.037922681,queue_len=0 1587117698000000000
+> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=0,type=osd msgr_active_connections=2128,msgr_created_connections=33685,msgr_recv_bytes=8679123051,msgr_recv_messages=4200356,msgr_running_fast_dispatch_time=151.889337454,msgr_running_recv_time=297.632294886,msgr_running_send_time=599.20020523,msgr_running_total_time=1321.361931202,msgr_send_bytes=11716202897,msgr_send_messages=4347418 1587117698000000000
+> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=0,type=osd get=476554,get_or_fail_fail=0,get_or_fail_success=476554,get_started=0,get_sum=103413728,max=524288000,put=476587,put_sum=103413728,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=0,type=osd get=11,get_or_fail_fail=0,get_or_fail_success=11,get_started=0,get_sum=7723117,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7723117,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=1,type=osd get=860895,get_or_fail_fail=0,get_or_fail_success=860895,get_started=0,get_sum=596482256,max=104857600,put=860895,put_sum=596482256,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=1,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=osd,host=stefanosd1,id=1,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=29,map_message_epochs=50,map_messages=39,messages_delayed_for_map=0,missed_crc=0,numpg=188,numpg_primary=71,numpg_removing=0,numpg_replica=117,numpg_stray=0,object_ctx_cache_hit=1349777,object_ctx_cache_total=2934118,op=1319230,op_before_dequeue_op_lat.avgcount=3792053,op_before_dequeue_op_lat.avgtime=0.000405802,op_before_dequeue_op_lat.sum=1538.826381623,op_before_queue_op_lat.avgcount=3778690,op_before_queue_op_lat.avgtime=0.000033273,op_before_queue_op_lat.sum=125.731131596,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=1319230,op_latency.avgtime=0.002858138,op_latency.sum=3770.541581676,op_out_bytes=1789210,op_prepare_latency.avgcount=1336472,op_prepare_latency.avgtime=0.000279458,op_prepare_latency.sum=373.488913339,op_process_latency.avgcount=1319230,op_process_latency.avgtime=0.002666408,op_process_latency.sum=3517.606407526,op_r=1075394,op_r_latency.avgcount=1075394,op_r_latency.avgtime=0.000303779,op_r_latency.sum=326.682443032,op_r_out_bytes=1789210,op_r_prepare_latency.avgcount=1075394,op_r_prepare_latency.avgtime=0.000171228,op_r_prepare_latency.sum=184.138580631,op_r_process_latency.avgcount=1075394,op_r_process_latency.avgtime=0.00011609,op_r_process_latency.sum=124.842894319,op_rw=243832,op_rw_in_bytes=0,op_rw_latency.avgcount=243832,op_rw_latency.avgtime=0.014123636,op_rw_latency.sum=3443.79445124,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=261072,op_rw_prepare_latency.avgtime=0.000725265,op_rw_prepare_latency.sum=189.346543463,op_rw_process_latency.avgcount=243832,op_rw_process_latency.avgtime=0.013914089,op_rw_process_latency.sum=3392.700241086,op_w=4,op_w_in_bytes=0,op_w_latency.avgcount=4,op_w_latency.avgtime=0.016171851,op_w_latency.sum=0.064687404,op_w_prepare_latency.avgcount=6,op_w_prepare_latency.avgtime=0.00063154,op_w_prepare_latency.sum=0.003789245,op_w_process_latency.avgcount=4,op_w_process_latency.avgtime=0.01581803,op_w_process_latency.sum=0.063272121,op_wip=0,osd_map_bl_cache_hit=36,osd_map_bl_cache_miss=40,osd_map_cache_hit=5404,osd_map_cache_miss=14,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=2333,osd_pg_fastinfo=576157,osd_pg_info=591751,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=22,push_out_bytes=0,recovery_bytes=0,recovery_ops=21,stat_bytes=107369988096,stat_bytes_avail=106271997952,stat_bytes_used=1097990144,subop=306946,subop_in_bytes=204236742,subop_latency.avgcount=306946,subop_latency.avgtime=0.006744881,subop_latency.sum=2070.314452989,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=306946,subop_w_in_bytes=204236742,subop_w_latency.avgcount=306946,subop_w_latency.avgtime=0.006744881,subop_w_latency.sum=2070.314452989,tier_clean=0,tier_delay=0,tier_dirty=8,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000
+> ceph,collection=objecter,host=stefanosd1,id=1,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=1,type=osd msgr_active_connections=1356,msgr_created_connections=12290,msgr_recv_bytes=8577187219,msgr_recv_messages=6387040,msgr_running_fast_dispatch_time=475.903632306,msgr_running_recv_time=425.937196699,msgr_running_send_time=783.676217521,msgr_running_total_time=1989.242459076,msgr_send_bytes=12583034449,msgr_send_messages=6074344 1587117698000000000
+> ceph,collection=bluestore,host=stefanosd1,id=1,type=osd bluestore_allocated=24182784,bluestore_blob_split=0,bluestore_blobs=88,bluestore_buffer_bytes=614400,bluestore_buffer_hit_bytes=142047,bluestore_buffer_miss_bytes=541480,bluestore_buffers=41,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=88,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=1403948,bluestore_onode_misses=1584732,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=459,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1985647,bluestore_txc=593150,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=58,bluestore_write_small_bytes=343091,bluestore_write_small_deferred=20,bluestore_write_small_new=38,bluestore_write_small_pre_read=20,bluestore_write_small_unused=0,commit_lat.avgcount=593150,commit_lat.avgtime=0.006514834,commit_lat.sum=3864.274280733,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=60,csum_lat.avgtime=0.000028258,csum_lat.sum=0.001695512,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=578129,kv_commit_lat.avgtime=0.00570707,kv_commit_lat.sum=3299.423186928,kv_final_lat.avgcount=578124,kv_final_lat.avgtime=0.000042752,kv_final_lat.sum=24.716171934,kv_flush_lat.avgcount=578129,kv_flush_lat.avgtime=0.000000209,kv_flush_lat.sum=0.121169044,kv_sync_lat.avgcount=578129,kv_sync_lat.avgtime=0.00570728,kv_sync_lat.sum=3299.544355972,omap_lower_bound_lat.avgcount=22,omap_lower_bound_lat.avgtime=0.000005979,omap_lower_bound_lat.sum=0.000131539,omap_next_lat.avgcount=13248,omap_next_lat.avgtime=0.000004836,omap_next_lat.sum=0.064077797,omap_seek_to_first_lat.avgcount=525,omap_seek_to_first_lat.avgtime=0.000004906,omap_seek_to_first_lat.sum=0.002575786,omap_upper_bound_lat.avgcount=0,omap_upper_bound_lat.avgtime=0,omap_upper_bound_lat.sum=0,read_lat.avgcount=406,read_lat.avgtime=0.000383254,read_lat.sum=0.155601529,read_onode_meta_lat.avgcount=483,read_onode_meta_lat.avgtime=0.000008805,read_onode_meta_lat.sum=0.004252832,read_wait_aio_lat.avgcount=77,read_wait_aio_lat.avgtime=0.001907361,read_wait_aio_lat.sum=0.146866799,state_aio_wait_lat.avgcount=593150,state_aio_wait_lat.avgtime=0.000000388,state_aio_wait_lat.sum=0.230498048,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=593140,state_done_lat.avgtime=0.000003048,state_done_lat.sum=1.80789161,state_finishing_lat.avgcount=593140,state_finishing_lat.avgtime=0.000000325,state_finishing_lat.sum=0.192952339,state_io_done_lat.avgcount=593150,state_io_done_lat.avgtime=0.000001202,state_io_done_lat.sum=0.713333116,state_kv_commiting_lat.avgcount=593150,state_kv_commiting_lat.avgtime=0.005788541,state_kv_commiting_lat.sum=3433.473378536,state_kv_done_lat.avgcount=593150,state_kv_done_lat.avgtime=0.000001472,state_kv_done_lat.sum=0.873559611,state_kv_queued_lat.avgcount=593150,state_kv_queued_lat.avgtime=0.000634215,state_kv_queued_lat.sum=376.18491577,state_prepare_lat.avgcount=593150,state_prepare_lat.avgtime=0.000089694,state_prepare_lat.sum=53.202464675,submit_lat.avgcount=593150,submit_lat.avgtime=0.000127856,submit_lat.sum=75.83816759,throttle_lat.avgcount=593150,throttle_lat.avgtime=0.000001726,throttle_lat.sum=1.023832181,write_pad_bytes=144333,write_penalty_read_ops=0 1587117698000000000
+> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=1,type=osd get=2920772,get_or_fail_fail=0,get_or_fail_success=2920772,get_started=0,get_sum=739935873,max=524288000,put=4888498,put_sum=739935873,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=1,type=osd get=2605442,get_or_fail_fail=0,get_or_fail_success=2605442,get_started=0,get_sum=5221305768,max=104857600,put=2605442,put_sum=5221305768,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=1,type=osd msgr_active_connections=1375,msgr_created_connections=12689,msgr_recv_bytes=6393440855,msgr_recv_messages=3260458,msgr_running_fast_dispatch_time=120.622437418,msgr_running_recv_time=225.24709441,msgr_running_send_time=499.150587343,msgr_running_total_time=1043.340296846,msgr_send_bytes=11134862571,msgr_send_messages=3450760 1587117698000000000
+> ceph,collection=bluefs,host=stefanosd1,id=1,type=osd bytes_written_slow=0,bytes_written_sst=19824993,bytes_written_wal=1788507023,db_total_bytes=4294967296,db_used_bytes=522190848,files_written_sst=4,files_written_wal=2,gift_bytes=0,log_bytes=1056768,log_compactions=2,logged_bytes=1933271040,max_bytes_db=1483735040,max_bytes_slow=0,max_bytes_wal=0,num_files=12,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=1,type=osd get=2605442,get_or_fail_fail=0,get_or_fail_success=2605442,get_started=0,get_sum=5221305768,max=104857600,put=2605442,put_sum=5221305768,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=1,type=osd get=10,get_or_fail_fail=0,get_or_fail_success=10,get_started=0,get_sum=7052009,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7052009,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=rocksdb,host=stefanosd1,id=1,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1586061,get_latency.avgcount=1586061,get_latency.avgtime=0.000083009,get_latency.sum=131.658296684,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=593150,submit_latency.avgtime=0.000172072,submit_latency.sum=102.064900673,submit_sync_latency.avgcount=578129,submit_sync_latency.avgtime=0.005447017,submit_sync_latency.sum=3149.078822012,submit_transaction=593150,submit_transaction_sync=578129 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=1,type=osd get=2607669,get_or_fail_fail=0,get_or_fail_success=2607669,get_started=0,get_sum=5225768676,max=104857600,put=2607669,put_sum=5225768676,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=recoverystate_perf,host=stefanosd1,id=1,type=osd activating_latency.avgcount=104,activating_latency.avgtime=0.071646485,activating_latency.sum=7.451234493,active_latency.avgcount=33,active_latency.avgtime=1734.369034268,active_latency.sum=57234.178130859,backfilling_latency.avgcount=1,backfilling_latency.avgtime=2.598401698,backfilling_latency.sum=2.598401698,clean_latency.avgcount=33,clean_latency.avgtime=1734.213467342,clean_latency.sum=57229.044422292,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=167,getinfo_latency.avgtime=0.373444627,getinfo_latency.sum=62.365252849,getlog_latency.avgcount=105,getlog_latency.avgtime=0.003575062,getlog_latency.sum=0.375381569,getmissing_latency.avgcount=104,getmissing_latency.avgtime=0.000157091,getmissing_latency.sum=0.016337565,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=188,initial_latency.avgtime=0.001833512,initial_latency.sum=0.344700343,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=167,peering_latency.avgtime=1.501818082,peering_latency.sum=250.803619796,primary_latency.avgcount=97,primary_latency.avgtime=591.344286378,primary_latency.sum=57360.395778762,recovered_latency.avgcount=104,recovered_latency.avgtime=0.000291138,recovered_latency.sum=0.030278433,recovering_latency.avgcount=2,recovering_latency.avgtime=0.142378096,recovering_latency.sum=0.284756192,replicaactive_latency.avgcount=32,replicaactive_latency.avgtime=1788.474901442,replicaactive_latency.sum=57231.196846165,repnotrecovering_latency.avgcount=34,repnotrecovering_latency.avgtime=1683.273587087,repnotrecovering_latency.sum=57231.301960987,reprecovering_latency.avgcount=2,reprecovering_latency.avgtime=0.418094818,reprecovering_latency.sum=0.836189637,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=2,repwaitrecoveryreserved_latency.avgtime=0.000588413,repwaitrecoveryreserved_latency.sum=0.001176827,reset_latency.avgcount=433,reset_latency.avgtime=0.15669689,reset_latency.sum=67.849753631,start_latency.avgcount=433,start_latency.avgtime=0.000412707,start_latency.sum=0.178702508,started_latency.avgcount=245,started_latency.avgtime=468.419544137,started_latency.sum=114762.788313581,stray_latency.avgcount=266,stray_latency.avgtime=1.489291271,stray_latency.sum=396.151478238,waitactingchange_latency.avgcount=1,waitactingchange_latency.avgtime=0.982689906,waitactingchange_latency.sum=0.982689906,waitlocalbackfillreserved_latency.avgcount=1,waitlocalbackfillreserved_latency.avgtime=0.000542092,waitlocalbackfillreserved_latency.sum=0.000542092,waitlocalrecoveryreserved_latency.avgcount=2,waitlocalrecoveryreserved_latency.avgtime=0.00391669,waitlocalrecoveryreserved_latency.sum=0.007833381,waitremotebackfillreserved_latency.avgcount=1,waitremotebackfillreserved_latency.avgtime=0.003110409,waitremotebackfillreserved_latency.sum=0.003110409,waitremoterecoveryreserved_latency.avgcount=2,waitremoterecoveryreserved_latency.avgtime=0.012229338,waitremoterecoveryreserved_latency.sum=0.024458677,waitupthru_latency.avgcount=104,waitupthru_latency.avgtime=1.807608905,waitupthru_latency.sum=187.991326197 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=1,type=osd msgr_active_connections=1289,msgr_created_connections=9469,msgr_recv_bytes=8348149800,msgr_recv_messages=5048791,msgr_running_fast_dispatch_time=313.754567889,msgr_running_recv_time=372.054833029,msgr_running_send_time=694.900405016,msgr_running_total_time=1656.294769387,msgr_send_bytes=11550148208,msgr_send_messages=5175962 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=1,type=osd get=593150,get_or_fail_fail=0,get_or_fail_success=0,get_started=593150,get_sum=398147414260,max=67108864,put=578129,put_sum=398147414260,take=0,take_sum=0,val=0,wait.avgcount=29,wait.avgtime=0.000972655,wait.sum=0.028207005 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=cct,host=stefanosd1,id=1,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000
+> ceph,collection=mempool,host=stefanosd1,id=1,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=13064,bluefs_items=593,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=614400,bluestore_cache_data_items=41,bluestore_cache_onode_bytes=301104,bluestore_cache_onode_items=459,bluestore_cache_other_bytes=230945,bluestore_cache_other_items=26119,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=7520,bluestore_txc_items=10,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=657768,bluestore_writing_deferred_items=172,bluestore_writing_items=0,buffer_anon_bytes=2328515,buffer_anon_items=271,buffer_meta_bytes=5808,buffer_meta_items=66,mds_co_bytes=0,mds_co_items=0,osd_bytes=2406400,osd_items=188,osd_mapbl_bytes=139623,osd_mapbl_items=9,osd_pglog_bytes=6768784,osd_pglog_items=18179,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=1,type=osd get=2932513,get_or_fail_fail=0,get_or_fail_success=2932513,get_started=0,get_sum=740620215,max=104857600,put=2932513,put_sum=740620215,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=1,type=osd get=2607669,get_or_fail_fail=0,get_or_fail_success=2607669,get_started=0,get_sum=5225768676,max=104857600,put=2607669,put_sum=5225768676,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=1,type=osd complete_latency.avgcount=10,complete_latency.avgtime=0.002884646,complete_latency.sum=0.028846469,queue_len=0 1587117698000000000
+> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=1,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=2,type=osd complete_latency.avgcount=11,complete_latency.avgtime=0.002714416,complete_latency.sum=0.029858583,queue_len=0 1587117698000000000
+> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=2,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=objecter,host=stefanosd1,id=2,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=2,type=osd get=2607136,get_or_fail_fail=0,get_or_fail_success=2607136,get_started=0,get_sum=5224700544,max=104857600,put=2607136,put_sum=5224700544,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=mempool,host=stefanosd1,id=2,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=11624,bluefs_items=522,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=614400,bluestore_cache_data_items=41,bluestore_cache_onode_bytes=228288,bluestore_cache_onode_items=348,bluestore_cache_other_bytes=174158,bluestore_cache_other_items=18527,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=8272,bluestore_txc_items=11,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=670130,bluestore_writing_deferred_items=176,bluestore_writing_items=0,buffer_anon_bytes=2311664,buffer_anon_items=244,buffer_meta_bytes=5456,buffer_meta_items=62,mds_co_bytes=0,mds_co_items=0,osd_bytes=1920000,osd_items=150,osd_mapbl_bytes=155152,osd_mapbl_items=10,osd_pglog_bytes=3393520,osd_pglog_items=9128,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000
+> ceph,collection=osd,host=stefanosd1,id=2,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=37,map_message_epochs=56,map_messages=37,messages_delayed_for_map=0,missed_crc=0,numpg=150,numpg_primary=59,numpg_removing=0,numpg_replica=91,numpg_stray=0,object_ctx_cache_hit=705923,object_ctx_cache_total=705951,op=690584,op_before_dequeue_op_lat.avgcount=1155697,op_before_dequeue_op_lat.avgtime=0.000217926,op_before_dequeue_op_lat.sum=251.856487141,op_before_queue_op_lat.avgcount=1148445,op_before_queue_op_lat.avgtime=0.000039696,op_before_queue_op_lat.sum=45.589516462,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=690584,op_latency.avgtime=0.002488685,op_latency.sum=1718.646504654,op_out_bytes=1026000,op_prepare_latency.avgcount=698700,op_prepare_latency.avgtime=0.000300375,op_prepare_latency.sum=209.872029659,op_process_latency.avgcount=690584,op_process_latency.avgtime=0.00230742,op_process_latency.sum=1593.46739165,op_r=548020,op_r_latency.avgcount=548020,op_r_latency.avgtime=0.000298287,op_r_latency.sum=163.467760649,op_r_out_bytes=1026000,op_r_prepare_latency.avgcount=548020,op_r_prepare_latency.avgtime=0.000186359,op_r_prepare_latency.sum=102.128629183,op_r_process_latency.avgcount=548020,op_r_process_latency.avgtime=0.00012716,op_r_process_latency.sum=69.686468884,op_rw=142562,op_rw_in_bytes=0,op_rw_latency.avgcount=142562,op_rw_latency.avgtime=0.010908597,op_rw_latency.sum=1555.151525732,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=150678,op_rw_prepare_latency.avgtime=0.000715043,op_rw_prepare_latency.sum=107.741399304,op_rw_process_latency.avgcount=142562,op_rw_process_latency.avgtime=0.01068836,op_rw_process_latency.sum=1523.754107887,op_w=2,op_w_in_bytes=0,op_w_latency.avgcount=2,op_w_latency.avgtime=0.013609136,op_w_latency.sum=0.027218273,op_w_prepare_latency.avgcount=2,op_w_prepare_latency.avgtime=0.001000586,op_w_prepare_latency.sum=0.002001172,op_w_process_latency.avgcount=2,op_w_process_latency.avgtime=0.013407439,op_w_process_latency.sum=0.026814879,op_wip=0,osd_map_bl_cache_hit=15,osd_map_bl_cache_miss=41,osd_map_cache_hit=4241,osd_map_cache_miss=14,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=1824,osd_pg_fastinfo=285998,osd_pg_info=294869,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=1,push_out_bytes=0,recovery_bytes=0,recovery_ops=0,stat_bytes=107369988096,stat_bytes_avail=106271932416,stat_bytes_used=1098055680,subop=134165,subop_in_bytes=89501237,subop_latency.avgcount=134165,subop_latency.avgtime=0.007313523,subop_latency.sum=981.218888627,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=134165,subop_w_in_bytes=89501237,subop_w_latency.avgcount=134165,subop_w_latency.avgtime=0.007313523,subop_w_latency.sum=981.218888627,tier_clean=0,tier_delay=0,tier_dirty=4,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=2,type=osd msgr_active_connections=746,msgr_created_connections=15212,msgr_recv_bytes=8633229006,msgr_recv_messages=4284202,msgr_running_fast_dispatch_time=153.820479102,msgr_running_recv_time=282.031655658,msgr_running_send_time=585.444749736,msgr_running_total_time=1231.431789242,msgr_send_bytes=11962769351,msgr_send_messages=4440622 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=2,type=osd get=2607136,get_or_fail_fail=0,get_or_fail_success=2607136,get_started=0,get_sum=5224700544,max=104857600,put=2607136,put_sum=5224700544,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=bluefs,host=stefanosd1,id=2,type=osd bytes_written_slow=0,bytes_written_sst=9065815,bytes_written_wal=901884611,db_total_bytes=4294967296,db_used_bytes=546308096,files_written_sst=3,files_written_wal=2,gift_bytes=0,log_bytes=225726464,log_compactions=1,logged_bytes=1195945984,max_bytes_db=1234173952,max_bytes_slow=0,max_bytes_wal=0,num_files=11,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000
+> ceph,collection=recoverystate_perf,host=stefanosd1,id=2,type=osd activating_latency.avgcount=88,activating_latency.avgtime=0.086149065,activating_latency.sum=7.581117751,active_latency.avgcount=29,active_latency.avgtime=1790.849396082,active_latency.sum=51934.632486379,backfilling_latency.avgcount=0,backfilling_latency.avgtime=0,backfilling_latency.sum=0,clean_latency.avgcount=29,clean_latency.avgtime=1790.754765195,clean_latency.sum=51931.888190683,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=134,getinfo_latency.avgtime=0.427567953,getinfo_latency.sum=57.294105786,getlog_latency.avgcount=88,getlog_latency.avgtime=0.011810192,getlog_latency.sum=1.03929697,getmissing_latency.avgcount=88,getmissing_latency.avgtime=0.000104598,getmissing_latency.sum=0.009204673,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=150,initial_latency.avgtime=0.001251361,initial_latency.sum=0.187704197,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=134,peering_latency.avgtime=0.998405763,peering_latency.sum=133.786372331,primary_latency.avgcount=75,primary_latency.avgtime=693.473306562,primary_latency.sum=52010.497992212,recovered_latency.avgcount=88,recovered_latency.avgtime=0.000609715,recovered_latency.sum=0.053654964,recovering_latency.avgcount=1,recovering_latency.avgtime=0.100713031,recovering_latency.sum=0.100713031,replicaactive_latency.avgcount=21,replicaactive_latency.avgtime=1790.852354921,replicaactive_latency.sum=37607.89945336,repnotrecovering_latency.avgcount=21,repnotrecovering_latency.avgtime=1790.852315529,repnotrecovering_latency.sum=37607.898626121,reprecovering_latency.avgcount=0,reprecovering_latency.avgtime=0,reprecovering_latency.sum=0,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=0,repwaitrecoveryreserved_latency.avgtime=0,repwaitrecoveryreserved_latency.sum=0,reset_latency.avgcount=346,reset_latency.avgtime=0.126826803,reset_latency.sum=43.882073917,start_latency.avgcount=346,start_latency.avgtime=0.000233277,start_latency.sum=0.080713962,started_latency.avgcount=196,started_latency.avgtime=457.885378797,started_latency.sum=89745.534244237,stray_latency.avgcount=212,stray_latency.avgtime=1.013774396,stray_latency.sum=214.920172121,waitactingchange_latency.avgcount=0,waitactingchange_latency.avgtime=0,waitactingchange_latency.sum=0,waitlocalbackfillreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.avgtime=0,waitlocalbackfillreserved_latency.sum=0,waitlocalrecoveryreserved_latency.avgcount=1,waitlocalrecoveryreserved_latency.avgtime=0.001572379,waitlocalrecoveryreserved_latency.sum=0.001572379,waitremotebackfillreserved_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,waitremotebackfillreserved_latency.sum=0,waitremoterecoveryreserved_latency.avgcount=1,waitremoterecoveryreserved_latency.avgtime=0.012729633,waitremoterecoveryreserved_latency.sum=0.012729633,waitupthru_latency.avgcount=88,waitupthru_latency.avgtime=0.857137729,waitupthru_latency.sum=75.428120205 1587117698000000000
+> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=bluestore,host=stefanosd1,id=2,type=osd bluestore_allocated=24248320,bluestore_blob_split=0,bluestore_blobs=83,bluestore_buffer_bytes=614400,bluestore_buffer_hit_bytes=161362,bluestore_buffer_miss_bytes=534799,bluestore_buffers=41,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=83,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=723852,bluestore_onode_misses=364,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=348,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1984402,bluestore_txc=295997,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=60,bluestore_write_small_bytes=343843,bluestore_write_small_deferred=22,bluestore_write_small_new=38,bluestore_write_small_pre_read=22,bluestore_write_small_unused=0,commit_lat.avgcount=295997,commit_lat.avgtime=0.006994931,commit_lat.sum=2070.478673619,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=47,csum_lat.avgtime=0.000034434,csum_lat.sum=0.001618423,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=291889,kv_commit_lat.avgtime=0.006347015,kv_commit_lat.sum=1852.624108527,kv_final_lat.avgcount=291885,kv_final_lat.avgtime=0.00004358,kv_final_lat.sum=12.720529751,kv_flush_lat.avgcount=291889,kv_flush_lat.avgtime=0.000000211,kv_flush_lat.sum=0.061636079,kv_sync_lat.avgcount=291889,kv_sync_lat.avgtime=0.006347227,kv_sync_lat.sum=1852.685744606,omap_lower_bound_lat.avgcount=1,omap_lower_bound_lat.avgtime=0.000004482,omap_lower_bound_lat.sum=0.000004482,omap_next_lat.avgcount=6933,omap_next_lat.avgtime=0.000003956,omap_next_lat.sum=0.027427456,omap_seek_to_first_lat.avgcount=309,omap_seek_to_first_lat.avgtime=0.000005879,omap_seek_to_first_lat.sum=0.001816658,omap_upper_bound_lat.avgcount=0,omap_upper_bound_lat.avgtime=0,omap_upper_bound_lat.sum=0,read_lat.avgcount=229,read_lat.avgtime=0.000394981,read_lat.sum=0.090450704,read_onode_meta_lat.avgcount=295,read_onode_meta_lat.avgtime=0.000016832,read_onode_meta_lat.sum=0.004965516,read_wait_aio_lat.avgcount=66,read_wait_aio_lat.avgtime=0.001237841,read_wait_aio_lat.sum=0.081697561,state_aio_wait_lat.avgcount=295997,state_aio_wait_lat.avgtime=0.000000357,state_aio_wait_lat.sum=0.105827433,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=295986,state_done_lat.avgtime=0.000003017,state_done_lat.sum=0.893199127,state_finishing_lat.avgcount=295986,state_finishing_lat.avgtime=0.000000306,state_finishing_lat.sum=0.090792683,state_io_done_lat.avgcount=295997,state_io_done_lat.avgtime=0.000001066,state_io_done_lat.sum=0.315577655,state_kv_commiting_lat.avgcount=295997,state_kv_commiting_lat.avgtime=0.006423586,state_kv_commiting_lat.sum=1901.362268572,state_kv_done_lat.avgcount=295997,state_kv_done_lat.avgtime=0.00000155,state_kv_done_lat.sum=0.458963064,state_kv_queued_lat.avgcount=295997,state_kv_queued_lat.avgtime=0.000477234,state_kv_queued_lat.sum=141.260101773,state_prepare_lat.avgcount=295997,state_prepare_lat.avgtime=0.000091806,state_prepare_lat.sum=27.174436583,submit_lat.avgcount=295997,submit_lat.avgtime=0.000135729,submit_lat.sum=40.17557682,throttle_lat.avgcount=295997,throttle_lat.avgtime=0.000002734,throttle_lat.sum=0.809479837,write_pad_bytes=151773,write_penalty_read_ops=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=2,type=osd get=295997,get_or_fail_fail=0,get_or_fail_success=0,get_started=295997,get_sum=198686579299,max=67108864,put=291889,put_sum=198686579299,take=0,take_sum=0,val=0,wait.avgcount=83,wait.avgtime=0.003670612,wait.sum=0.304660858 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=2,type=osd get=452060,get_or_fail_fail=0,get_or_fail_success=452060,get_started=0,get_sum=269934345,max=104857600,put=452060,put_sum=269934345,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=2,type=osd get=11,get_or_fail_fail=0,get_or_fail_success=11,get_started=0,get_sum=7723117,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7723117,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=2,type=osd get=2607433,get_or_fail_fail=0,get_or_fail_success=2607433,get_started=0,get_sum=5225295732,max=104857600,put=2607433,put_sum=5225295732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=2,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=cct,host=stefanosd1,id=2,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=2,type=osd msgr_active_connections=670,msgr_created_connections=13455,msgr_recv_bytes=6334605563,msgr_recv_messages=3287843,msgr_running_fast_dispatch_time=137.016615819,msgr_running_recv_time=240.687997039,msgr_running_send_time=471.710658466,msgr_running_total_time=1034.029109337,msgr_send_bytes=9753423475,msgr_send_messages=3439611 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=2,type=osd get=710355,get_or_fail_fail=0,get_or_fail_success=710355,get_started=0,get_sum=166306283,max=104857600,put=710355,put_sum=166306283,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=2,type=osd get=2607433,get_or_fail_fail=0,get_or_fail_success=2607433,get_started=0,get_sum=5225295732,max=104857600,put=2607433,put_sum=5225295732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=2,type=osd msgr_active_connections=705,msgr_created_connections=17953,msgr_recv_bytes=7261438733,msgr_recv_messages=4496034,msgr_running_fast_dispatch_time=254.716476808,msgr_running_recv_time=272.196741555,msgr_running_send_time=571.102924903,msgr_running_total_time=1338.461077493,msgr_send_bytes=10772250508,msgr_send_messages=4192781 1587117698000000000
+> ceph,collection=rocksdb,host=stefanosd1,id=2,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1424,get_latency.avgcount=1424,get_latency.avgtime=0.000030752,get_latency.sum=0.043792142,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=295997,submit_latency.avgtime=0.000173137,submit_latency.sum=51.248072285,submit_sync_latency.avgcount=291889,submit_sync_latency.avgtime=0.006094397,submit_sync_latency.sum=1778.887521449,submit_transaction=295997,submit_transaction_sync=291889 1587117698000000000
+> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=2,type=osd get=698701,get_or_fail_fail=0,get_or_fail_success=698701,get_started=0,get_sum=165630172,max=524288000,put=920880,put_sum=165630172,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=mds_sessions,host=stefanmds1,id=stefanmds1,type=mds average_load=0,avg_session_uptime=0,session_add=0,session_count=0,session_remove=0,sessions_open=0,sessions_stale=0,total_load=0 1587117476000000000
+> ceph,collection=mempool,host=stefanmds1,id=stefanmds1,type=mds bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=132069,buffer_anon_items=82,buffer_meta_bytes=0,buffer_meta_items=0,mds_co_bytes=44208,mds_co_items=154,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=16952,osdmap_items=139,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117476000000000
+> ceph,collection=objecter,host=stefanmds1,id=stefanmds1,type=mds command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=1,omap_del=0,omap_rd=28,omap_wr=1,op=33,op_active=0,op_laggy=0,op_pg=0,op_r=26,op_reply=33,op_resend=2,op_rmw=0,op_send=35,op_send_bytes=364,op_w=7,osd_laggy=0,osd_session_close=91462,osd_session_open=91468,osd_sessions=6,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=5,osdop_getxattr=14,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=8,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=2,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=1,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117476000000000
+> ceph,collection=cct,host=stefanmds1,id=stefanmds1,type=mds total_workers=1,unhealthy_workers=0 1587117476000000000
+> ceph,collection=mds_server,host=stefanmds1,id=stefanmds1,type=mds cap_revoke_eviction=0,dispatch_client_request=0,dispatch_server_request=0,handle_client_request=0,handle_client_session=0,handle_slave_request=0,req_create_latency.avgcount=0,req_create_latency.avgtime=0,req_create_latency.sum=0,req_getattr_latency.avgcount=0,req_getattr_latency.avgtime=0,req_getattr_latency.sum=0,req_getfilelock_latency.avgcount=0,req_getfilelock_latency.avgtime=0,req_getfilelock_latency.sum=0,req_link_latency.avgcount=0,req_link_latency.avgtime=0,req_link_latency.sum=0,req_lookup_latency.avgcount=0,req_lookup_latency.avgtime=0,req_lookup_latency.sum=0,req_lookuphash_latency.avgcount=0,req_lookuphash_latency.avgtime=0,req_lookuphash_latency.sum=0,req_lookupino_latency.avgcount=0,req_lookupino_latency.avgtime=0,req_lookupino_latency.sum=0,req_lookupname_latency.avgcount=0,req_lookupname_latency.avgtime=0,req_lookupname_latency.sum=0,req_lookupparent_latency.avgcount=0,req_lookupparent_latency.avgtime=0,req_lookupparent_latency.sum=0,req_lookupsnap_latency.avgcount=0,req_lookupsnap_latency.avgtime=0,req_lookupsnap_latency.sum=0,req_lssnap_latency.avgcount=0,req_lssnap_latency.avgtime=0,req_lssnap_latency.sum=0,req_mkdir_latency.avgcount=0,req_mkdir_latency.avgtime=0,req_mkdir_latency.sum=0,req_mknod_latency.avgcount=0,req_mknod_latency.avgtime=0,req_mknod_latency.sum=0,req_mksnap_latency.avgcount=0,req_mksnap_latency.avgtime=0,req_mksnap_latency.sum=0,req_open_latency.avgcount=0,req_open_latency.avgtime=0,req_open_latency.sum=0,req_readdir_latency.avgcount=0,req_readdir_latency.avgtime=0,req_readdir_latency.sum=0,req_rename_latency.avgcount=0,req_rename_latency.avgtime=0,req_rename_latency.sum=0,req_renamesnap_latency.avgcount=0,req_renamesnap_latency.avgtime=0,req_renamesnap_latency.sum=0,req_rmdir_latency.avgcount=0,req_rmdir_latency.avgtime=0,req_rmdir_latency.sum=0,req_rmsnap_latency.avgcount=0,req_rmsnap_latency.avgtime=0,req_rmsnap_latency.sum=0,req_rmxattr_latency.avgcount=0,req_rmxattr_latency.avgtime=0,req_rmxattr_latency.sum=0,req_setattr_latency.avgcount=0,req_setattr_latency.avgtime=0,req_setattr_latency.sum=0,req_setdirlayout_latency.avgcount=0,req_setdirlayout_latency.avgtime=0,req_setdirlayout_latency.sum=0,req_setfilelock_latency.avgcount=0,req_setfilelock_latency.avgtime=0,req_setfilelock_latency.sum=0,req_setlayout_latency.avgcount=0,req_setlayout_latency.avgtime=0,req_setlayout_latency.sum=0,req_setxattr_latency.avgcount=0,req_setxattr_latency.avgtime=0,req_setxattr_latency.sum=0,req_symlink_latency.avgcount=0,req_symlink_latency.avgtime=0,req_symlink_latency.sum=0,req_unlink_latency.avgcount=0,req_unlink_latency.avgtime=0,req_unlink_latency.sum=0 1587117476000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=84,msgr_created_connections=68511,msgr_recv_bytes=238078,msgr_recv_messages=2655,msgr_running_fast_dispatch_time=0.004247777,msgr_running_recv_time=25.369012545,msgr_running_send_time=3.743427461,msgr_running_total_time=130.277111559,msgr_send_bytes=172767043,msgr_send_messages=18172 1587117476000000000
+> ceph,collection=mds_log,host=stefanmds1,id=stefanmds1,type=mds ev=0,evadd=0,evex=0,evexd=0,evexg=0,evtrm=0,expos=4194304,jlat.avgcount=0,jlat.avgtime=0,jlat.sum=0,rdpos=4194304,replayed=1,seg=1,segadd=0,segex=0,segexd=0,segexg=0,segtrm=0,wrpos=0 1587117476000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=595,msgr_created_connections=943825,msgr_recv_bytes=78618003,msgr_recv_messages=914080,msgr_running_fast_dispatch_time=0.001544386,msgr_running_recv_time=459.627068807,msgr_running_send_time=469.337032316,msgr_running_total_time=2744.084305898,msgr_send_bytes=61684163658,msgr_send_messages=1858008 1587117476000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-mds,host=stefanmds1,id=stefanmds1,type=mds get=1216458,get_or_fail_fail=0,get_or_fail_success=1216458,get_started=0,get_sum=51976882,max=104857600,put=1216458,put_sum=51976882,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=226,msgr_created_connections=42679,msgr_recv_bytes=63140151,msgr_recv_messages=299727,msgr_running_fast_dispatch_time=26.316138629,msgr_running_recv_time=36.969916165,msgr_running_send_time=70.457421128,msgr_running_total_time=226.230019936,msgr_send_bytes=193154464,msgr_send_messages=310481 1587117476000000000
+> ceph,collection=mds,host=stefanmds1,id=stefanmds1,type=mds caps=0,dir_commit=0,dir_fetch=12,dir_merge=0,dir_split=0,exported=0,exported_inodes=0,forward=0,imported=0,imported_inodes=0,inode_max=2147483647,inodes=10,inodes_bottom=3,inodes_expired=0,inodes_pin_tail=0,inodes_pinned=10,inodes_top=7,inodes_with_caps=0,load_cent=0,openino_backtrace_fetch=0,openino_dir_fetch=0,openino_peer_discover=0,q=0,reply=0,reply_latency.avgcount=0,reply_latency.avgtime=0,reply_latency.sum=0,request=0,subtrees=2,traverse=0,traverse_dir_fetch=0,traverse_discover=0,traverse_forward=0,traverse_hit=0,traverse_lock=0,traverse_remote_ino=0 1587117476000000000
+> ceph,collection=purge_queue,host=stefanmds1,id=stefanmds1,type=mds pq_executed=0,pq_executing=0,pq_executing_ops=0 1587117476000000000
+> ceph,collection=throttle-write_buf_throttle,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=3758096384,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
+> ceph,collection=throttle-write_buf_throttle-0x5624e9377f40,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=3758096384,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
+> ceph,collection=mds_cache,host=stefanmds1,id=stefanmds1,type=mds ireq_enqueue_scrub=0,ireq_exportdir=0,ireq_flush=0,ireq_fragmentdir=0,ireq_fragstats=0,ireq_inodestats=0,num_recovering_enqueued=0,num_recovering_prioritized=0,num_recovering_processing=0,num_strays=0,num_strays_delayed=0,num_strays_enqueuing=0,recovery_completed=0,recovery_started=0,strays_created=0,strays_enqueued=0,strays_migrated=0,strays_reintegrated=0 1587117476000000000
+> ceph,collection=throttle-objecter_bytes,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=16,put_sum=1016,take=33,take_sum=1016,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
+> ceph,collection=throttle-objecter_ops,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=33,put_sum=33,take=33,take_sum=33,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
+> ceph,collection=mds_mem,host=stefanmds1,id=stefanmds1,type=mds cap=0,cap+=0,cap-=0,dir=12,dir+=12,dir-=0,dn=10,dn+=10,dn-=0,heap=322284,ino=13,ino+=13,ino-=0,rss=76032 1587117476000000000
+> ceph,collection=finisher-PurgeQueue,host=stefanmds1,id=stefanmds1,type=mds complete_latency.avgcount=4,complete_latency.avgtime=0.000176985,complete_latency.sum=0.000707941,queue_len=0 1587117476000000000
+> ceph,collection=cct,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw total_workers=0,unhealthy_workers=0 1587117156000000000
+> ceph,collection=throttle-objecter_bytes,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=791732,get_or_fail_fail=0,get_or_fail_success=791732,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
+> ceph,collection=rgw,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw cache_hit=0,cache_miss=791706,failed_req=0,get=0,get_b=0,get_initial_lat.avgcount=0,get_initial_lat.avgtime=0,get_initial_lat.sum=0,keystone_token_cache_hit=0,keystone_token_cache_miss=0,pubsub_event_lost=0,pubsub_event_triggered=0,pubsub_events=0,pubsub_push_failed=0,pubsub_push_ok=0,pubsub_push_pending=0,pubsub_store_fail=0,pubsub_store_ok=0,put=0,put_b=0,put_initial_lat.avgcount=0,put_initial_lat.avgtime=0,put_initial_lat.sum=0,qactive=0,qlen=0,req=791705 1587117156000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-radosclient,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=2697988,get_or_fail_fail=0,get_or_fail_success=2697988,get_started=0,get_sum=444563051,max=104857600,put=2697988,put_sum=444563051,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
+> ceph,collection=finisher-radosclient,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw complete_latency.avgcount=2,complete_latency.avgtime=0.003530161,complete_latency.sum=0.007060323,queue_len=0 1587117156000000000
+> ceph,collection=throttle-rgw_async_rados_ops,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=64,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
+> ceph,collection=throttle-objecter_ops,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=791732,get_or_fail_fail=0,get_or_fail_success=791732,get_started=0,get_sum=791732,max=24576,put=791732,put_sum=791732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
+> ceph,collection=throttle-objecter_bytes-0x5598969981c0,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1637900,get_or_fail_fail=0,get_or_fail_success=1637900,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
+> ceph,collection=objecter,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw command_active=0,command_resend=0,command_send=0,linger_active=8,linger_ping=1905736,linger_resend=4,linger_send=13,map_epoch=203,map_full=0,map_inc=17,omap_del=0,omap_rd=0,omap_wr=0,op=2697488,op_active=0,op_laggy=0,op_pg=0,op_r=791730,op_reply=2697476,op_resend=1,op_rmw=0,op_send=2697490,op_send_bytes=362,op_w=1905758,osd_laggy=5,osd_session_close=59558,osd_session_open=59566,osd_sessions=8,osdop_append=0,osdop_call=1,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=8,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=791714,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=16,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=791706,osdop_truncate=0,osdop_watch=1905750,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117156000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=11,msgr_created_connections=59839,msgr_recv_bytes=342697143,msgr_recv_messages=1441603,msgr_running_fast_dispatch_time=161.807937536,msgr_running_recv_time=118.174064257,msgr_running_send_time=207.679154333,msgr_running_total_time=698.527662129,msgr_send_bytes=530785909,msgr_send_messages=1679950 1587117156000000000
+> ceph,collection=mempool,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=225471,buffer_anon_items=163,buffer_meta_bytes=0,buffer_meta_items=0,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=33904,osdmap_items=278,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117156000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-radosclient-0x559896998120,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1652935,get_or_fail_fail=0,get_or_fail_success=1652935,get_started=0,get_sum=276333029,max=104857600,put=1652935,put_sum=276333029,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=17,msgr_created_connections=84859,msgr_recv_bytes=211170759,msgr_recv_messages=922646,msgr_running_fast_dispatch_time=31.487443762,msgr_running_recv_time=83.190789333,msgr_running_send_time=174.670510496,msgr_running_total_time=484.22086275,msgr_send_bytes=1322113179,msgr_send_messages=1636839 1587117156000000000
+> ceph,collection=finisher-radosclient-0x559896998080,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117156000000000
+> ceph,collection=throttle-objecter_ops-0x559896997b80,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1637900,get_or_fail_fail=0,get_or_fail_success=1637900,get_started=0,get_sum=1637900,max=24576,put=1637900,put_sum=1637900,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=18,msgr_created_connections=74757,msgr_recv_bytes=489001094,msgr_recv_messages=1986686,msgr_running_fast_dispatch_time=168.60950961,msgr_running_recv_time=142.903031533,msgr_running_send_time=267.911165712,msgr_running_total_time=824.885614951,msgr_send_bytes=707973504,msgr_send_messages=2463727 1587117156000000000
+> ceph,collection=objecter-0x559896997720,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=8,omap_del=0,omap_rd=0,omap_wr=0,op=1637998,op_active=0,op_laggy=0,op_pg=0,op_r=1062803,op_reply=1637998,op_resend=15,op_rmw=0,op_send=1638013,op_send_bytes=63321099,op_w=575195,osd_laggy=0,osd_session_close=125555,osd_session_open=125563,osd_sessions=8,osdop_append=0,osdop_call=1637886,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=112,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117156000000000
```
diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go
index e28f977d23777..c875de8dfaeba 100644
--- a/plugins/inputs/ceph/ceph.go
+++ b/plugins/inputs/ceph/ceph.go
@@ -18,8 +18,12 @@ const (
measurement = "ceph"
typeMon = "monitor"
typeOsd = "osd"
+ typeMds = "mds"
+ typeRgw = "rgw"
osdPrefix = "ceph-osd"
monPrefix = "ceph-mon"
+ mdsPrefix = "ceph-mds"
+ rgwPrefix = "ceph-client"
sockSuffix = "asok"
)
@@ -27,6 +31,8 @@ type Ceph struct {
CephBinary string
OsdPrefix string
MonPrefix string
+ MdsPrefix string
+ RgwPrefix string
SocketDir string
SocketSuffix string
CephUser string
@@ -36,7 +42,7 @@ type Ceph struct {
}
func (c *Ceph) Description() string {
- return "Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster."
+ return "Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster."
}
var sampleConfig = `
@@ -55,6 +61,8 @@ var sampleConfig = `
## prefix of MON and OSD socket files, used to determine socket type
mon_prefix = "ceph-mon"
osd_prefix = "ceph-osd"
+ mds_prefix = "ceph-mds"
+ rgw_prefix = "ceph-client"
## suffix used to identify socket files
socket_suffix = "asok"
@@ -101,12 +109,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
for _, s := range sockets {
dump, err := perfDump(c.CephBinary, s)
if err != nil {
- acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err))
+ acc.AddError(fmt.Errorf("error reading from socket '%s': %v", s.socket, err))
continue
}
data, err := parseDump(dump)
if err != nil {
- acc.AddError(fmt.Errorf("E! error parsing dump from socket '%s': %v", s.socket, err))
+ acc.AddError(fmt.Errorf("error parsing dump from socket '%s': %v", s.socket, err))
continue
}
for tag, metrics := range data {
@@ -148,6 +156,8 @@ func init() {
CephBinary: "/usr/bin/ceph",
OsdPrefix: osdPrefix,
MonPrefix: monPrefix,
+ MdsPrefix: mdsPrefix,
+ RgwPrefix: rgwPrefix,
SocketDir: "/var/run/ceph",
SocketSuffix: sockSuffix,
CephUser: "client.admin",
@@ -165,6 +175,10 @@ var perfDump = func(binary string, socket *socket) (string, error) {
cmdArgs = append(cmdArgs, "perf", "dump")
} else if socket.sockType == typeMon {
cmdArgs = append(cmdArgs, "perfcounters_dump")
+ } else if socket.sockType == typeMds {
+ cmdArgs = append(cmdArgs, "perf", "dump")
+ } else if socket.sockType == typeRgw {
+ cmdArgs = append(cmdArgs, "perf", "dump")
} else {
return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType)
}
@@ -199,7 +213,18 @@ var findSockets = func(c *Ceph) ([]*socket, error) {
sockPrefix = osdPrefix
}
- if sockType == typeOsd || sockType == typeMon {
+ if strings.HasPrefix(f, c.MdsPrefix) {
+ sockType = typeMds
+ sockPrefix = mdsPrefix
+
+ }
+ if strings.HasPrefix(f, c.RgwPrefix) {
+ sockType = typeRgw
+ sockPrefix = rgwPrefix
+
+ }
+
+ if sockType == typeOsd || sockType == typeMon || sockType == typeMds || sockType == typeRgw {
path := filepath.Join(c.SocketDir, f)
sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path})
}
@@ -287,7 +312,7 @@ func flatten(data interface{}) []*metric {
}
}
default:
- log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val)
+ log.Printf("I! [inputs.ceph] ignoring unexpected type '%T' for value %v", val, val)
}
return metrics
diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go
index 6403d6994ac1b..78da3438de691 100644
--- a/plugins/inputs/ceph/ceph_test.go
+++ b/plugins/inputs/ceph/ceph_test.go
@@ -42,6 +42,20 @@ func TestParseOsdDump(t *testing.T) {
assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
}
+func TestParseMdsDump(t *testing.T) {
+ dump, err := parseDump(mdsPerfDump)
+ assert.NoError(t, err)
+ assert.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon)
+ assert.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"])
+}
+
+func TestParseRgwDump(t *testing.T) {
+ dump, err := parseDump(rgwPerfDump)
+ assert.NoError(t, err)
+ assert.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon)
+ assert.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"])
+}
+
func TestDecodeStatus(t *testing.T) {
acc := &testutil.Accumulator{}
err := decodeStatus(acc, clusterStatusDump)
@@ -105,6 +119,8 @@ func TestFindSockets(t *testing.T) {
CephBinary: "foo",
OsdPrefix: "ceph-osd",
MonPrefix: "ceph-mon",
+ MdsPrefix: "ceph-mds",
+ RgwPrefix: "ceph-client",
SocketDir: tmpdir,
SocketSuffix: "asok",
CephUser: "client.admin",
@@ -126,6 +142,12 @@ func TestFindSockets(t *testing.T) {
for i := 1; i <= st.mons; i++ {
assertFoundSocket(t, tmpdir, typeMon, i, sockets)
}
+ for i := 1; i <= st.mdss; i++ {
+ assertFoundSocket(t, tmpdir, typeMds, i, sockets)
+ }
+ for i := 1; i <= st.rgws; i++ {
+ assertFoundSocket(t, tmpdir, typeRgw, i, sockets)
+ }
cleanupTestFiles(tmpdir, st)
}
}
@@ -134,6 +156,10 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc
var prefix string
if sockType == typeOsd {
prefix = osdPrefix
+ } else if sockType == typeMds {
+ prefix = mdsPrefix
+ } else if sockType == typeRgw {
+ prefix = rgwPrefix
} else {
prefix = monPrefix
}
@@ -182,17 +208,27 @@ func tstFileApply(st *SockTest, fn func(prefix string, i int)) {
for i := 1; i <= st.mons; i++ {
fn(monPrefix, i)
}
+ for i := 1; i <= st.mdss; i++ {
+ fn(mdsPrefix, i)
+ }
+ for i := 1; i <= st.rgws; i++ {
+ fn(rgwPrefix, i)
+ }
}
type SockTest struct {
osds int
mons int
+ mdss int
+ rgws int
}
var sockTestParams = []*SockTest{
{
osds: 2,
mons: 2,
+ mdss: 2,
+ rgws: 2,
},
{
mons: 1,
@@ -200,6 +236,12 @@ var sockTestParams = []*SockTest{
{
osds: 1,
},
+ {
+ mdss: 1,
+ },
+ {
+ rgws: 1,
+ },
{},
}
@@ -722,6 +764,996 @@ var osdPerfDump = `
"wait": { "avgcount": 0,
"sum": 0.000000000}}}
`
+var mdsPerfDump = `
+{
+ "AsyncMessenger::Worker-0": {
+ "msgr_recv_messages": 2723536628,
+ "msgr_send_messages": 1160771414,
+ "msgr_recv_bytes": 1112936719134,
+ "msgr_send_bytes": 1368194904867,
+ "msgr_created_connections": 18281,
+ "msgr_active_connections": 83,
+ "msgr_running_total_time": 109001.938705141,
+ "msgr_running_send_time": 33686.215323581,
+ "msgr_running_recv_time": 8374950.111041426,
+ "msgr_running_fast_dispatch_time": 5828.083761243
+ },
+ "AsyncMessenger::Worker-1": {
+ "msgr_recv_messages": 1426105165,
+ "msgr_send_messages": 783174767,
+ "msgr_recv_bytes": 800620150187,
+ "msgr_send_bytes": 1394738277392,
+ "msgr_created_connections": 17677,
+ "msgr_active_connections": 100,
+ "msgr_running_total_time": 70660.929329800,
+ "msgr_running_send_time": 24190.940207198,
+ "msgr_running_recv_time": 3920894.209204916,
+ "msgr_running_fast_dispatch_time": 8206.816536602
+ },
+ "AsyncMessenger::Worker-2": {
+ "msgr_recv_messages": 3471200310,
+ "msgr_send_messages": 2757725529,
+ "msgr_recv_bytes": 1331676471794,
+ "msgr_send_bytes": 2593968875674,
+ "msgr_created_connections": 16714,
+ "msgr_active_connections": 73,
+ "msgr_running_total_time": 167020.893916556,
+ "msgr_running_send_time": 61197.682840176,
+ "msgr_running_recv_time": 5816036.495319415,
+ "msgr_running_fast_dispatch_time": 8581.768789481
+ },
+ "finisher-PurgeQueue": {
+ "queue_len": 0,
+ "complete_latency": {
+ "avgcount": 20170260,
+ "sum": 70213.859039869,
+ "avgtime": 0.003481058
+ }
+ },
+ "mds": {
+ "request": 2167457412,
+ "reply": 2167457403,
+ "reply_latency": {
+ "avgcount": 2167457403,
+ "sum": 2408386.600934982,
+ "avgtime": 0.001111157
+ },
+ "forward": 0,
+ "dir_fetch": 585012985,
+ "dir_commit": 58926158,
+ "dir_split": 8,
+ "dir_merge": 7,
+ "inode_max": 2147483647,
+ "inodes": 39604287,
+ "inodes_top": 9743493,
+ "inodes_bottom": 29063656,
+ "inodes_pin_tail": 797138,
+ "inodes_pinned": 25685011,
+ "inodes_expired": 1302542128,
+ "inodes_with_caps": 4517329,
+ "caps": 6370838,
+ "subtrees": 2,
+ "traverse": 2426357623,
+ "traverse_hit": 2202314009,
+ "traverse_forward": 0,
+ "traverse_discover": 0,
+ "traverse_dir_fetch": 35332112,
+ "traverse_remote_ino": 0,
+ "traverse_lock": 4371557,
+ "load_cent": 1966748,
+ "q": 976,
+ "exported": 0,
+ "exported_inodes": 0,
+ "imported": 0,
+ "imported_inodes": 0,
+ "openino_dir_fetch": 22725418,
+ "openino_backtrace_fetch": 6,
+ "openino_peer_discover": 0
+ },
+ "mds_cache": {
+ "num_strays": 384,
+ "num_strays_delayed": 0,
+ "num_strays_enqueuing": 0,
+ "strays_created": 29140050,
+ "strays_enqueued": 29134399,
+ "strays_reintegrated": 10171,
+ "strays_migrated": 0,
+ "num_recovering_processing": 0,
+ "num_recovering_enqueued": 0,
+ "num_recovering_prioritized": 0,
+ "recovery_started": 229,
+ "recovery_completed": 229,
+ "ireq_enqueue_scrub": 0,
+ "ireq_exportdir": 0,
+ "ireq_flush": 0,
+ "ireq_fragmentdir": 15,
+ "ireq_fragstats": 0,
+ "ireq_inodestats": 0
+ },
+ "mds_log": {
+ "evadd": 1920368707,
+ "evex": 1920372003,
+ "evtrm": 1920372003,
+ "ev": 106627,
+ "evexg": 0,
+ "evexd": 4369,
+ "segadd": 2247990,
+ "segex": 2247995,
+ "segtrm": 2247995,
+ "seg": 123,
+ "segexg": 0,
+ "segexd": 5,
+ "expos": 24852063335817,
+ "wrpos": 24852205446582,
+ "rdpos": 22044255640175,
+ "jlat": {
+ "avgcount": 182241259,
+ "sum": 1732094.198366820,
+ "avgtime": 0.009504402
+ },
+ "replayed": 109923
+ },
+ "mds_mem": {
+ "ino": 39604292,
+ "ino+": 1307214891,
+ "ino-": 1267610599,
+ "dir": 22827008,
+ "dir+": 591593031,
+ "dir-": 568766023,
+ "dn": 39604761,
+ "dn+": 1376976677,
+ "dn-": 1337371916,
+ "cap": 6370838,
+ "cap+": 1720930015,
+ "cap-": 1714559177,
+ "rss": 167723320,
+ "heap": 322260,
+ "buf": 0
+ },
+ "mds_server": {
+ "dispatch_client_request": 2932764331,
+ "dispatch_server_request": 0,
+ "handle_client_request": 2167457412,
+ "handle_client_session": 10929454,
+ "handle_slave_request": 0,
+ "req_create_latency": {
+ "avgcount": 30590326,
+ "sum": 23887.274170412,
+ "avgtime": 0.000780876
+ },
+ "req_getattr_latency": {
+ "avgcount": 124767480,
+ "sum": 718160.497644305,
+ "avgtime": 0.005755991
+ },
+ "req_getfilelock_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_link_latency": {
+ "avgcount": 5636,
+ "sum": 2.371499732,
+ "avgtime": 0.000420777
+ },
+ "req_lookup_latency": {
+ "avgcount": 474590034,
+ "sum": 452548.849373476,
+ "avgtime": 0.000953557
+ },
+ "req_lookuphash_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_lookupino_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_lookupname_latency": {
+ "avgcount": 9794,
+ "sum": 54.118496591,
+ "avgtime": 0.005525678
+ },
+ "req_lookupparent_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_lookupsnap_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_lssnap_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_mkdir_latency": {
+ "avgcount": 13394317,
+ "sum": 13025.982105531,
+ "avgtime": 0.000972500
+ },
+ "req_mknod_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_mksnap_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_open_latency": {
+ "avgcount": 32849768,
+ "sum": 12862.382994977,
+ "avgtime": 0.000391551
+ },
+ "req_readdir_latency": {
+ "avgcount": 654394394,
+ "sum": 715669.609601541,
+ "avgtime": 0.001093636
+ },
+ "req_rename_latency": {
+ "avgcount": 6058807,
+ "sum": 2126.232719555,
+ "avgtime": 0.000350932
+ },
+ "req_renamesnap_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_rmdir_latency": {
+ "avgcount": 1901530,
+ "sum": 4064.121157858,
+ "avgtime": 0.002137290
+ },
+ "req_rmsnap_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_rmxattr_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_setattr_latency": {
+ "avgcount": 37051209,
+ "sum": 171198.037329531,
+ "avgtime": 0.004620578
+ },
+ "req_setdirlayout_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_setfilelock_latency": {
+ "avgcount": 765439143,
+ "sum": 262660.582883819,
+ "avgtime": 0.000343150
+ },
+ "req_setlayout_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "req_setxattr_latency": {
+ "avgcount": 41572,
+ "sum": 7.273371375,
+ "avgtime": 0.000174958
+ },
+ "req_symlink_latency": {
+ "avgcount": 329,
+ "sum": 0.117859965,
+ "avgtime": 0.000358236
+ },
+ "req_unlink_latency": {
+ "avgcount": 26363064,
+ "sum": 32119.149726314,
+ "avgtime": 0.001218339
+ },
+ "cap_revoke_eviction": 0
+ },
+ "mds_sessions": {
+ "session_count": 80,
+ "session_add": 90,
+ "session_remove": 10,
+ "sessions_open": 80,
+ "sessions_stale": 0,
+ "total_load": 112490,
+ "average_load": 1406,
+ "avg_session_uptime": 2221807
+ },
+ "objecter": {
+ "op_active": 0,
+ "op_laggy": 0,
+ "op_send": 955060080,
+ "op_send_bytes": 3178832110019,
+ "op_resend": 67,
+ "op_reply": 955060013,
+ "op": 955060013,
+ "op_r": 585982837,
+ "op_w": 369077176,
+ "op_rmw": 0,
+ "op_pg": 0,
+ "osdop_stat": 45924375,
+ "osdop_create": 31162274,
+ "osdop_read": 969513,
+ "osdop_write": 183211164,
+ "osdop_writefull": 1063233,
+ "osdop_writesame": 0,
+ "osdop_append": 0,
+ "osdop_zero": 2,
+ "osdop_truncate": 8,
+ "osdop_delete": 60594735,
+ "osdop_mapext": 0,
+ "osdop_sparse_read": 0,
+ "osdop_clonerange": 0,
+ "osdop_getxattr": 584941886,
+ "osdop_setxattr": 62324548,
+ "osdop_cmpxattr": 0,
+ "osdop_rmxattr": 0,
+ "osdop_resetxattrs": 0,
+ "osdop_tmap_up": 0,
+ "osdop_tmap_put": 0,
+ "osdop_tmap_get": 0,
+ "osdop_call": 0,
+ "osdop_watch": 0,
+ "osdop_notify": 0,
+ "osdop_src_cmpxattr": 0,
+ "osdop_pgls": 0,
+ "osdop_pgls_filter": 0,
+ "osdop_other": 32053182,
+ "linger_active": 0,
+ "linger_send": 0,
+ "linger_resend": 0,
+ "linger_ping": 0,
+ "poolop_active": 0,
+ "poolop_send": 0,
+ "poolop_resend": 0,
+ "poolstat_active": 0,
+ "poolstat_send": 0,
+ "poolstat_resend": 0,
+ "statfs_active": 0,
+ "statfs_send": 0,
+ "statfs_resend": 0,
+ "command_active": 0,
+ "command_send": 0,
+ "command_resend": 0,
+ "map_epoch": 66793,
+ "map_full": 0,
+ "map_inc": 1762,
+ "osd_sessions": 120,
+ "osd_session_open": 52554,
+ "osd_session_close": 52434,
+ "osd_laggy": 0,
+ "omap_wr": 106692727,
+ "omap_rd": 1170026044,
+ "omap_del": 5674762
+ },
+ "purge_queue": {
+ "pq_executing_ops": 0,
+ "pq_executing": 0,
+ "pq_executed": 29134399
+ },
+ "throttle-msgr_dispatch_throttler-mds": {
+ "val": 0,
+ "max": 104857600,
+ "get_started": 0,
+ "get": 7620842095,
+ "get_sum": 2681291022887,
+ "get_or_fail_fail": 53,
+ "get_or_fail_success": 7620842095,
+ "take": 0,
+ "take_sum": 0,
+ "put": 7620842095,
+ "put_sum": 2681291022887,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-objecter_bytes": {
+ "val": 0,
+ "max": 104857600,
+ "get_started": 0,
+ "get": 0,
+ "get_sum": 0,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 0,
+ "take": 955060013,
+ "take_sum": 3172776432475,
+ "put": 862340641,
+ "put_sum": 3172776432475,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-objecter_ops": {
+ "val": 0,
+ "max": 1024,
+ "get_started": 0,
+ "get": 0,
+ "get_sum": 0,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 0,
+ "take": 955060013,
+ "take_sum": 955060013,
+ "put": 955060013,
+ "put_sum": 955060013,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-write_buf_throttle": {
+ "val": 0,
+ "max": 3758096384,
+ "get_started": 0,
+ "get": 29134399,
+ "get_sum": 3160498139,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 29134399,
+ "take": 0,
+ "take_sum": 0,
+ "put": 969905,
+ "put_sum": 3160498139,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-write_buf_throttle-0x561894f0b8e0": {
+ "val": 286270,
+ "max": 3758096384,
+ "get_started": 0,
+ "get": 1920368707,
+ "get_sum": 2807949805409,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 1920368707,
+ "take": 0,
+ "take_sum": 0,
+ "put": 182241259,
+ "put_sum": 2807949519139,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ }
+}
+`
+
+var rgwPerfDump = `
+{
+ "AsyncMessenger::Worker-0": {
+ "msgr_recv_messages": 10684185,
+ "msgr_send_messages": 13448962,
+ "msgr_recv_bytes": 2622531258,
+ "msgr_send_bytes": 4195038384,
+ "msgr_created_connections": 8029,
+ "msgr_active_connections": 3,
+ "msgr_running_total_time": 3249.441108544,
+ "msgr_running_send_time": 739.821446096,
+ "msgr_running_recv_time": 310.354319110,
+ "msgr_running_fast_dispatch_time": 1915.410317430
+ },
+ "AsyncMessenger::Worker-1": {
+ "msgr_recv_messages": 2137773,
+ "msgr_send_messages": 3850070,
+ "msgr_recv_bytes": 503824366,
+ "msgr_send_bytes": 1130107261,
+ "msgr_created_connections": 11030,
+ "msgr_active_connections": 1,
+ "msgr_running_total_time": 445.055291782,
+ "msgr_running_send_time": 227.817750758,
+ "msgr_running_recv_time": 78.974093226,
+ "msgr_running_fast_dispatch_time": 47.587740615
+ },
+ "AsyncMessenger::Worker-2": {
+ "msgr_recv_messages": 2809014,
+ "msgr_send_messages": 4126613,
+ "msgr_recv_bytes": 653093470,
+ "msgr_send_bytes": 1022041970,
+ "msgr_created_connections": 14810,
+ "msgr_active_connections": 5,
+ "msgr_running_total_time": 453.384703728,
+ "msgr_running_send_time": 208.580910390,
+ "msgr_running_recv_time": 80.075306670,
+ "msgr_running_fast_dispatch_time": 46.854112208
+ },
+ "cct": {
+ "total_workers": 0,
+ "unhealthy_workers": 0
+ },
+ "finisher-radosclient": {
+ "queue_len": 0,
+ "complete_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "finisher-radosclient-0x55994098e460": {
+ "queue_len": 0,
+ "complete_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "finisher-radosclient-0x5599409901c0": {
+ "queue_len": 0,
+ "complete_latency": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "mempool": {
+ "bloom_filter_bytes": 0,
+ "bloom_filter_items": 0,
+ "bluestore_alloc_bytes": 0,
+ "bluestore_alloc_items": 0,
+ "bluestore_cache_data_bytes": 0,
+ "bluestore_cache_data_items": 0,
+ "bluestore_cache_onode_bytes": 0,
+ "bluestore_cache_onode_items": 0,
+ "bluestore_cache_other_bytes": 0,
+ "bluestore_cache_other_items": 0,
+ "bluestore_fsck_bytes": 0,
+ "bluestore_fsck_items": 0,
+ "bluestore_txc_bytes": 0,
+ "bluestore_txc_items": 0,
+ "bluestore_writing_deferred_bytes": 0,
+ "bluestore_writing_deferred_items": 0,
+ "bluestore_writing_bytes": 0,
+ "bluestore_writing_items": 0,
+ "bluefs_bytes": 0,
+ "bluefs_items": 0,
+ "buffer_anon_bytes": 258469,
+ "buffer_anon_items": 201,
+ "buffer_meta_bytes": 0,
+ "buffer_meta_items": 0,
+ "osd_bytes": 0,
+ "osd_items": 0,
+ "osd_mapbl_bytes": 0,
+ "osd_mapbl_items": 0,
+ "osd_pglog_bytes": 0,
+ "osd_pglog_items": 0,
+ "osdmap_bytes": 74448,
+ "osdmap_items": 732,
+ "osdmap_mapping_bytes": 0,
+ "osdmap_mapping_items": 0,
+ "pgmap_bytes": 0,
+ "pgmap_items": 0,
+ "mds_co_bytes": 0,
+ "mds_co_items": 0,
+ "unittest_1_bytes": 0,
+ "unittest_1_items": 0,
+ "unittest_2_bytes": 0,
+ "unittest_2_items": 0
+ },
+ "objecter": {
+ "op_active": 0,
+ "op_laggy": 0,
+ "op_send": 9377910,
+ "op_send_bytes": 312,
+ "op_resend": 0,
+ "op_reply": 9377904,
+ "op": 9377910,
+ "op_r": 2755291,
+ "op_w": 6622619,
+ "op_rmw": 0,
+ "op_pg": 0,
+ "osdop_stat": 2755258,
+ "osdop_create": 8,
+ "osdop_read": 25,
+ "osdop_write": 0,
+ "osdop_writefull": 0,
+ "osdop_writesame": 0,
+ "osdop_append": 0,
+ "osdop_zero": 0,
+ "osdop_truncate": 0,
+ "osdop_delete": 0,
+ "osdop_mapext": 0,
+ "osdop_sparse_read": 0,
+ "osdop_clonerange": 0,
+ "osdop_getxattr": 0,
+ "osdop_setxattr": 0,
+ "osdop_cmpxattr": 0,
+ "osdop_rmxattr": 0,
+ "osdop_resetxattrs": 0,
+ "osdop_call": 0,
+ "osdop_watch": 6622611,
+ "osdop_notify": 0,
+ "osdop_src_cmpxattr": 0,
+ "osdop_pgls": 0,
+ "osdop_pgls_filter": 0,
+ "osdop_other": 2755266,
+ "linger_active": 8,
+ "linger_send": 35,
+ "linger_resend": 27,
+ "linger_ping": 6622576,
+ "poolop_active": 0,
+ "poolop_send": 0,
+ "poolop_resend": 0,
+ "poolstat_active": 0,
+ "poolstat_send": 0,
+ "poolstat_resend": 0,
+ "statfs_active": 0,
+ "statfs_send": 0,
+ "statfs_resend": 0,
+ "command_active": 0,
+ "command_send": 0,
+ "command_resend": 0,
+ "map_epoch": 1064,
+ "map_full": 0,
+ "map_inc": 106,
+ "osd_sessions": 8,
+ "osd_session_open": 11928,
+ "osd_session_close": 11920,
+ "osd_laggy": 5,
+ "omap_wr": 0,
+ "omap_rd": 0,
+ "omap_del": 0
+ },
+ "objecter-0x55994098e500": {
+ "op_active": 0,
+ "op_laggy": 0,
+ "op_send": 827839,
+ "op_send_bytes": 0,
+ "op_resend": 0,
+ "op_reply": 827839,
+ "op": 827839,
+ "op_r": 0,
+ "op_w": 827839,
+ "op_rmw": 0,
+ "op_pg": 0,
+ "osdop_stat": 0,
+ "osdop_create": 0,
+ "osdop_read": 0,
+ "osdop_write": 0,
+ "osdop_writefull": 0,
+ "osdop_writesame": 0,
+ "osdop_append": 0,
+ "osdop_zero": 0,
+ "osdop_truncate": 0,
+ "osdop_delete": 0,
+ "osdop_mapext": 0,
+ "osdop_sparse_read": 0,
+ "osdop_clonerange": 0,
+ "osdop_getxattr": 0,
+ "osdop_setxattr": 0,
+ "osdop_cmpxattr": 0,
+ "osdop_rmxattr": 0,
+ "osdop_resetxattrs": 0,
+ "osdop_call": 0,
+ "osdop_watch": 827839,
+ "osdop_notify": 0,
+ "osdop_src_cmpxattr": 0,
+ "osdop_pgls": 0,
+ "osdop_pgls_filter": 0,
+ "osdop_other": 0,
+ "linger_active": 1,
+ "linger_send": 3,
+ "linger_resend": 2,
+ "linger_ping": 827836,
+ "poolop_active": 0,
+ "poolop_send": 0,
+ "poolop_resend": 0,
+ "poolstat_active": 0,
+ "poolstat_send": 0,
+ "poolstat_resend": 0,
+ "statfs_active": 0,
+ "statfs_send": 0,
+ "statfs_resend": 0,
+ "command_active": 0,
+ "command_send": 0,
+ "command_resend": 0,
+ "map_epoch": 1064,
+ "map_full": 0,
+ "map_inc": 106,
+ "osd_sessions": 1,
+ "osd_session_open": 1,
+ "osd_session_close": 0,
+ "osd_laggy": 1,
+ "omap_wr": 0,
+ "omap_rd": 0,
+ "omap_del": 0
+ },
+ "objecter-0x55994098f720": {
+ "op_active": 0,
+ "op_laggy": 0,
+ "op_send": 5415951,
+ "op_send_bytes": 205291238,
+ "op_resend": 8,
+ "op_reply": 5415943,
+ "op": 5415943,
+ "op_r": 3612105,
+ "op_w": 1803838,
+ "op_rmw": 0,
+ "op_pg": 0,
+ "osdop_stat": 0,
+ "osdop_create": 0,
+ "osdop_read": 0,
+ "osdop_write": 0,
+ "osdop_writefull": 0,
+ "osdop_writesame": 0,
+ "osdop_append": 0,
+ "osdop_zero": 0,
+ "osdop_truncate": 0,
+ "osdop_delete": 0,
+ "osdop_mapext": 0,
+ "osdop_sparse_read": 0,
+ "osdop_clonerange": 0,
+ "osdop_getxattr": 0,
+ "osdop_setxattr": 0,
+ "osdop_cmpxattr": 0,
+ "osdop_rmxattr": 0,
+ "osdop_resetxattrs": 0,
+ "osdop_call": 5415567,
+ "osdop_watch": 0,
+ "osdop_notify": 0,
+ "osdop_src_cmpxattr": 0,
+ "osdop_pgls": 0,
+ "osdop_pgls_filter": 0,
+ "osdop_other": 376,
+ "linger_active": 0,
+ "linger_send": 0,
+ "linger_resend": 0,
+ "linger_ping": 0,
+ "poolop_active": 0,
+ "poolop_send": 0,
+ "poolop_resend": 0,
+ "poolstat_active": 0,
+ "poolstat_send": 0,
+ "poolstat_resend": 0,
+ "statfs_active": 0,
+ "statfs_send": 0,
+ "statfs_resend": 0,
+ "command_active": 0,
+ "command_send": 0,
+ "command_resend": 0,
+ "map_epoch": 1064,
+ "map_full": 0,
+ "map_inc": 106,
+ "osd_sessions": 8,
+ "osd_session_open": 8834,
+ "osd_session_close": 8826,
+ "osd_laggy": 0,
+ "omap_wr": 0,
+ "omap_rd": 0,
+ "omap_del": 0
+ },
+ "rgw": {
+ "req": 2755258,
+ "failed_req": 0,
+ "get": 0,
+ "get_b": 0,
+ "get_initial_lat": {
+ "avgcount": 0,
+ "sum": 0.002219876,
+ "avgtime": 0.000000000
+ },
+ "put": 0,
+ "put_b": 0,
+ "put_initial_lat": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ },
+ "qlen": 0,
+ "qactive": 0,
+ "cache_hit": 0,
+ "cache_miss": 2755261,
+ "keystone_token_cache_hit": 0,
+ "keystone_token_cache_miss": 0,
+ "gc_retire_object": 0,
+ "pubsub_event_triggered": 0,
+ "pubsub_event_lost": 0,
+ "pubsub_store_ok": 0,
+ "pubsub_store_fail": 0,
+ "pubsub_events": 0,
+ "pubsub_push_ok": 0,
+ "pubsub_push_failed": 0,
+ "pubsub_push_pending": 0
+ },
+ "simple-throttler": {
+ "throttle": 0
+ },
+ "throttle-msgr_dispatch_throttler-radosclient": {
+ "val": 0,
+ "max": 104857600,
+ "get_started": 0,
+ "get": 9379775,
+ "get_sum": 1545393284,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 9379775,
+ "take": 0,
+ "take_sum": 0,
+ "put": 9379775,
+ "put_sum": 1545393284,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-msgr_dispatch_throttler-radosclient-0x55994098e320": {
+ "val": 0,
+ "max": 104857600,
+ "get_started": 0,
+ "get": 829631,
+ "get_sum": 162850310,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 829631,
+ "take": 0,
+ "take_sum": 0,
+ "put": 829631,
+ "put_sum": 162850310,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-msgr_dispatch_throttler-radosclient-0x55994098fa40": {
+ "val": 0,
+ "max": 104857600,
+ "get_started": 0,
+ "get": 5421553,
+ "get_sum": 914508527,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 5421553,
+ "take": 0,
+ "take_sum": 0,
+ "put": 5421553,
+ "put_sum": 914508527,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-objecter_bytes": {
+ "val": 0,
+ "max": 104857600,
+ "get_started": 0,
+ "get": 2755292,
+ "get_sum": 0,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 2755292,
+ "take": 0,
+ "take_sum": 0,
+ "put": 0,
+ "put_sum": 0,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-objecter_bytes-0x55994098e780": {
+ "val": 0,
+ "max": 104857600,
+ "get_started": 0,
+ "get": 0,
+ "get_sum": 0,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 0,
+ "take": 0,
+ "take_sum": 0,
+ "put": 0,
+ "put_sum": 0,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-objecter_bytes-0x55994098f7c0": {
+ "val": 0,
+ "max": 104857600,
+ "get_started": 0,
+ "get": 5415614,
+ "get_sum": 0,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 5415614,
+ "take": 0,
+ "take_sum": 0,
+ "put": 0,
+ "put_sum": 0,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-objecter_ops": {
+ "val": 0,
+ "max": 24576,
+ "get_started": 0,
+ "get": 2755292,
+ "get_sum": 2755292,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 2755292,
+ "take": 0,
+ "take_sum": 0,
+ "put": 2755292,
+ "put_sum": 2755292,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-objecter_ops-0x55994098e640": {
+ "val": 0,
+ "max": 24576,
+ "get_started": 0,
+ "get": 0,
+ "get_sum": 0,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 0,
+ "take": 0,
+ "take_sum": 0,
+ "put": 0,
+ "put_sum": 0,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-objecter_ops-0x55994098f0e0": {
+ "val": 0,
+ "max": 24576,
+ "get_started": 0,
+ "get": 5415614,
+ "get_sum": 5415614,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 5415614,
+ "take": 0,
+ "take_sum": 0,
+ "put": 5415614,
+ "put_sum": 5415614,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ },
+ "throttle-rgw_async_rados_ops": {
+ "val": 0,
+ "max": 64,
+ "get_started": 0,
+ "get": 0,
+ "get_sum": 0,
+ "get_or_fail_fail": 0,
+ "get_or_fail_success": 0,
+ "take": 0,
+ "take_sum": 0,
+ "put": 0,
+ "put_sum": 0,
+ "wait": {
+ "avgcount": 0,
+ "sum": 0.000000000,
+ "avgtime": 0.000000000
+ }
+ }
+}
+`
+
var clusterStatusDump = `
{
"health": {
diff --git a/plugins/inputs/cgroup/README.md b/plugins/inputs/cgroup/README.md
index e62c9b15e6c96..6982517bc5879 100644
--- a/plugins/inputs/cgroup/README.md
+++ b/plugins/inputs/cgroup/README.md
@@ -1,4 +1,4 @@
-# CGroup Input Plugin For Telegraf Agent
+# CGroup Input Plugin
This input plugin will capture specific statistics per cgroup.
@@ -43,20 +43,30 @@ All measurements have the following tags:
### Configuration:
-```
+```toml
# [[inputs.cgroup]]
# paths = [
- # "/cgroup/memory", # root cgroup
- # "/cgroup/memory/child1", # container cgroup
- # "/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself
+ # "/sys/fs/cgroup/memory", # root cgroup
+ # "/sys/fs/cgroup/memory/child1", # container cgroup
+ # "/sys/fs/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself
# ]
# files = ["memory.*usage*", "memory.limit_in_bytes"]
+```
+### usage examples:
+
+```toml
# [[inputs.cgroup]]
# paths = [
- # "/cgroup/cpu", # root cgroup
- # "/cgroup/cpu/*", # all container cgroups
- # "/cgroup/cpu/*/*", # all children cgroups under each container cgroup
+ # "/sys/fs/cgroup/cpu", # root cgroup
+ # "/sys/fs/cgroup/cpu/*", # all container cgroups
+ # "/sys/fs/cgroup/cpu/*/*", # all children cgroups under each container cgroup
# ]
# files = ["cpuacct.usage", "cpu.cfs_period_us", "cpu.cfs_quota_us"]
+
+# [[inputs.cgroup]]
+ # paths = [
+ # "/sys/fs/cgroup/unified/*", # root cgroup
+ # ]
+ # files = ["*"]
```
diff --git a/plugins/inputs/cgroup/cgroup.go b/plugins/inputs/cgroup/cgroup.go
index cc5e4b4968f2d..f3853a9da9a20 100644
--- a/plugins/inputs/cgroup/cgroup.go
+++ b/plugins/inputs/cgroup/cgroup.go
@@ -16,9 +16,9 @@ var sampleConfig = `
## want to monitor if you have a large number of cgroups, to avoid
## any cardinality issues.
# paths = [
- # "/cgroup/memory",
- # "/cgroup/memory/child1",
- # "/cgroup/memory/child2/*",
+ # "/sys/fs/cgroup/memory",
+ # "/sys/fs/cgroup/memory/child1",
+ # "/sys/fs/cgroup/memory/child2/*",
# ]
## cgroup stat fields, as file names, globs are supported.
## these file names are appended to each path from above.
diff --git a/plugins/inputs/chrony/chrony.go b/plugins/inputs/chrony/chrony.go
index 6173357cf4413..3fe18e89c91cb 100644
--- a/plugins/inputs/chrony/chrony.go
+++ b/plugins/inputs/chrony/chrony.go
@@ -33,11 +33,16 @@ func (*Chrony) SampleConfig() string {
`
}
-func (c *Chrony) Gather(acc telegraf.Accumulator) error {
- if len(c.path) == 0 {
+func (c *Chrony) Init() error {
+ var err error
+ c.path, err = exec.LookPath("chronyc")
+ if err != nil {
return errors.New("chronyc not found: verify that chrony is installed and that chronyc is in your PATH")
}
+ return nil
+}
+func (c *Chrony) Gather(acc telegraf.Accumulator) error {
flags := []string{}
if !c.DNSLookup {
flags = append(flags, "-n")
@@ -120,12 +125,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string
}
func init() {
- c := Chrony{}
- path, _ := exec.LookPath("chronyc")
- if len(path) > 0 {
- c.path = path
- }
inputs.Add("chrony", func() telegraf.Input {
- return &c
+ return &Chrony{}
})
}
diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md
new file mode 100644
index 0000000000000..9c4eb3645d491
--- /dev/null
+++ b/plugins/inputs/cisco_telemetry_mdt/README.md
@@ -0,0 +1,44 @@
+# Cisco Model-Driven Telemetry (MDT) Input Plugin
+
+Cisco model-driven telemetry (MDT) is an input plugin that consumes
+telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms. It supports TCP & GRPC dialout transports.
+GRPC-based transport can utilize TLS for authentication and encryption.
+Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded.
+
+The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and later, IOS XE 16.10 and later, as well as NX-OS 7.x and later platforms.
+
+The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and later.
+
+
+### Configuration:
+
+```toml
+[[inputs.cisco_telemetry_mdt]]
+ ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
+ ## using the grpc transport.
+ transport = "grpc"
+
+ ## Address and port to host telemetry listener
+ service_address = ":57000"
+
+ ## Enable TLS; grpc transport only.
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## Enable TLS client authentication and define allowed CA certificates; grpc
+ ## transport only.
+ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+ ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
+ # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
+
+ ## Define aliases to map telemetry encoding paths to simple measurement names
+ [inputs.cisco_telemetry_mdt.aliases]
+ ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+```
+
+### Example Output:
+```
+ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000
+ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000
+```
diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go
new file mode 100644
index 0000000000000..1a669e96f878e
--- /dev/null
+++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go
@@ -0,0 +1,558 @@
+package cisco_telemetry_mdt
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout"
+ telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis"
+ "github.com/golang/protobuf/proto"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+ internaltls "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials" // Register GRPC gzip decoder to support compressed telemetry
+ _ "google.golang.org/grpc/encoding/gzip"
+ "google.golang.org/grpc/peer"
+)
+
+const (
+ // Maximum telemetry payload size (in bytes) to accept for GRPC dialout transport
+ tcpMaxMsgLen uint32 = 1024 * 1024
+)
+
+// CiscoTelemetryMDT plugin for IOS XR, IOS XE and NXOS platforms
+type CiscoTelemetryMDT struct {
+ // Common configuration
+ Transport string
+ ServiceAddress string `toml:"service_address"`
+ MaxMsgSize int `toml:"max_msg_size"`
+ Aliases map[string]string `toml:"aliases"`
+ EmbeddedTags []string `toml:"embedded_tags"`
+
+ Log telegraf.Logger
+
+ // GRPC TLS settings
+ internaltls.ServerConfig
+
+ // Internal listener / client handle
+ grpcServer *grpc.Server
+ listener net.Listener
+
+ // Internal state
+ aliases map[string]string
+ warned map[string]struct{}
+ extraTags map[string]map[string]struct{}
+ mutex sync.Mutex
+ acc telegraf.Accumulator
+ wg sync.WaitGroup
+}
+
+// Start the Cisco MDT service
+func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error {
+ var err error
+ c.acc = acc
+ c.listener, err = net.Listen("tcp", c.ServiceAddress)
+ if err != nil {
+ return err
+ }
+
+ // Invert aliases list
+ c.warned = make(map[string]struct{})
+ c.aliases = make(map[string]string, len(c.Aliases))
+ for alias, path := range c.Aliases {
+ c.aliases[path] = alias
+ }
+
+ // Fill extra tags
+ c.extraTags = make(map[string]map[string]struct{})
+ for _, tag := range c.EmbeddedTags {
+ dir := strings.Replace(path.Dir(tag), "-", "_", -1)
+ if _, hasKey := c.extraTags[dir]; !hasKey {
+ c.extraTags[dir] = make(map[string]struct{})
+ }
+ c.extraTags[dir][path.Base(tag)] = struct{}{}
+ }
+
+ switch c.Transport {
+ case "tcp":
+ // TCP dialout server accept routine
+ c.wg.Add(1)
+ go func() {
+ c.acceptTCPClients()
+ c.wg.Done()
+ }()
+
+ case "grpc":
+ var opts []grpc.ServerOption
+ tlsConfig, err := c.ServerConfig.TLSConfig()
+ if err != nil {
+ c.listener.Close()
+ return err
+ } else if tlsConfig != nil {
+ opts = append(opts, grpc.Creds(credentials.NewTLS(tlsConfig)))
+ }
+
+ if c.MaxMsgSize > 0 {
+ opts = append(opts, grpc.MaxRecvMsgSize(c.MaxMsgSize))
+ }
+
+ c.grpcServer = grpc.NewServer(opts...)
+ dialout.RegisterGRPCMdtDialoutServer(c.grpcServer, c)
+
+ c.wg.Add(1)
+ go func() {
+ c.grpcServer.Serve(c.listener)
+ c.wg.Done()
+ }()
+
+ default:
+ c.listener.Close()
+ return fmt.Errorf("invalid Cisco MDT transport: %s", c.Transport)
+ }
+
+ return nil
+}
+
+// AcceptTCPDialoutClients defines the TCP dialout server main routine
+func (c *CiscoTelemetryMDT) acceptTCPClients() {
+ // Keep track of all active connections, so we can close them if necessary
+ var mutex sync.Mutex
+ clients := make(map[net.Conn]struct{})
+
+ for {
+ conn, err := c.listener.Accept()
+ if neterr, ok := err.(*net.OpError); ok && (neterr.Timeout() || neterr.Temporary()) {
+ continue
+ } else if err != nil {
+ break // Stop() will close the connection so Accept() will fail here
+ }
+
+ mutex.Lock()
+ clients[conn] = struct{}{}
+ mutex.Unlock()
+
+ // Individual client connection routine
+ c.wg.Add(1)
+ go func() {
+ c.Log.Debugf("Accepted Cisco MDT TCP dialout connection from %s", conn.RemoteAddr())
+ if err := c.handleTCPClient(conn); err != nil {
+ c.acc.AddError(err)
+ }
+ c.Log.Debugf("Closed Cisco MDT TCP dialout connection from %s", conn.RemoteAddr())
+
+ mutex.Lock()
+ delete(clients, conn)
+ mutex.Unlock()
+
+ conn.Close()
+ c.wg.Done()
+ }()
+ }
+
+ // Close all remaining client connections
+ mutex.Lock()
+ for client := range clients {
+ if err := client.Close(); err != nil {
+ c.Log.Errorf("Failed to close TCP dialout client: %v", err)
+ }
+ }
+ mutex.Unlock()
+}
+
+// Handle a TCP telemetry client
+func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error {
+ // TCP Dialout telemetry framing header
+ var hdr struct {
+ MsgType uint16
+ MsgEncap uint16
+ MsgHdrVersion uint16
+ MsgFlags uint16
+ MsgLen uint32
+ }
+
+ var payload bytes.Buffer
+
+ for {
+ // Read and validate dialout telemetry header
+ if err := binary.Read(conn, binary.BigEndian, &hdr); err != nil {
+ return err
+ }
+
+ maxMsgSize := tcpMaxMsgLen
+ if c.MaxMsgSize > 0 {
+ maxMsgSize = uint32(c.MaxMsgSize)
+ }
+
+ if hdr.MsgLen > maxMsgSize {
+ return fmt.Errorf("dialout packet too long: %v", hdr.MsgLen)
+ } else if hdr.MsgFlags != 0 {
+ return fmt.Errorf("invalid dialout flags: %v", hdr.MsgFlags)
+ }
+
+ // Read and handle telemetry packet
+ payload.Reset()
+ if size, err := payload.ReadFrom(io.LimitReader(conn, int64(hdr.MsgLen))); size != int64(hdr.MsgLen) {
+ if err != nil {
+ return err
+ }
+ return fmt.Errorf("TCP dialout premature EOF")
+ }
+
+ c.handleTelemetry(payload.Bytes())
+ }
+}
+
+// MdtDialout RPC server method for grpc-dialout transport
+func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error {
+ peer, peerOK := peer.FromContext(stream.Context())
+ if peerOK {
+ c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr)
+ }
+
+ var chunkBuffer bytes.Buffer
+
+ for {
+ packet, err := stream.Recv()
+ if err != nil {
+ if err != io.EOF {
+ c.acc.AddError(fmt.Errorf("GRPC dialout receive error: %v", err))
+ }
+ break
+ }
+
+ if len(packet.Data) == 0 && len(packet.Errors) != 0 {
+ c.acc.AddError(fmt.Errorf("GRPC dialout error: %s", packet.Errors))
+ break
+ }
+
+ // Reassemble chunked telemetry data received from NX-OS
+ if packet.TotalSize == 0 {
+ c.handleTelemetry(packet.Data)
+ } else if int(packet.TotalSize) <= c.MaxMsgSize {
+ chunkBuffer.Write(packet.Data)
+ if chunkBuffer.Len() >= int(packet.TotalSize) {
+ c.handleTelemetry(chunkBuffer.Bytes())
+ chunkBuffer.Reset()
+ }
+ } else {
+ c.acc.AddError(fmt.Errorf("dropped too large packet: %dB > %dB", packet.TotalSize, c.MaxMsgSize))
+ }
+ }
+
+ if peerOK {
+ c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peer.Addr)
+ }
+
+ return nil
+}
+
+// Handle telemetry packet from any transport, decode and add as measurement
+func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) {
+ msg := &telemetry.Telemetry{}
+ err := proto.Unmarshal(data, msg)
+ if err != nil {
+ c.acc.AddError(fmt.Errorf("Cisco MDT failed to decode: %v", err))
+ return
+ }
+
+ grouper := metric.NewSeriesGrouper()
+ for _, gpbkv := range msg.DataGpbkv {
+ // Produce metadata tags
+ var tags map[string]string
+
+ // Top-level field may have measurement timestamp, if not use message timestamp
+ measured := gpbkv.Timestamp
+ if measured == 0 {
+ measured = msg.MsgTimestamp
+ }
+
+ timestamp := time.Unix(int64(measured/1000), int64(measured%1000)*1000000)
+
+ // Find toplevel GPBKV fields "keys" and "content"
+ var keys, content *telemetry.TelemetryField = nil, nil
+ for _, field := range gpbkv.Fields {
+ if field.Name == "keys" {
+ keys = field
+ } else if field.Name == "content" {
+ content = field
+ }
+ }
+
+ if keys == nil || content == nil {
+ c.Log.Infof("Message from %s missing keys or content", msg.GetNodeIdStr())
+ continue
+ }
+
+ // Parse keys
+ tags = make(map[string]string, len(keys.Fields)+3)
+ tags["source"] = msg.GetNodeIdStr()
+ tags["subscription"] = msg.GetSubscriptionIdStr()
+ tags["path"] = msg.GetEncodingPath()
+
+ for _, subfield := range keys.Fields {
+ c.parseKeyField(tags, subfield, "")
+ }
+
+ // Parse values
+ for _, subfield := range content.Fields {
+ c.parseContentField(grouper, subfield, "", msg.EncodingPath, tags, timestamp)
+ }
+ }
+
+ for _, metric := range grouper.Metrics() {
+ c.acc.AddMetric(metric)
+ }
+}
+
+func decodeValue(field *telemetry.TelemetryField) interface{} {
+ switch val := field.ValueByType.(type) {
+ case *telemetry.TelemetryField_BytesValue:
+ return val.BytesValue
+ case *telemetry.TelemetryField_StringValue:
+ if len(val.StringValue) > 0 {
+ return val.StringValue
+ }
+ case *telemetry.TelemetryField_BoolValue:
+ return val.BoolValue
+ case *telemetry.TelemetryField_Uint32Value:
+ return val.Uint32Value
+ case *telemetry.TelemetryField_Uint64Value:
+ return val.Uint64Value
+ case *telemetry.TelemetryField_Sint32Value:
+ return val.Sint32Value
+ case *telemetry.TelemetryField_Sint64Value:
+ return val.Sint64Value
+ case *telemetry.TelemetryField_DoubleValue:
+ return val.DoubleValue
+ case *telemetry.TelemetryField_FloatValue:
+ return val.FloatValue
+ }
+ return nil
+}
+
+func decodeTag(field *telemetry.TelemetryField) string {
+ switch val := field.ValueByType.(type) {
+ case *telemetry.TelemetryField_BytesValue:
+ return string(val.BytesValue)
+ case *telemetry.TelemetryField_StringValue:
+ return val.StringValue
+ case *telemetry.TelemetryField_BoolValue:
+ if val.BoolValue {
+ return "true"
+ }
+ return "false"
+ case *telemetry.TelemetryField_Uint32Value:
+ return strconv.FormatUint(uint64(val.Uint32Value), 10)
+ case *telemetry.TelemetryField_Uint64Value:
+ return strconv.FormatUint(val.Uint64Value, 10)
+ case *telemetry.TelemetryField_Sint32Value:
+ return strconv.FormatInt(int64(val.Sint32Value), 10)
+ case *telemetry.TelemetryField_Sint64Value:
+ return strconv.FormatInt(val.Sint64Value, 10)
+ case *telemetry.TelemetryField_DoubleValue:
+ return strconv.FormatFloat(val.DoubleValue, 'f', -1, 64)
+ case *telemetry.TelemetryField_FloatValue:
+ return strconv.FormatFloat(float64(val.FloatValue), 'f', -1, 32)
+ default:
+ return ""
+ }
+}
+
+// Recursively parse tag fields
+func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemetry.TelemetryField, prefix string) {
+ localname := strings.Replace(field.Name, "-", "_", -1)
+ name := localname
+ if len(localname) == 0 {
+ name = prefix
+ } else if len(prefix) > 0 {
+ name = prefix + "/" + localname
+ }
+
+ if tag := decodeTag(field); len(name) > 0 && len(tag) > 0 {
+ if _, exists := tags[localname]; !exists { // Use short keys whenever possible
+ tags[localname] = tag
+ } else {
+ tags[name] = tag
+ }
+ }
+
+ for _, subfield := range field.Fields {
+ c.parseKeyField(tags, subfield, name)
+ }
+}
+
+func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string,
+ path string, tags map[string]string, timestamp time.Time) {
+ name := strings.Replace(field.Name, "-", "_", -1)
+ if len(name) == 0 {
+ name = prefix
+ } else if len(prefix) > 0 {
+ name = prefix + "/" + name
+ }
+
+ extraTags := c.extraTags[strings.Replace(path, "-", "_", -1)+"/"+name]
+
+ if value := decodeValue(field); value != nil {
+ // Do alias lookup, to shorten measurement names
+ measurement := path
+ if alias, ok := c.aliases[path]; ok {
+ measurement = alias
+ } else {
+ c.mutex.Lock()
+ if _, haveWarned := c.warned[path]; !haveWarned {
+ c.Log.Debugf("No measurement alias for encoding path: %s", path)
+ c.warned[path] = struct{}{}
+ }
+ c.mutex.Unlock()
+ }
+
+ grouper.Add(measurement, tags, timestamp, name, value)
+ return
+ }
+
+ if len(extraTags) > 0 {
+ for _, subfield := range field.Fields {
+ if _, isExtraTag := extraTags[subfield.Name]; isExtraTag {
+ tags[name+"/"+strings.Replace(subfield.Name, "-", "_", -1)] = decodeTag(subfield)
+ }
+ }
+ }
+
+ var nxAttributes, nxChildren, nxRows *telemetry.TelemetryField
+ isNXOS := !strings.ContainsRune(path, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not
+ for _, subfield := range field.Fields {
+ if isNXOS && subfield.Name == "attributes" && len(subfield.Fields) > 0 {
+ nxAttributes = subfield.Fields[0]
+ } else if isNXOS && subfield.Name == "children" && len(subfield.Fields) > 0 {
+ nxChildren = subfield
+ } else if isNXOS && strings.HasPrefix(subfield.Name, "ROW_") {
+ nxRows = subfield
+ } else if _, isExtraTag := extraTags[subfield.Name]; !isExtraTag { // Regular telemetry decoding
+ c.parseContentField(grouper, subfield, name, path, tags, timestamp)
+ }
+ }
+
+ if nxAttributes == nil && nxRows == nil {
+ return
+ } else if nxRows != nil {
+ // NXAPI structure: https://developer.cisco.com/docs/cisco-nexus-9000-series-nx-api-cli-reference-release-9-2x/
+ for _, row := range nxRows.Fields {
+ for i, subfield := range row.Fields {
+ if i == 0 { // First subfield contains the index, promote it from value to tag
+ tags[prefix] = decodeTag(subfield)
+ } else {
+ c.parseContentField(grouper, subfield, "", path, tags, timestamp)
+ }
+ }
+ delete(tags, prefix)
+ }
+ return
+ }
+
+ // DME structure: https://developer.cisco.com/site/nxapi-dme-model-reference-api/
+ rn := ""
+ dn := false
+
+ for _, subfield := range nxAttributes.Fields {
+ if subfield.Name == "rn" {
+ rn = decodeTag(subfield)
+ } else if subfield.Name == "dn" {
+ dn = true
+ }
+ }
+
+ if len(rn) > 0 {
+ tags[prefix] = rn
+ } else if !dn { // Check for distinguished name being present
+ c.acc.AddError(fmt.Errorf("NX-OS decoding failed: missing dn field"))
+ return
+ }
+
+ for _, subfield := range nxAttributes.Fields {
+ if subfield.Name != "rn" {
+ c.parseContentField(grouper, subfield, "", path, tags, timestamp)
+ }
+ }
+
+ if nxChildren != nil {
+ // This is a nested structure, children will inherit relative name keys of parent
+ for _, subfield := range nxChildren.Fields {
+ c.parseContentField(grouper, subfield, prefix, path, tags, timestamp)
+ }
+ }
+ delete(tags, prefix)
+}
+
+func (c *CiscoTelemetryMDT) Address() net.Addr {
+ return c.listener.Addr()
+}
+
+// Stop listener and cleanup
+func (c *CiscoTelemetryMDT) Stop() {
+ if c.grpcServer != nil {
+ // Stop server and terminate all running dialout routines
+ c.grpcServer.Stop()
+ }
+ if c.listener != nil {
+ c.listener.Close()
+ }
+ c.wg.Wait()
+}
+
+const sampleConfig = `
+ ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
+ ## using the grpc transport.
+ transport = "grpc"
+
+ ## Address and port to host telemetry listener
+ service_address = ":57000"
+
+ ## Enable TLS; grpc transport only.
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## Enable TLS client authentication and define allowed CA certificates; grpc
+ ## transport only.
+ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+ ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
+ # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
+
+ ## Define aliases to map telemetry encoding paths to simple measurement names
+ [inputs.cisco_telemetry_mdt.aliases]
+ ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+`
+
+// SampleConfig of plugin
+func (c *CiscoTelemetryMDT) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description of plugin
+func (c *CiscoTelemetryMDT) Description() string {
+ return "Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms"
+}
+
+// Gather plugin measurements (unused)
+func (c *CiscoTelemetryMDT) Gather(_ telegraf.Accumulator) error {
+ return nil
+}
+
+func init() {
+ inputs.Add("cisco_telemetry_mdt", func() telegraf.Input {
+ return &CiscoTelemetryMDT{
+ Transport: "grpc",
+ ServiceAddress: "127.0.0.1:57000",
+ }
+ })
+}
diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go
new file mode 100644
index 0000000000000..ea200bc744a7d
--- /dev/null
+++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go
@@ -0,0 +1,592 @@
+package cisco_telemetry_mdt
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "net"
+ "testing"
+
+ dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout"
+ telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis"
+ "github.com/golang/protobuf/proto"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+)
+
+func TestHandleTelemetryTwoSimple(t *testing.T) {
+ c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"alias": "type:model/some/path"}}
+ acc := &testutil.Accumulator{}
+ err := c.Start(acc)
+ // error is expected since we are passing in dummy transport
+ require.Error(t, err)
+
+ telemetry := &telemetry.Telemetry{
+ MsgTimestamp: 1543236572000,
+ EncodingPath: "type:model/some/path",
+ NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
+ Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
+ DataGpbkv: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "keys",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "name",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"},
+ },
+ {
+ Name: "uint64",
+ ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 1234},
+ },
+ },
+ },
+ {
+ Name: "content",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "bool",
+ ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: true},
+ },
+ },
+ },
+ },
+ },
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "keys",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "name",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str2"},
+ },
+ },
+ },
+ {
+ Name: "content",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "bool",
+ ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false},
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ data, _ := proto.Marshal(telemetry)
+
+ c.handleTelemetry(data)
+ require.Empty(t, acc.Errors)
+
+ tags := map[string]string{"path": "type:model/some/path", "name": "str", "uint64": "1234", "source": "hostname", "subscription": "subscription"}
+ fields := map[string]interface{}{"bool": true}
+ acc.AssertContainsTaggedFields(t, "alias", fields, tags)
+
+ tags = map[string]string{"path": "type:model/some/path", "name": "str2", "source": "hostname", "subscription": "subscription"}
+ fields = map[string]interface{}{"bool": false}
+ acc.AssertContainsTaggedFields(t, "alias", fields, tags)
+}
+
+func TestHandleTelemetrySingleNested(t *testing.T) {
+ c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"nested": "type:model/nested/path"}}
+ acc := &testutil.Accumulator{}
+ err := c.Start(acc)
+ // error is expected since we are passing in dummy transport
+ require.Error(t, err)
+
+ telemetry := &telemetry.Telemetry{
+ MsgTimestamp: 1543236572000,
+ EncodingPath: "type:model/nested/path",
+ NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
+ Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
+ DataGpbkv: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "keys",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "nested",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "key",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "level",
+ ValueByType: &telemetry.TelemetryField_DoubleValue{DoubleValue: 3},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: "content",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "nested",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "value",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "foo",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ data, _ := proto.Marshal(telemetry)
+
+ c.handleTelemetry(data)
+ require.Empty(t, acc.Errors)
+
+ tags := map[string]string{"path": "type:model/nested/path", "level": "3", "source": "hostname", "subscription": "subscription"}
+ fields := map[string]interface{}{"nested/value/foo": "bar"}
+ acc.AssertContainsTaggedFields(t, "nested", fields, tags)
+}
+
+func TestHandleEmbeddedTags(t *testing.T) {
+ c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"extra": "type:model/extra"}, EmbeddedTags: []string{"type:model/extra/list/name"}}
+ acc := &testutil.Accumulator{}
+ err := c.Start(acc)
+ // error is expected since we are passing in dummy transport
+ require.Error(t, err)
+
+ telemetry := &telemetry.Telemetry{
+ MsgTimestamp: 1543236572000,
+ EncodingPath: "type:model/extra",
+ NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
+ Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
+ DataGpbkv: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "keys",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "foo",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
+ },
+ },
+ },
+ {
+ Name: "content",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "list",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "name",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry1"},
+ },
+ {
+ Name: "test",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"},
+ },
+ },
+ },
+ {
+ Name: "list",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "name",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry2"},
+ },
+ {
+ Name: "test",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ data, _ := proto.Marshal(telemetry)
+
+ c.handleTelemetry(data)
+ require.Empty(t, acc.Errors)
+
+ tags1 := map[string]string{"path": "type:model/extra", "foo": "bar", "source": "hostname", "subscription": "subscription", "list/name": "entry1"}
+ fields1 := map[string]interface{}{"list/test": "foo"}
+ tags2 := map[string]string{"path": "type:model/extra", "foo": "bar", "source": "hostname", "subscription": "subscription", "list/name": "entry2"}
+ fields2 := map[string]interface{}{"list/test": "bar"}
+ acc.AssertContainsTaggedFields(t, "extra", fields1, tags1)
+ acc.AssertContainsTaggedFields(t, "extra", fields2, tags2)
+}
+
+func TestHandleNXAPI(t *testing.T) {
+ c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"nxapi": "show nxapi"}}
+ acc := &testutil.Accumulator{}
+ err := c.Start(acc)
+ // error is expected since we are passing in dummy transport
+ require.Error(t, err)
+
+ telemetry := &telemetry.Telemetry{
+ MsgTimestamp: 1543236572000,
+ EncodingPath: "show nxapi",
+ NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
+ Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
+ DataGpbkv: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "keys",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "foo",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
+ },
+ },
+ },
+ {
+ Name: "content",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "TABLE_nxapi",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "ROW_nxapi",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "index",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"},
+ },
+ {
+ Name: "value",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"},
+ },
+ },
+ },
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "index",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i2"},
+ },
+ {
+ Name: "value",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ data, _ := proto.Marshal(telemetry)
+
+ c.handleTelemetry(data)
+ require.Empty(t, acc.Errors)
+
+ tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "source": "hostname", "subscription": "subscription"}
+ fields1 := map[string]interface{}{"value": "foo"}
+ tags2 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i2", "source": "hostname", "subscription": "subscription"}
+ fields2 := map[string]interface{}{"value": "bar"}
+ acc.AssertContainsTaggedFields(t, "nxapi", fields1, tags1)
+ acc.AssertContainsTaggedFields(t, "nxapi", fields2, tags2)
+}
+
+func TestHandleNXDME(t *testing.T) {
+ c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/dme"}}
+ acc := &testutil.Accumulator{}
+ err := c.Start(acc)
+ // error is expected since we are passing in dummy transport
+ require.Error(t, err)
+
+ telemetry := &telemetry.Telemetry{
+ MsgTimestamp: 1543236572000,
+ EncodingPath: "sys/dme",
+ NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
+ Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
+ DataGpbkv: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "keys",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "foo",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
+ },
+ },
+ },
+ {
+ Name: "content",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "fooEntity",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "attributes",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "rn",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"},
+ },
+ {
+ Name: "value",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ data, _ := proto.Marshal(telemetry)
+
+ c.handleTelemetry(data)
+ require.Empty(t, acc.Errors)
+
+ tags1 := map[string]string{"path": "sys/dme", "foo": "bar", "fooEntity": "some-rn", "source": "hostname", "subscription": "subscription"}
+ fields1 := map[string]interface{}{"value": "foo"}
+ acc.AssertContainsTaggedFields(t, "dme", fields1, tags1)
+}
+
+func TestTCPDialoutOverflow(t *testing.T) {
+ c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:0"}
+ acc := &testutil.Accumulator{}
+ err := c.Start(acc)
+ require.NoError(t, err)
+
+ hdr := struct {
+ MsgType uint16
+ MsgEncap uint16
+ MsgHdrVersion uint16
+ MsgFlags uint16
+ MsgLen uint32
+ }{MsgLen: uint32(1000000000)}
+
+ addr := c.Address()
+ conn, err := net.Dial(addr.Network(), addr.String())
+ require.NoError(t, err)
+ binary.Write(conn, binary.BigEndian, hdr)
+ conn.Read([]byte{0})
+ conn.Close()
+
+ c.Stop()
+
+ require.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000"))
+}
+
+func mockTelemetryMessage() *telemetry.Telemetry {
+ return &telemetry.Telemetry{
+ MsgTimestamp: 1543236572000,
+ EncodingPath: "type:model/some/path",
+ NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
+ Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
+ DataGpbkv: []*telemetry.TelemetryField{
+ {
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "keys",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "name",
+ ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"},
+ },
+ },
+ },
+ {
+ Name: "content",
+ Fields: []*telemetry.TelemetryField{
+ {
+ Name: "value",
+ ValueByType: &telemetry.TelemetryField_Sint64Value{Sint64Value: -1},
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func TestTCPDialoutMultiple(t *testing.T) {
+ c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:0", Aliases: map[string]string{
+ "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}}
+ acc := &testutil.Accumulator{}
+ err := c.Start(acc)
+ require.NoError(t, err)
+
+ telemetry := mockTelemetryMessage()
+
+ hdr := struct {
+ MsgType uint16
+ MsgEncap uint16
+ MsgHdrVersion uint16
+ MsgFlags uint16
+ MsgLen uint32
+ }{}
+
+ addr := c.Address()
+ conn, err := net.Dial(addr.Network(), addr.String())
+ require.NoError(t, err)
+
+ data, _ := proto.Marshal(telemetry)
+ hdr.MsgLen = uint32(len(data))
+ binary.Write(conn, binary.BigEndian, hdr)
+ conn.Write(data)
+
+ conn2, err := net.Dial(addr.Network(), addr.String())
+ require.NoError(t, err)
+
+ telemetry.EncodingPath = "type:model/parallel/path"
+ data, _ = proto.Marshal(telemetry)
+ hdr.MsgLen = uint32(len(data))
+ binary.Write(conn2, binary.BigEndian, hdr)
+ conn2.Write(data)
+ conn2.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0})
+ conn2.Read([]byte{0})
+ conn2.Close()
+
+ telemetry.EncodingPath = "type:model/other/path"
+ data, _ = proto.Marshal(telemetry)
+ hdr.MsgLen = uint32(len(data))
+ binary.Write(conn, binary.BigEndian, hdr)
+ conn.Write(data)
+ conn.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0})
+ conn.Read([]byte{0})
+ c.Stop()
+ conn.Close()
+
+ // We use the invalid dialout flags to let the server close the connection
+ require.Equal(t, acc.Errors, []error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")})
+
+ tags := map[string]string{"path": "type:model/some/path", "name": "str", "source": "hostname", "subscription": "subscription"}
+ fields := map[string]interface{}{"value": int64(-1)}
+ acc.AssertContainsTaggedFields(t, "some", fields, tags)
+
+ tags = map[string]string{"path": "type:model/parallel/path", "name": "str", "source": "hostname", "subscription": "subscription"}
+ fields = map[string]interface{}{"value": int64(-1)}
+ acc.AssertContainsTaggedFields(t, "parallel", fields, tags)
+
+ tags = map[string]string{"path": "type:model/other/path", "name": "str", "source": "hostname", "subscription": "subscription"}
+ fields = map[string]interface{}{"value": int64(-1)}
+ acc.AssertContainsTaggedFields(t, "other", fields, tags)
+}
+
+func TestGRPCDialoutError(t *testing.T) {
+ c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:0"}
+ acc := &testutil.Accumulator{}
+ err := c.Start(acc)
+ require.NoError(t, err)
+
+ addr := c.Address()
+ conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure())
+ client := dialout.NewGRPCMdtDialoutClient(conn)
+ stream, _ := client.MdtDialout(context.Background())
+
+ args := &dialout.MdtDialoutArgs{Errors: "foobar"}
+ stream.Send(args)
+
+ // Wait for the server to close
+ stream.Recv()
+ c.Stop()
+
+ require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: foobar")})
+}
+
+func TestGRPCDialoutMultiple(t *testing.T) {
+ c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:0", Aliases: map[string]string{
+ "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}}
+ acc := &testutil.Accumulator{}
+ err := c.Start(acc)
+ require.NoError(t, err)
+ telemetry := mockTelemetryMessage()
+
+ addr := c.Address()
+ conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock())
+ client := dialout.NewGRPCMdtDialoutClient(conn)
+ stream, _ := client.MdtDialout(context.TODO())
+
+ data, _ := proto.Marshal(telemetry)
+ args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456}
+ stream.Send(args)
+
+ conn2, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock())
+ client2 := dialout.NewGRPCMdtDialoutClient(conn2)
+ stream2, _ := client2.MdtDialout(context.TODO())
+
+ telemetry.EncodingPath = "type:model/parallel/path"
+ data, _ = proto.Marshal(telemetry)
+ args = &dialout.MdtDialoutArgs{Data: data}
+ stream2.Send(args)
+ stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})
+ stream2.Recv()
+ conn2.Close()
+
+ telemetry.EncodingPath = "type:model/other/path"
+ data, _ = proto.Marshal(telemetry)
+ args = &dialout.MdtDialoutArgs{Data: data}
+ stream.Send(args)
+ stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})
+ stream.Recv()
+
+ c.Stop()
+ conn.Close()
+
+ require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")})
+
+ tags := map[string]string{"path": "type:model/some/path", "name": "str", "source": "hostname", "subscription": "subscription"}
+ fields := map[string]interface{}{"value": int64(-1)}
+ acc.AssertContainsTaggedFields(t, "some", fields, tags)
+
+ tags = map[string]string{"path": "type:model/parallel/path", "name": "str", "source": "hostname", "subscription": "subscription"}
+ fields = map[string]interface{}{"value": int64(-1)}
+ acc.AssertContainsTaggedFields(t, "parallel", fields, tags)
+
+ tags = map[string]string{"path": "type:model/other/path", "name": "str", "source": "hostname", "subscription": "subscription"}
+ fields = map[string]interface{}{"value": int64(-1)}
+ acc.AssertContainsTaggedFields(t, "other", fields, tags)
+
+}
diff --git a/plugins/inputs/clickhouse/README.md b/plugins/inputs/clickhouse/README.md
new file mode 100644
index 0000000000000..9b9e6caa904f7
--- /dev/null
+++ b/plugins/inputs/clickhouse/README.md
@@ -0,0 +1,205 @@
+# ClickHouse Input Plugin
+
+This plugin gathers the statistic data from [ClickHouse](https://github.com/ClickHouse/ClickHouse) server.
+
+### Configuration
+```toml
+# Read metrics from one or many ClickHouse servers
+[[inputs.clickhouse]]
+ ## Username for authorization on ClickHouse server
+ ## example: username = "default"
+ username = "default"
+
+ ## Password for authorization on ClickHouse server
+ ## example: password = "super_secret"
+
+ ## HTTP(s) timeout while getting metrics values
+ ## The timeout includes connection time, any redirects, and reading the response body.
+ ## example: timeout = 1s
+ # timeout = 5s
+
+ ## List of servers for metrics scraping
+ ## metrics scrape via HTTP(s) clickhouse interface
+ ## https://clickhouse.tech/docs/en/interfaces/http/
+ ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+ servers = ["http://127.0.0.1:8123"]
+
+ ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster
+ ## with using same "user:password" described in "user" and "password" parameters
+ ## and get this server hostname list from "system.clusters" table
+ ## see
+ ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+ ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+ ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+ ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+ ## example: auto_discovery = false
+ # auto_discovery = true
+
+ ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+ ## when this filter present then "WHERE cluster IN (...)" filter will apply
+ ## please use only full cluster names here, regexp and glob filters is not allowed
+ ## for "/etc/clickhouse-server/config.d/remote.xml"
+ ##
+ ##
+ ##
+ ##
+ ## clickhouse-ru-1.local 9000
+ ## clickhouse-ru-2.local 9000
+ ##
+ ##
+ ## clickhouse-eu-1.local 9000
+ ## clickhouse-eu-2.local 9000
+ ##
+ ##
+ ##
+ ##
+ ##
+ ##
+ ## example: cluster_include = ["my-own-cluster"]
+ # cluster_include = []
+
+ ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+ ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply
+ ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+ # cluster_exclude = []
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+```
+
+### Metrics
+
+- clickhouse_events
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - fields:
+ - all rows from [system.events][]
+
++ clickhouse_metrics
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - fields:
+ - all rows from [system.metrics][]
+
+- clickhouse_asynchronous_metrics
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - fields:
+ - all rows from [system.asynchronous_metrics][]
+
++ clickhouse_tables
+ - tags:
+ - source (ClickHouse server hostname)
+ - table
+ - database
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - fields:
+ - bytes
+ - parts
+ - rows
+
+- clickhouse_zookeeper
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - fields:
+ - root_nodes (count of node from [system.zookeeper][] where path=/)
+
++ clickhouse_replication_queue
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - fields:
+ - too_many_tries_replicas (count of replicas which have num_tries > 1 in `system.replication_queue`)
+
+- clickhouse_detached_parts
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - fields:
+ - detached_parts (total detached parts for all tables and databases from [system.detached_parts][])
+
++ clickhouse_dictionaries
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - dict_origin (xml Filename when dictionary created from *_dictionary.xml, database.table when dictionary created from DDL)
+ - fields:
+ - is_loaded (0 - when dictionary data not successful load, 1 - when dictionary data loading fail, see [system.dictionaries][] for details)
+ - bytes_allocated (how many bytes allocated in RAM after a dictionary loaded)
+
+- clickhouse_mutations
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - fields:
+ - running - gauge which show how much mutation doesn't complete now, see [system.mutations][] for details
+ - failed - counter which show total failed mutations from first clickhouse-server run
+ - completed - counter which show total successful finished mutations from first clickhouse-server run
+
++ clickhouse_disks
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - name (disk name in storage configuration)
+ - path (path to disk)
+ - fields:
+ - free_space_percent - 0-100, gauge which show current percent of free disk space bytes relative to total disk space bytes
+ - keep_free_space_percent - 0-100, gauge which show current percent of required keep free disk bytes relative to total disk space bytes
+
+- clickhouse_processes
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - fields:
+ - percentile_50 - float gauge which show 50% percentile (quantile 0.5) for `elapsed` field of running processes, see [system.processes][] for details
+ - percentile_90 - float gauge which show 90% percentile (quantile 0.9) for `elapsed` field of running processes, see [system.processes][] for details
+ - longest_running - float gauge which show maximum value for `elapsed` field of running processes, see [system.processes][] for details
+
+- clickhouse_text_log
+ - tags:
+ - source (ClickHouse server hostname)
+ - cluster (Name of the cluster [optional])
+ - shard_num (Shard number in the cluster [optional])
+ - level (message level, only message with level less or equal Notice is collects), see details on [system.text_log][]
+ - fields:
+ - messages_last_10_min - gauge which show how many messages collected
+
+### Example Output
+
+```
+clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 1569421000000000000
+clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000
+clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000
+clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=system,host=kshvakov,source=localhost,shard_num=1,table=trace_log bytes=754i,parts=1i,rows=1i 1569421000000000000
+clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,host=kshvakov,source=localhost,shard_num=1,table=example bytes=326i,parts=2i,rows=2i 1569421000000000000
+```
+
+[system.events]: https://clickhouse.tech/docs/en/operations/system-tables/events/
+[system.metrics]: https://clickhouse.tech/docs/en/operations/system-tables/metrics/
+[system.asynchronous_metrics]: https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics/
+[system.zookeeper]: https://clickhouse.tech/docs/en/operations/system-tables/zookeeper/
+[system.detached_parts]: https://clickhouse.tech/docs/en/operations/system-tables/detached_parts/
+[system.dictionaries]: https://clickhouse.tech/docs/en/operations/system-tables/dictionaries/
+[system.mutations]: https://clickhouse.tech/docs/en/operations/system-tables/mutations/
+[system.disks]: https://clickhouse.tech/docs/en/operations/system-tables/disks/
+[system.processes]: https://clickhouse.tech/docs/en/operations/system-tables/processes/
+[system.text_log]: https://clickhouse.tech/docs/en/operations/system-tables/text_log/
diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go
new file mode 100644
index 0000000000000..187ead5cf6790
--- /dev/null
+++ b/plugins/inputs/clickhouse/clickhouse.go
@@ -0,0 +1,666 @@
+package clickhouse
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+var defaultTimeout = 5 * time.Second
+
+var sampleConfig = `
+ ## Username for authorization on ClickHouse server
+ ## example: username = "default""
+ username = "default"
+
+ ## Password for authorization on ClickHouse server
+ ## example: password = "super_secret"
+
+ ## HTTP(s) timeout while getting metrics values
+ ## The timeout includes connection time, any redirects, and reading the response body.
+ ## example: timeout = 1s
+ # timeout = 5s
+
+ ## List of servers for metrics scraping
+ ## metrics scrape via HTTP(s) clickhouse interface
+ ## https://clickhouse.tech/docs/en/interfaces/http/
+ ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+ servers = ["http://127.0.0.1:8123"]
+
+ ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster
+ ## with using same "user:password" described in "user" and "password" parameters
+ ## and get this server hostname list from "system.clusters" table
+ ## see
+ ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+ ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+ ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+ ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+ ## example: auto_discovery = false
+ # auto_discovery = true
+
+ ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+ ## when this filter present then "WHERE cluster IN (...)" filter will apply
+ ## please use only full cluster names here, regexp and glob filters is not allowed
+ ## for "/etc/clickhouse-server/config.d/remote.xml"
+ ##
+ ##
+ ##
+ ##
+ ## clickhouse-ru-1.local 9000
+ ## clickhouse-ru-2.local 9000
+ ##
+ ##
+ ## clickhouse-eu-1.local 9000
+ ## clickhouse-eu-2.local 9000
+ ##
+ ##
+ ##
+ ##
+ ##
+ ##
+ ## example: cluster_include = ["my-own-cluster"]
+ # cluster_include = []
+
+ ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+ ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply
+ ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+ # cluster_exclude = []
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+`
+
+type connect struct {
+ Cluster string `json:"cluster"`
+ ShardNum int `json:"shard_num"`
+ Hostname string `json:"host_name"`
+ url *url.URL
+}
+
+func init() {
+ inputs.Add("clickhouse", func() telegraf.Input {
+ return &ClickHouse{
+ AutoDiscovery: true,
+ ClientConfig: tls.ClientConfig{
+ InsecureSkipVerify: false,
+ },
+ Timeout: internal.Duration{Duration: defaultTimeout},
+ }
+ })
+}
+
+// ClickHouse Telegraf Input Plugin
+type ClickHouse struct {
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ Servers []string `toml:"servers"`
+ AutoDiscovery bool `toml:"auto_discovery"`
+ ClusterInclude []string `toml:"cluster_include"`
+ ClusterExclude []string `toml:"cluster_exclude"`
+ Timeout internal.Duration `toml:"timeout"`
+ HTTPClient http.Client
+ tls.ClientConfig
+}
+
+// SampleConfig returns the sample config
+func (*ClickHouse) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description return plugin description
+func (*ClickHouse) Description() string {
+ return "Read metrics from one or many ClickHouse servers"
+}
+
+// Start ClickHouse input service
+func (ch *ClickHouse) Start(telegraf.Accumulator) error {
+ timeout := defaultTimeout
+ if ch.Timeout.Duration != 0 {
+ timeout = ch.Timeout.Duration
+ }
+ tlsCfg, err := ch.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ ch.HTTPClient = http.Client{
+ Timeout: timeout,
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ Proxy: http.ProxyFromEnvironment,
+ MaxIdleConnsPerHost: 1,
+ },
+ }
+ return nil
+}
+
+// Gather collect data from ClickHouse server
+func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) {
+ var (
+ connects []connect
+ exists = func(host string) bool {
+ for _, c := range connects {
+ if c.Hostname == host {
+ return true
+ }
+ }
+ return false
+ }
+ )
+
+ for _, server := range ch.Servers {
+ u, err := url.Parse(server)
+ if err != nil {
+ return err
+ }
+ switch {
+ case ch.AutoDiscovery:
+ var conns []connect
+ if err := ch.execQuery(u, "SELECT cluster, shard_num, host_name FROM system.clusters "+ch.clusterIncludeExcludeFilter(), &conns); err != nil {
+ acc.AddError(err)
+ continue
+ }
+ for _, c := range conns {
+ if !exists(c.Hostname) {
+ c.url = &url.URL{
+ Scheme: u.Scheme,
+ Host: net.JoinHostPort(c.Hostname, u.Port()),
+ }
+ connects = append(connects, c)
+ }
+ }
+ default:
+ connects = append(connects, connect{
+ Hostname: u.Hostname(),
+ url: u,
+ })
+ }
+ }
+
+ for _, conn := range connects {
+
+ metricsFuncs := []func(acc telegraf.Accumulator, conn *connect) error{
+ ch.tables,
+ ch.zookeeper,
+ ch.replicationQueue,
+ ch.detachedParts,
+ ch.dictionaries,
+ ch.mutations,
+ ch.disks,
+ ch.processes,
+ ch.textLog,
+ }
+
+ for _, metricFunc := range metricsFuncs {
+ if err := metricFunc(acc, &conn); err != nil {
+ acc.AddError(err)
+ }
+
+ }
+
+ for metric := range commonMetrics {
+ if err := ch.commonMetrics(acc, &conn, metric); err != nil {
+ acc.AddError(err)
+ }
+ }
+ }
+ return nil
+}
+
+func (ch *ClickHouse) Stop() {
+ ch.HTTPClient.CloseIdleConnections()
+}
+
+func (ch *ClickHouse) clusterIncludeExcludeFilter() string {
+ if len(ch.ClusterInclude) == 0 && len(ch.ClusterExclude) == 0 {
+ return ""
+ }
+ var (
+ escape = func(in string) string {
+ return "'" + strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(in) + "'"
+ }
+ makeFilter = func(expr string, args []string) string {
+ in := make([]string, 0, len(args))
+ for _, v := range args {
+ in = append(in, escape(v))
+ }
+ return fmt.Sprintf("cluster %s (%s)", expr, strings.Join(in, ", "))
+ }
+ includeFilter, excludeFilter string
+ )
+
+ if len(ch.ClusterInclude) != 0 {
+ includeFilter = makeFilter("IN", ch.ClusterInclude)
+ }
+ if len(ch.ClusterExclude) != 0 {
+ excludeFilter = makeFilter("NOT IN", ch.ClusterExclude)
+ }
+ if includeFilter != "" && excludeFilter != "" {
+ return "WHERE " + includeFilter + " OR " + excludeFilter
+ }
+ if includeFilter == "" && excludeFilter != "" {
+ return "WHERE " + excludeFilter
+ }
+ return "WHERE " + includeFilter
+}
+
+func (ch *ClickHouse) commonMetrics(acc telegraf.Accumulator, conn *connect, metric string) error {
+ var result []struct {
+ Metric string `json:"metric"`
+ Value chUInt64 `json:"value"`
+ }
+ if err := ch.execQuery(conn.url, commonMetrics[metric], &result); err != nil {
+ return err
+ }
+
+ tags := ch.makeDefaultTags(conn)
+
+ fields := make(map[string]interface{})
+ for _, r := range result {
+ fields[internal.SnakeCase(r.Metric)] = uint64(r.Value)
+ }
+
+ acc.AddFields("clickhouse_"+metric, fields, tags)
+
+ return nil
+}
+
+func (ch *ClickHouse) zookeeper(acc telegraf.Accumulator, conn *connect) error {
+ var zkExists []struct {
+ ZkExists chUInt64 `json:"zk_exists"`
+ }
+
+ if err := ch.execQuery(conn.url, systemZookeeperExistsSQL, &zkExists); err != nil {
+ return err
+ }
+ tags := ch.makeDefaultTags(conn)
+
+ if len(zkExists) > 0 && zkExists[0].ZkExists > 0 {
+ var zkRootNodes []struct {
+ ZkRootNodes chUInt64 `json:"zk_root_nodes"`
+ }
+ if err := ch.execQuery(conn.url, systemZookeeperRootNodesSQL, &zkRootNodes); err != nil {
+ return err
+ }
+
+ acc.AddFields("clickhouse_zookeeper",
+ map[string]interface{}{
+ "root_nodes": uint64(zkRootNodes[0].ZkRootNodes),
+ },
+ tags,
+ )
+ }
+ return nil
+}
+
+func (ch *ClickHouse) replicationQueue(acc telegraf.Accumulator, conn *connect) error {
+ var replicationQueueExists []struct {
+ ReplicationQueueExists chUInt64 `json:"replication_queue_exists"`
+ }
+
+ if err := ch.execQuery(conn.url, systemReplicationExistsSQL, &replicationQueueExists); err != nil {
+ return err
+ }
+
+ tags := ch.makeDefaultTags(conn)
+
+ if len(replicationQueueExists) > 0 && replicationQueueExists[0].ReplicationQueueExists > 0 {
+ var replicationTooManyTries []struct {
+ NumTriesReplicas chUInt64 `json:"replication_num_tries_replicas"`
+ TooManyTriesReplicas chUInt64 `json:"replication_too_many_tries_replicas"`
+ }
+ if err := ch.execQuery(conn.url, systemReplicationNumTriesSQL, &replicationTooManyTries); err != nil {
+ return err
+ }
+
+ acc.AddFields("clickhouse_replication_queue",
+ map[string]interface{}{
+ "too_many_tries_replicas": uint64(replicationTooManyTries[0].TooManyTriesReplicas),
+ "num_tries_replicas": uint64(replicationTooManyTries[0].NumTriesReplicas),
+ },
+ tags,
+ )
+ }
+ return nil
+}
+
+func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) error {
+
+ var detachedParts []struct {
+ DetachedParts chUInt64 `json:"detached_parts"`
+ }
+ if err := ch.execQuery(conn.url, systemDetachedPartsSQL, &detachedParts); err != nil {
+ return err
+ }
+
+ if len(detachedParts) > 0 {
+ tags := ch.makeDefaultTags(conn)
+ acc.AddFields("clickhouse_detached_parts",
+ map[string]interface{}{
+ "detached_parts": uint64(detachedParts[0].DetachedParts),
+ },
+ tags,
+ )
+ }
+ return nil
+}
+
+func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) error {
+
+ var brokenDictionaries []struct {
+ Origin string `json:"origin"`
+ BytesAllocated chUInt64 `json:"bytes_allocated"`
+ Status string `json:"status"`
+ }
+ if err := ch.execQuery(conn.url, systemDictionariesSQL, &brokenDictionaries); err != nil {
+ return err
+ }
+
+ for _, dict := range brokenDictionaries {
+ tags := ch.makeDefaultTags(conn)
+
+ isLoaded := uint64(1)
+ if dict.Status != "LOADED" {
+ isLoaded = 0
+ }
+
+ if dict.Origin != "" {
+ tags["dict_origin"] = dict.Origin
+ acc.AddFields("clickhouse_dictionaries",
+ map[string]interface{}{
+ "is_loaded": isLoaded,
+ "bytes_allocated": uint64(dict.BytesAllocated),
+ },
+ tags,
+ )
+ }
+ }
+
+ return nil
+}
+
+func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error {
+
+ var mutationsStatus []struct {
+ Failed chUInt64 `json:"failed"`
+ Running chUInt64 `json:"running"`
+ Completed chUInt64 `json:"completed"`
+ }
+ if err := ch.execQuery(conn.url, systemMutationSQL, &mutationsStatus); err != nil {
+ return err
+ }
+
+ if len(mutationsStatus) > 0 {
+ tags := ch.makeDefaultTags(conn)
+
+ acc.AddFields("clickhouse_mutations",
+ map[string]interface{}{
+ "failed": uint64(mutationsStatus[0].Failed),
+ "running": uint64(mutationsStatus[0].Running),
+ "completed": uint64(mutationsStatus[0].Completed),
+ },
+ tags,
+ )
+ }
+
+ return nil
+}
+
+func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error {
+
+ var disksStatus []struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ FreePercent chUInt64 `json:"free_space_percent"`
+ KeepFreePercent chUInt64 `json:"keep_free_space_percent"`
+ }
+
+ if err := ch.execQuery(conn.url, systemDisksSQL, &disksStatus); err != nil {
+ return err
+ }
+
+ for _, disk := range disksStatus {
+ tags := ch.makeDefaultTags(conn)
+ tags["name"] = disk.Name
+ tags["path"] = disk.Path
+
+ acc.AddFields("clickhouse_disks",
+ map[string]interface{}{
+ "free_space_percent": uint64(disk.FreePercent),
+ "keep_free_space_percent": uint64(disk.KeepFreePercent),
+ },
+ tags,
+ )
+
+ }
+
+ return nil
+}
+
+func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error {
+
+ var processesStats []struct {
+ QueryType string `json:"query_type"`
+ Percentile50 float64 `json:"p50"`
+ Percentile90 float64 `json:"p90"`
+ LongestRunning float64 `json:"longest_running"`
+ }
+
+ if err := ch.execQuery(conn.url, systemProcessesSQL, &processesStats); err != nil {
+ return err
+ }
+
+ for _, process := range processesStats {
+ tags := ch.makeDefaultTags(conn)
+ tags["query_type"] = process.QueryType
+
+ acc.AddFields("clickhouse_processes",
+ map[string]interface{}{
+ "percentile_50": process.Percentile50,
+ "percentile_90": process.Percentile90,
+ "longest_running": process.LongestRunning,
+ },
+ tags,
+ )
+
+ }
+
+ return nil
+}
+
+func (ch *ClickHouse) textLog(acc telegraf.Accumulator, conn *connect) error {
+ var textLogExists []struct {
+ TextLogExists chUInt64 `json:"text_log_exists"`
+ }
+
+ if err := ch.execQuery(conn.url, systemTextLogExistsSQL, &textLogExists); err != nil {
+ return err
+ }
+
+ if len(textLogExists) > 0 && textLogExists[0].TextLogExists > 0 {
+ var textLogLast10MinMessages []struct {
+ Level string `json:"level"`
+ MessagesLast10Min chUInt64 `json:"messages_last_10_min"`
+ }
+ if err := ch.execQuery(conn.url, systemTextLogSQL, &textLogLast10MinMessages); err != nil {
+ return err
+ }
+
+ for _, textLogItem := range textLogLast10MinMessages {
+ tags := ch.makeDefaultTags(conn)
+ tags["level"] = textLogItem.Level
+ acc.AddFields("clickhouse_text_log",
+ map[string]interface{}{
+ "messages_last_10_min": uint64(textLogItem.MessagesLast10Min),
+ },
+ tags,
+ )
+ }
+ }
+ return nil
+}
+
+func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error {
+ var parts []struct {
+ Database string `json:"database"`
+ Table string `json:"table"`
+ Bytes chUInt64 `json:"bytes"`
+ Parts chUInt64 `json:"parts"`
+ Rows chUInt64 `json:"rows"`
+ }
+
+ if err := ch.execQuery(conn.url, systemPartsSQL, &parts); err != nil {
+ return err
+ }
+ tags := ch.makeDefaultTags(conn)
+
+ for _, part := range parts {
+ tags["table"] = part.Table
+ tags["database"] = part.Database
+ acc.AddFields("clickhouse_tables",
+ map[string]interface{}{
+ "bytes": uint64(part.Bytes),
+ "parts": uint64(part.Parts),
+ "rows": uint64(part.Rows),
+ },
+ tags,
+ )
+ }
+ return nil
+}
+
+func (ch *ClickHouse) makeDefaultTags(conn *connect) map[string]string {
+ tags := map[string]string{
+ "source": conn.Hostname,
+ }
+ if len(conn.Cluster) != 0 {
+ tags["cluster"] = conn.Cluster
+ }
+ if conn.ShardNum != 0 {
+ tags["shard_num"] = strconv.Itoa(conn.ShardNum)
+ }
+ return tags
+}
+
+type clickhouseError struct {
+ StatusCode int
+ body []byte
+}
+
+func (e *clickhouseError) Error() string {
+ return fmt.Sprintf("received error code %d: %s", e.StatusCode, e.body)
+}
+
+func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error {
+ q := url.Query()
+ q.Set("query", query+" FORMAT JSON")
+ url.RawQuery = q.Encode()
+ req, _ := http.NewRequest("GET", url.String(), nil)
+ if ch.Username != "" {
+ req.Header.Add("X-ClickHouse-User", ch.Username)
+ }
+ if ch.Password != "" {
+ req.Header.Add("X-ClickHouse-Key", ch.Password)
+ }
+ resp, err := ch.HTTPClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode >= 300 {
+ body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200))
+ return &clickhouseError{
+ StatusCode: resp.StatusCode,
+ body: body,
+ }
+ }
+ var response struct {
+ Data json.RawMessage
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(response.Data, i); err != nil {
+ return err
+ }
+
+ if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+ return err
+ }
+ return nil
+}
+
+// see https://clickhouse.yandex/docs/en/operations/settings/settings/#session_settings-output_format_json_quote_64bit_integers
+type chUInt64 uint64
+
+func (i *chUInt64) UnmarshalJSON(b []byte) error {
+ b = bytes.TrimPrefix(b, []byte(`"`))
+ b = bytes.TrimSuffix(b, []byte(`"`))
+ v, err := strconv.ParseUint(string(b), 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = chUInt64(v)
+ return nil
+}
+
+const (
+ systemEventsSQL = "SELECT event AS metric, CAST(value AS UInt64) AS value FROM system.events"
+ systemMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.metrics"
+ systemAsyncMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.asynchronous_metrics"
+ systemPartsSQL = `
+ SELECT
+ database,
+ table,
+ SUM(bytes) AS bytes,
+ COUNT(*) AS parts,
+ SUM(rows) AS rows
+ FROM system.parts
+ WHERE active = 1
+ GROUP BY
+ database, table
+ ORDER BY
+ database, table
+ `
+ systemZookeeperExistsSQL = "SELECT count() AS zk_exists FROM system.tables WHERE database='system' AND name='zookeeper'"
+ systemZookeeperRootNodesSQL = "SELECT count() AS zk_root_nodes FROM system.zookeeper WHERE path='/'"
+
+ systemReplicationExistsSQL = "SELECT count() AS replication_queue_exists FROM system.tables WHERE database='system' AND name='replication_queue'"
+ systemReplicationNumTriesSQL = "SELECT countIf(num_tries>1) AS replication_num_tries_replicas, countIf(num_tries>100) AS replication_too_many_tries_replicas FROM system.replication_queue"
+
+ systemDetachedPartsSQL = "SELECT count() AS detached_parts FROM system.detached_parts"
+
+ systemDictionariesSQL = "SELECT origin, status, bytes_allocated FROM system.dictionaries"
+
+ systemMutationSQL = "SELECT countIf(latest_fail_time>toDateTime('0000-00-00 00:00:00') AND is_done=0) AS failed, countIf(latest_fail_time=toDateTime('0000-00-00 00:00:00') AND is_done=0) AS running, countIf(is_done=1) AS completed FROM system.mutations"
+ systemDisksSQL = "SELECT name, path, toUInt64(100*free_space / total_space) AS free_space_percent, toUInt64( 100 * keep_free_space / total_space) AS keep_free_space_percent FROM system.disks"
+ systemProcessesSQL = "SELECT multiIf(positionCaseInsensitive(query,'select')=1,'select',positionCaseInsensitive(query,'insert')=1,'insert','other') AS query_type, quantile\n(0.5)(elapsed) AS p50, quantile(0.9)(elapsed) AS p90, max(elapsed) AS longest_running FROM system.processes GROUP BY query_type"
+
+ systemTextLogExistsSQL = "SELECT count() AS text_log_exists FROM system.tables WHERE database='system' AND name='text_log'"
+ systemTextLogSQL = "SELECT count() AS messages_last_10_min, level FROM system.text_log WHERE level <= 'Notice' AND event_time >= now() - INTERVAL 600 SECOND GROUP BY level"
+)
+
+var commonMetrics = map[string]string{
+ "events": systemEventsSQL,
+ "metrics": systemMetricsSQL,
+ "asynchronous_metrics": systemAsyncMetricsSQL,
+}
+
+var _ telegraf.ServiceInput = &ClickHouse{}
diff --git a/plugins/inputs/clickhouse/clickhouse_test.go b/plugins/inputs/clickhouse/clickhouse_test.go
new file mode 100644
index 0000000000000..68a4438442d12
--- /dev/null
+++ b/plugins/inputs/clickhouse/clickhouse_test.go
@@ -0,0 +1,587 @@
+package clickhouse
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestClusterIncludeExcludeFilter(t *testing.T) {
+ ch := ClickHouse{}
+ if assert.Equal(t, "", ch.clusterIncludeExcludeFilter()) {
+ ch.ClusterExclude = []string{"test_cluster"}
+ assert.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter())
+
+ ch.ClusterExclude = []string{"test_cluster"}
+ ch.ClusterInclude = []string{"cluster"}
+ assert.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter())
+
+ ch.ClusterExclude = []string{}
+ ch.ClusterInclude = []string{"cluster1", "cluster2"}
+ assert.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter())
+
+ ch.ClusterExclude = []string{"cluster1", "cluster2"}
+ ch.ClusterInclude = []string{}
+ assert.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter())
+ }
+}
+
+func TestChInt64(t *testing.T) {
+ assets := map[string]uint64{
+ `"1"`: 1,
+ "1": 1,
+ "42": 42,
+ `"42"`: 42,
+ "18446743937525109187": 18446743937525109187,
+ }
+ for src, expected := range assets {
+ var v chUInt64
+ if err := v.UnmarshalJSON([]byte(src)); assert.NoError(t, err) {
+ assert.Equal(t, expected, uint64(v))
+ }
+ }
+}
+
+func TestGather(t *testing.T) {
+ var (
+ ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ type result struct {
+ Data interface{} `json:"data"`
+ }
+ enc := json.NewEncoder(w)
+ switch query := r.URL.Query().Get("query"); {
+ case strings.Contains(query, "system.parts"):
+ enc.Encode(result{
+ Data: []struct {
+ Database string `json:"database"`
+ Table string `json:"table"`
+ Bytes chUInt64 `json:"bytes"`
+ Parts chUInt64 `json:"parts"`
+ Rows chUInt64 `json:"rows"`
+ }{
+ {
+ Database: "test_database",
+ Table: "test_table",
+ Bytes: 1,
+ Parts: 10,
+ Rows: 100,
+ },
+ },
+ })
+ case strings.Contains(query, "system.events"):
+ enc.Encode(result{
+ Data: []struct {
+ Metric string `json:"metric"`
+ Value chUInt64 `json:"value"`
+ }{
+ {
+ Metric: "TestSystemEvent",
+ Value: 1000,
+ },
+ {
+ Metric: "TestSystemEvent2",
+ Value: 2000,
+ },
+ },
+ })
+ case strings.Contains(query, "system.metrics"):
+ enc.Encode(result{
+ Data: []struct {
+ Metric string `json:"metric"`
+ Value chUInt64 `json:"value"`
+ }{
+ {
+ Metric: "TestSystemMetric",
+ Value: 1000,
+ },
+ {
+ Metric: "TestSystemMetric2",
+ Value: 2000,
+ },
+ },
+ })
+ case strings.Contains(query, "system.asynchronous_metrics"):
+ enc.Encode(result{
+ Data: []struct {
+ Metric string `json:"metric"`
+ Value chUInt64 `json:"value"`
+ }{
+ {
+ Metric: "TestSystemAsynchronousMetric",
+ Value: 1000,
+ },
+ {
+ Metric: "TestSystemAsynchronousMetric2",
+ Value: 2000,
+ },
+ },
+ })
+ case strings.Contains(query, "zk_exists"):
+ enc.Encode(result{
+ Data: []struct {
+ ZkExists chUInt64 `json:"zk_exists"`
+ }{
+ {
+ ZkExists: 1,
+ },
+ },
+ })
+ case strings.Contains(query, "zk_root_nodes"):
+ enc.Encode(result{
+ Data: []struct {
+ ZkRootNodes chUInt64 `json:"zk_root_nodes"`
+ }{
+ {
+ ZkRootNodes: 2,
+ },
+ },
+ })
+ case strings.Contains(query, "replication_queue_exists"):
+ enc.Encode(result{
+ Data: []struct {
+ ReplicationQueueExists chUInt64 `json:"replication_queue_exists"`
+ }{
+ {
+ ReplicationQueueExists: 1,
+ },
+ },
+ })
+ case strings.Contains(query, "replication_too_many_tries_replicas"):
+ enc.Encode(result{
+ Data: []struct {
+ TooManyTriesReplicas chUInt64 `json:"replication_too_many_tries_replicas"`
+ NumTriesReplicas chUInt64 `json:"replication_num_tries_replicas"`
+ }{
+ {
+ TooManyTriesReplicas: 10,
+ NumTriesReplicas: 100,
+ },
+ },
+ })
+ case strings.Contains(query, "system.detached_parts"):
+ enc.Encode(result{
+ Data: []struct {
+ DetachedParts chUInt64 `json:"detached_parts"`
+ }{
+ {
+ DetachedParts: 10,
+ },
+ },
+ })
+ case strings.Contains(query, "system.dictionaries"):
+ enc.Encode(result{
+ Data: []struct {
+ Origin string `json:"origin"`
+ Status string `json:"status"`
+ BytesAllocated chUInt64 `json:"bytes_allocated"`
+ }{
+ {
+ Origin: "default.test_dict",
+ Status: "NOT_LOADED",
+ BytesAllocated: 100,
+ },
+ },
+ })
+ case strings.Contains(query, "system.mutations"):
+ enc.Encode(result{
+ Data: []struct {
+ Failed chUInt64 `json:"failed"`
+ Completed chUInt64 `json:"completed"`
+ Running chUInt64 `json:"running"`
+ }{
+ {
+ Failed: 10,
+ Running: 1,
+ Completed: 100,
+ },
+ },
+ })
+ case strings.Contains(query, "system.disks"):
+ enc.Encode(result{
+ Data: []struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ FreePercent chUInt64 `json:"free_space_percent"`
+ KeepFreePercent chUInt64 `json:"keep_free_space_percent"`
+ }{
+ {
+ Name: "default",
+ Path: "/var/lib/clickhouse",
+ FreePercent: 1,
+ KeepFreePercent: 10,
+ },
+ },
+ })
+ case strings.Contains(query, "system.processes"):
+ enc.Encode(result{
+ Data: []struct {
+ QueryType string `json:"query_type"`
+ Percentile50 float64 `json:"p50"`
+ Percentile90 float64 `json:"p90"`
+ LongestRunning float64 `json:"longest_running"`
+ }{
+ {
+ QueryType: "select",
+ Percentile50: 0.1,
+ Percentile90: 0.5,
+ LongestRunning: 10,
+ },
+ {
+ QueryType: "insert",
+ Percentile50: 0.2,
+ Percentile90: 1.5,
+ LongestRunning: 100,
+ },
+ {
+ QueryType: "other",
+ Percentile50: 0.4,
+ Percentile90: 4.5,
+ LongestRunning: 1000,
+ },
+ },
+ })
+ case strings.Contains(query, "text_log_exists"):
+ enc.Encode(result{
+ Data: []struct {
+ TextLogExists chUInt64 `json:"text_log_exists"`
+ }{
+ {
+ TextLogExists: 1,
+ },
+ },
+ })
+ case strings.Contains(query, "system.text_log"):
+ enc.Encode(result{
+ Data: []struct {
+ Level string `json:"level"`
+ LastMessagesLast10Min chUInt64 `json:"messages_last_10_min"`
+ }{
+ {
+ Level: "Fatal",
+ LastMessagesLast10Min: 0,
+ },
+ {
+ Level: "Critical",
+ LastMessagesLast10Min: 10,
+ },
+ {
+ Level: "Error",
+ LastMessagesLast10Min: 20,
+ },
+ {
+ Level: "Warning",
+ LastMessagesLast10Min: 30,
+ },
+ {
+ Level: "Notice",
+ LastMessagesLast10Min: 40,
+ },
+ },
+ })
+ }
+ }))
+ ch = &ClickHouse{
+ Servers: []string{
+ ts.URL,
+ },
+ }
+ acc = &testutil.Accumulator{}
+ )
+ defer ts.Close()
+ ch.Gather(acc)
+
+ acc.AssertContainsTaggedFields(t, "clickhouse_tables",
+ map[string]interface{}{
+ "bytes": uint64(1),
+ "parts": uint64(10),
+ "rows": uint64(100),
+ },
+ map[string]string{
+ "source": "127.0.0.1",
+ "table": "test_table",
+ "database": "test_database",
+ },
+ )
+ acc.AssertContainsFields(t, "clickhouse_events",
+ map[string]interface{}{
+ "test_system_event": uint64(1000),
+ "test_system_event2": uint64(2000),
+ },
+ )
+ acc.AssertContainsFields(t, "clickhouse_metrics",
+ map[string]interface{}{
+ "test_system_metric": uint64(1000),
+ "test_system_metric2": uint64(2000),
+ },
+ )
+ acc.AssertContainsFields(t, "clickhouse_asynchronous_metrics",
+ map[string]interface{}{
+ "test_system_asynchronous_metric": uint64(1000),
+ "test_system_asynchronous_metric2": uint64(2000),
+ },
+ )
+ acc.AssertContainsFields(t, "clickhouse_zookeeper",
+ map[string]interface{}{
+ "root_nodes": uint64(2),
+ },
+ )
+ acc.AssertContainsFields(t, "clickhouse_replication_queue",
+ map[string]interface{}{
+ "too_many_tries_replicas": uint64(10),
+ "num_tries_replicas": uint64(100),
+ },
+ )
+ acc.AssertContainsFields(t, "clickhouse_detached_parts",
+ map[string]interface{}{
+ "detached_parts": uint64(10),
+ },
+ )
+ acc.AssertContainsTaggedFields(t, "clickhouse_dictionaries",
+ map[string]interface{}{
+ "is_loaded": uint64(0),
+ "bytes_allocated": uint64(100),
+ },
+ map[string]string{
+ "source": "127.0.0.1",
+ "dict_origin": "default.test_dict",
+ },
+ )
+ acc.AssertContainsFields(t, "clickhouse_mutations",
+ map[string]interface{}{
+ "running": uint64(1),
+ "failed": uint64(10),
+ "completed": uint64(100),
+ },
+ )
+ acc.AssertContainsTaggedFields(t, "clickhouse_disks",
+ map[string]interface{}{
+ "free_space_percent": uint64(1),
+ "keep_free_space_percent": uint64(10),
+ },
+ map[string]string{
+ "source": "127.0.0.1",
+ "name": "default",
+ "path": "/var/lib/clickhouse",
+ },
+ )
+ acc.AssertContainsTaggedFields(t, "clickhouse_processes",
+ map[string]interface{}{
+ "percentile_50": 0.1,
+ "percentile_90": 0.5,
+ "longest_running": float64(10),
+ },
+ map[string]string{
+ "source": "127.0.0.1",
+ "query_type": "select",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t, "clickhouse_processes",
+ map[string]interface{}{
+ "percentile_50": 0.2,
+ "percentile_90": 1.5,
+ "longest_running": float64(100),
+ },
+ map[string]string{
+ "source": "127.0.0.1",
+ "query_type": "insert",
+ },
+ )
+ acc.AssertContainsTaggedFields(t, "clickhouse_processes",
+ map[string]interface{}{
+ "percentile_50": 0.4,
+ "percentile_90": 4.5,
+ "longest_running": float64(1000),
+ },
+ map[string]string{
+ "source": "127.0.0.1",
+ "query_type": "other",
+ },
+ )
+
+ for i, level := range []string{"Fatal", "Critical", "Error", "Warning", "Notice"} {
+ acc.AssertContainsTaggedFields(t, "clickhouse_text_log",
+ map[string]interface{}{
+ "messages_last_10_min": uint64(i * 10),
+ },
+ map[string]string{
+ "source": "127.0.0.1",
+ "level": level,
+ },
+ )
+ }
+}
+
+func TestGatherWithSomeTablesNotExists(t *testing.T) {
+ var (
+ ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ type result struct {
+ Data interface{} `json:"data"`
+ }
+ enc := json.NewEncoder(w)
+ switch query := r.URL.Query().Get("query"); {
+ case strings.Contains(query, "zk_exists"):
+ enc.Encode(result{
+ Data: []struct {
+ ZkExists chUInt64 `json:"zk_exists"`
+ }{
+ {
+ ZkExists: 0,
+ },
+ },
+ })
+ case strings.Contains(query, "replication_queue_exists"):
+ enc.Encode(result{
+ Data: []struct {
+ ReplicationQueueExists chUInt64 `json:"replication_queue_exists"`
+ }{
+ {
+ ReplicationQueueExists: 0,
+ },
+ },
+ })
+ case strings.Contains(query, "text_log_exists"):
+ enc.Encode(result{
+ Data: []struct {
+ TextLogExists chUInt64 `json:"text_log_exists"`
+ }{
+ {
+ TextLogExists: 0,
+ },
+ },
+ })
+ }
+ }))
+ ch = &ClickHouse{
+ Servers: []string{
+ ts.URL,
+ },
+ Username: "default",
+ }
+ acc = &testutil.Accumulator{}
+ )
+ defer ts.Close()
+ ch.Gather(acc)
+
+ acc.AssertDoesNotContainMeasurement(t, "clickhouse_zookeeper")
+ acc.AssertDoesNotContainMeasurement(t, "clickhouse_replication_queue")
+ acc.AssertDoesNotContainMeasurement(t, "clickhouse_text_log")
+}
+
+func TestWrongJSONMarshalling(t *testing.T) {
+ var (
+ ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ type result struct {
+ Data interface{} `json:"data"`
+ }
+ enc := json.NewEncoder(w)
+ //wrong data section json
+ enc.Encode(result{
+ Data: []struct{}{},
+ })
+ }))
+ ch = &ClickHouse{
+ Servers: []string{
+ ts.URL,
+ },
+ Username: "default",
+ }
+ acc = &testutil.Accumulator{}
+ )
+ defer ts.Close()
+ ch.Gather(acc)
+
+ assert.Equal(t, 0, len(acc.Metrics))
+ allMeasurements := []string{
+ "clickhouse_events",
+ "clickhouse_metrics",
+ "clickhouse_asynchronous_metrics",
+ "clickhouse_tables",
+ "clickhouse_zookeeper",
+ "clickhouse_replication_queue",
+ "clickhouse_detached_parts",
+ "clickhouse_dictionaries",
+ "clickhouse_mutations",
+ "clickhouse_disks",
+ "clickhouse_processes",
+ "clickhouse_text_log",
+ }
+ assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors))
+}
+
+func TestOfflineServer(t *testing.T) {
+ var (
+ acc = &testutil.Accumulator{}
+ ch = &ClickHouse{
+ Servers: []string{
+ "http://wrong-domain.local:8123",
+ },
+ Username: "default",
+ HTTPClient: http.Client{
+ Timeout: 1 * time.Millisecond,
+ },
+ }
+ )
+ ch.Gather(acc)
+
+ assert.Equal(t, 0, len(acc.Metrics))
+ allMeasurements := []string{
+ "clickhouse_events",
+ "clickhouse_metrics",
+ "clickhouse_asynchronous_metrics",
+ "clickhouse_tables",
+ "clickhouse_zookeeper",
+ "clickhouse_replication_queue",
+ "clickhouse_detached_parts",
+ "clickhouse_dictionaries",
+ "clickhouse_mutations",
+ "clickhouse_disks",
+ "clickhouse_processes",
+ "clickhouse_text_log",
+ }
+ assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors))
+}
+
+func TestAutoDiscovery(t *testing.T) {
+ var (
+ ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ type result struct {
+ Data interface{} `json:"data"`
+ }
+ enc := json.NewEncoder(w)
+ switch query := r.URL.Query().Get("query"); {
+ case strings.Contains(query, "system.clusters"):
+ enc.Encode(result{
+ Data: []struct {
+ Cluster string `json:"test"`
+ Hostname string `json:"localhost"`
+ ShardNum chUInt64 `json:"shard_num"`
+ }{
+ {
+ Cluster: "test_database",
+ Hostname: "test_table",
+ ShardNum: 1,
+ },
+ },
+ })
+ }
+ }))
+ ch = &ClickHouse{
+ Servers: []string{
+ ts.URL,
+ },
+ Username: "default",
+ AutoDiscovery: true,
+ }
+ acc = &testutil.Accumulator{}
+ )
+ defer ts.Close()
+ ch.Gather(acc)
+
+}
diff --git a/plugins/inputs/clickhouse/dev/dhparam.pem b/plugins/inputs/clickhouse/dev/dhparam.pem
new file mode 100644
index 0000000000000..5ae6d7bbe7012
--- /dev/null
+++ b/plugins/inputs/clickhouse/dev/dhparam.pem
@@ -0,0 +1,13 @@
+-----BEGIN DH PARAMETERS-----
+MIICCAKCAgEAoo1x7wI5K57P1/AkHUmVWzKNfy46b/ni/QtClomTB78Ks1FP8dzs
+CQBW/pfL8yidxTialNhMRCZO1J+uPjTvd8dG8SFZzVylkF41LBNrUD+MLyh/b6Nr
+8uWf3tqYCtsiqsQsnq/oU7C29wn6UjhPPVbRRDPGyJUFOgp0ebPR0L2gOc5HhXSF
+Tt0fuWnvgZJBKGvyodby3p2CSheu8K6ZteVc8ZgHuanhCQA30nVN+yNQzyozlB2H
+B9jxTDPJy8+/4Mui3iiNyXg6FaiI9lWdH7xgKoZlHi8BWlLz5Se9JVNYg0dPrMTz
+K0itQyyTKUlK73x+1uPm6q1AJwz08EZiCXNbk58/Sf+pdwDmAO2QSRrERC73vnvc
+B1+4+Kf7RS7oYpAHknKm/MFnkCJLVIq1b6kikYcIgVCYe+Z1UytSmG1QfwdgL8QQ
+TVYVHBg4w07+s3/IJ1ekvNhdxpkmmevYt7GjohWu8vKkip4se+reNdo+sqLsgFKf
+1IuDMD36zn9FVukvs7e3BwZCTkdosGHvHGjA7zm2DwPPO16hCvJ4mE6ULLpp2NEw
+EBYWm3Tv6M/xtrF5Afyh0gAh7eL767/qsarbx6jlqs+dnh3LptqsE3WerWK54+0B
+3Hr5CVfgYbeXuW2HeFb+fS6CNUWmiAsq1XRiz5p16hpeMGYN/qyF1IsCAQI=
+-----END DH PARAMETERS-----
diff --git a/plugins/inputs/clickhouse/dev/docker-compose.yml b/plugins/inputs/clickhouse/dev/docker-compose.yml
new file mode 100644
index 0000000000000..c34ee9320d931
--- /dev/null
+++ b/plugins/inputs/clickhouse/dev/docker-compose.yml
@@ -0,0 +1,27 @@
+version: '3'
+
+services:
+ clickhouse:
+# choose `:latest` after resolve https://github.com/ClickHouse/ClickHouse/issues/13057
+ image: docker.io/yandex/clickhouse-server:${CLICKHOUSE_VERSION:-latest}
+ volumes:
+ - ./test_dictionary.xml:/etc/clickhouse-server/01-test_dictionary.xml
+ - ./zookeeper.xml:/etc/clickhouse-server/config.d/00-zookeeper.xml
+ - ./tls_settings.xml:/etc/clickhouse-server/config.d/01-tls_settings.xml
+ # please comment text_log.xml when CLICKHOUSE_VERSION = 19.16
+ - ./text_log.xml:/etc/clickhouse-server/config.d/02-text_log.xml
+ - ./part_log.xml:/etc/clickhouse-server/config.d/03-part_log.xml
+ - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem
+ - ../../../../testutil/pki/serverkey.pem:/etc/clickhouse-server/server.key
+ - ../../../../testutil/pki/servercert.pem:/etc/clickhouse-server/server.crt
+ ports:
+ - 8123:8123
+ - 8443:8443
+ - 9000:9000
+ - 9009:9009
+ zookeeper:
+ image: docker.io/zookeeper:3.5.6
+ volumes:
+ - /var/lib/zookeeper
+ ports:
+ - 2181:2181
diff --git a/plugins/inputs/clickhouse/dev/part_log.xml b/plugins/inputs/clickhouse/dev/part_log.xml
new file mode 100644
index 0000000000000..e16a23894f7b7
--- /dev/null
+++ b/plugins/inputs/clickhouse/dev/part_log.xml
@@ -0,0 +1,12 @@
+
+
+ system
+
+ 7500
+
+ event_date
+
+
+
+
+
diff --git a/plugins/inputs/clickhouse/dev/telegraf.conf b/plugins/inputs/clickhouse/dev/telegraf.conf
new file mode 100644
index 0000000000000..b488ef611dcba
--- /dev/null
+++ b/plugins/inputs/clickhouse/dev/telegraf.conf
@@ -0,0 +1,12 @@
+### ClickHouse input plugin
+
+[[inputs.clickhouse]]
+ timeout = 2
+ username = "default"
+ servers = ["http://127.0.0.1:8123"]
+ auto_discovery = true
+ cluster_include = []
+ cluster_exclude = ["test_shard_localhost"]
+
+[[outputs.file]]
+ files = ["stdout"]
diff --git a/plugins/inputs/clickhouse/dev/telegraf_ssl.conf b/plugins/inputs/clickhouse/dev/telegraf_ssl.conf
new file mode 100644
index 0000000000000..62b1cce9cced9
--- /dev/null
+++ b/plugins/inputs/clickhouse/dev/telegraf_ssl.conf
@@ -0,0 +1,16 @@
+### ClickHouse input plugin
+
+[[inputs.clickhouse]]
+ timeout = 2
+ username = "default"
+ servers = ["https://127.0.0.1:8443"]
+ auto_discovery = true
+ cluster_include = []
+ cluster_exclude = ["test_shard_localhost"]
+ insecure_skip_verify = false
+ tls_cert = "./testutil/pki/clientcert.pem"
+ tls_key = "./testutil/pki/clientkey.pem"
+ tls_ca = "./testutil/pki/cacert.pem"
+
+[[outputs.file]]
+ files = ["stdout"]
diff --git a/plugins/inputs/clickhouse/dev/test_dictionary.xml b/plugins/inputs/clickhouse/dev/test_dictionary.xml
new file mode 100644
index 0000000000000..2f8f1ae5e26c5
--- /dev/null
+++ b/plugins/inputs/clickhouse/dev/test_dictionary.xml
@@ -0,0 +1,63 @@
+
+
+
+ default.test_dict
+
+
+
+
+ Nom
+
+
+
+
+ Nom
+ String
+
+
+ Code
+ String
+
+
+ Cur
+ String
+
+
+
+
+
+
+
+ 3306
+ wrong
+ wrong
+
+ 127.0.0.1
+ 1
+
+ default
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/plugins/inputs/clickhouse/dev/text_log.xml b/plugins/inputs/clickhouse/dev/text_log.xml
new file mode 100644
index 0000000000000..bcccea8b747da
--- /dev/null
+++ b/plugins/inputs/clickhouse/dev/text_log.xml
@@ -0,0 +1,12 @@
+
+
+ notice
+ system
+
+ 7500
+
+ event_date
+
+
+
+
diff --git a/plugins/inputs/clickhouse/dev/tls_settings.xml b/plugins/inputs/clickhouse/dev/tls_settings.xml
new file mode 100644
index 0000000000000..6268b6a129278
--- /dev/null
+++ b/plugins/inputs/clickhouse/dev/tls_settings.xml
@@ -0,0 +1,4 @@
+
+ 8443
+ 9440
+
diff --git a/plugins/inputs/clickhouse/dev/zookeeper.xml b/plugins/inputs/clickhouse/dev/zookeeper.xml
new file mode 100644
index 0000000000000..ffd3740000a43
--- /dev/null
+++ b/plugins/inputs/clickhouse/dev/zookeeper.xml
@@ -0,0 +1,19 @@
+
+
+
+ zookeeper
+ 2181
+
+
+
+
+
+ 1
+
+ localhost
+ 9000
+
+
+
+
+
diff --git a/plugins/inputs/cloud_pubsub/README.md b/plugins/inputs/cloud_pubsub/README.md
index 460cf4b82e45c..a4244b881cb62 100644
--- a/plugins/inputs/cloud_pubsub/README.md
+++ b/plugins/inputs/cloud_pubsub/README.md
@@ -7,8 +7,8 @@ and creates metrics using one of the supported [input data formats][].
### Configuration
```toml
-[[inputs.pubsub]]
-## Required. Name of Google Cloud Platform (GCP) Project that owns
+[[inputs.cloud_pubsub]]
+ ## Required. Name of Google Cloud Platform (GCP) Project that owns
## the given PubSub subscription.
project = "my-project"
@@ -31,7 +31,7 @@ and creates metrics using one of the supported [input data formats][].
## If the streaming pull for a PubSub Subscription fails (receiver),
## the agent attempts to restart receiving messages after this many seconds.
# retry_delay_seconds = 5
-
+
## Optional. Maximum byte length of a message to consume.
## Larger messages are dropped with an error. If less than 0 or unspecified,
## treated as no limit.
@@ -75,7 +75,7 @@ and creates metrics using one of the supported [input data formats][].
## 1. Note this setting does not limit the number of messages that can be
## processed concurrently (use "max_outstanding_messages" instead).
# max_receiver_go_routines = 0
-
+
## Optional. If true, Telegraf will attempt to base64 decode the
## PubSub message data before parsing. Many GCP services that
## output JSON to Google PubSub base64-encode the JSON payload.
diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go
index 845711e7d7d44..b418274f3b34a 100644
--- a/plugins/inputs/cloud_pubsub/pubsub.go
+++ b/plugins/inputs/cloud_pubsub/pubsub.go
@@ -5,16 +5,16 @@ import (
"fmt"
"sync"
- "cloud.google.com/go/pubsub"
"encoding/base64"
+ "time"
+
+ "cloud.google.com/go/pubsub"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
- "log"
- "time"
)
type empty struct{}
@@ -43,6 +43,8 @@ type PubSub struct {
Base64Data bool `toml:"base64_data"`
+ Log telegraf.Logger
+
sub subscription
stubSub func() subscription
@@ -134,14 +136,14 @@ func (ps *PubSub) receiveWithRetry(parentCtx context.Context) {
err := ps.startReceiver(parentCtx)
for err != nil && parentCtx.Err() == nil {
- log.Printf("E! [inputs.cloud_pubsub] Receiver for subscription %s exited with error: %v", ps.sub.ID(), err)
+ ps.Log.Errorf("Receiver for subscription %s exited with error: %v", ps.sub.ID(), err)
delay := defaultRetryDelaySeconds
if ps.RetryReceiveDelaySeconds > 0 {
delay = ps.RetryReceiveDelaySeconds
}
- log.Printf("I! [inputs.cloud_pubsub] Waiting %d seconds before attempting to restart receiver...", delay)
+ ps.Log.Infof("Waiting %d seconds before attempting to restart receiver...", delay)
time.Sleep(time.Duration(delay) * time.Second)
err = ps.startReceiver(parentCtx)
@@ -149,7 +151,7 @@ func (ps *PubSub) receiveWithRetry(parentCtx context.Context) {
}
func (ps *PubSub) startReceiver(parentCtx context.Context) error {
- log.Printf("I! [inputs.cloud_pubsub] Starting receiver for subscription %s...", ps.sub.ID())
+ ps.Log.Infof("Starting receiver for subscription %s...", ps.sub.ID())
cctx, ccancel := context.WithCancel(parentCtx)
err := ps.sub.Receive(cctx, func(ctx context.Context, msg message) {
if err := ps.onMessage(ctx, msg); err != nil {
@@ -159,7 +161,7 @@ func (ps *PubSub) startReceiver(parentCtx context.Context) error {
if err != nil {
ps.acc.AddError(fmt.Errorf("receiver for subscription %s exited: %v", ps.sub.ID(), err))
} else {
- log.Printf("I! [inputs.cloud_pubsub] subscription pull ended (no error, most likely stopped)")
+ ps.Log.Info("Subscription pull ended (no error, most likely stopped)")
}
ccancel()
return err
diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go
index 6233546aa80ee..2045cf4ccbc89 100644
--- a/plugins/inputs/cloud_pubsub/pubsub_test.go
+++ b/plugins/inputs/cloud_pubsub/pubsub_test.go
@@ -3,10 +3,11 @@ package cloud_pubsub
import (
"encoding/base64"
"errors"
+ "testing"
+
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
- "testing"
)
const (
@@ -26,6 +27,7 @@ func TestRunParse(t *testing.T) {
sub.receiver = testMessagesReceive(sub)
ps := &PubSub{
+ Log: testutil.Logger{},
parser: testParser,
stubSub: func() subscription { return sub },
Project: "projectIDontMatterForTests",
@@ -69,6 +71,7 @@ func TestRunBase64(t *testing.T) {
sub.receiver = testMessagesReceive(sub)
ps := &PubSub{
+ Log: testutil.Logger{},
parser: testParser,
stubSub: func() subscription { return sub },
Project: "projectIDontMatterForTests",
@@ -112,6 +115,7 @@ func TestRunInvalidMessages(t *testing.T) {
sub.receiver = testMessagesReceive(sub)
ps := &PubSub{
+ Log: testutil.Logger{},
parser: testParser,
stubSub: func() subscription { return sub },
Project: "projectIDontMatterForTests",
@@ -158,6 +162,7 @@ func TestRunOverlongMessages(t *testing.T) {
sub.receiver = testMessagesReceive(sub)
ps := &PubSub{
+ Log: testutil.Logger{},
parser: testParser,
stubSub: func() subscription { return sub },
Project: "projectIDontMatterForTests",
@@ -205,6 +210,7 @@ func TestRunErrorInSubscriber(t *testing.T) {
sub.receiver = testMessagesError(sub, errors.New("a fake error"))
ps := &PubSub{
+ Log: testutil.Logger{},
parser: testParser,
stubSub: func() subscription { return sub },
Project: "projectIDontMatterForTests",
diff --git a/plugins/inputs/cloud_pubsub_push/README.md b/plugins/inputs/cloud_pubsub_push/README.md
index 76725c9979bdb..3173b43361fb6 100644
--- a/plugins/inputs/cloud_pubsub_push/README.md
+++ b/plugins/inputs/cloud_pubsub_push/README.md
@@ -1,4 +1,4 @@
-# Google Cloud PubSub Push Input Service Plugin
+# Google Cloud PubSub Push Input Plugin
The Google Cloud PubSub Push listener is a service input plugin that listens for messages sent via an HTTP POST from [Google Cloud PubSub][pubsub].
The plugin expects messages in Google's Pub/Sub JSON Format ONLY.
diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go
index 8b83a440df462..b320daedbacc1 100644
--- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go
+++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go
@@ -6,7 +6,6 @@ import (
"encoding/base64"
"encoding/json"
"io/ioutil"
- "log"
"net"
"net/http"
"sync"
@@ -14,7 +13,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -33,6 +32,7 @@ type PubSubPush struct {
WriteTimeout internal.Duration
MaxBodySize internal.Size
AddMeta bool
+ Log telegraf.Logger
MaxUndeliveredMessages int `toml:"max_undelivered_messages"`
@@ -227,21 +227,21 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) {
var payload Payload
if err = json.Unmarshal(bytes, &payload); err != nil {
- log.Printf("E! [inputs.cloud_pubsub_push] Error decoding payload %s", err.Error())
+ p.Log.Errorf("Error decoding payload %s", err.Error())
res.WriteHeader(http.StatusBadRequest)
return
}
sDec, err := base64.StdEncoding.DecodeString(payload.Msg.Data)
if err != nil {
- log.Printf("E! [inputs.cloud_pubsub_push] Base64-Decode Failed %s", err.Error())
+ p.Log.Errorf("Base64-decode failed %s", err.Error())
res.WriteHeader(http.StatusBadRequest)
return
}
metrics, err := p.Parse(sDec)
if err != nil {
- log.Println("D! [inputs.cloud_pubsub_push] " + err.Error())
+ p.Log.Debug(err.Error())
res.WriteHeader(http.StatusBadRequest)
return
}
@@ -295,7 +295,7 @@ func (p *PubSubPush) receiveDelivered() {
ch <- true
} else {
ch <- false
- log.Println("D! [inputs.cloud_pubsub_push] Metric group failed to process")
+ p.Log.Debug("Metric group failed to process")
}
}
}
diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go
index 57734c70554f3..ae7601b20cccc 100644
--- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go
+++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go
@@ -16,8 +16,9 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/models"
+ "github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/testutil"
)
func TestServeHTTP(t *testing.T) {
@@ -118,6 +119,7 @@ func TestServeHTTP(t *testing.T) {
rr := httptest.NewRecorder()
pubPush := &PubSubPush{
+ Log: testutil.Logger{},
Path: "/",
MaxBodySize: internal.Size{
Size: test.maxsize,
@@ -183,10 +185,18 @@ func (tm *testMetricMaker) Name() string {
return "TestPlugin"
}
+func (tm *testMetricMaker) LogName() string {
+ return tm.Name()
+}
+
func (tm *testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric {
return metric
}
+func (tm *testMetricMaker) Log() telegraf.Logger {
+ return models.NewLogger("test", "test", "")
+}
+
type testOutput struct {
// if true, mock a write failure
failWrite bool
diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md
index dfb5bf95ded3b..674dd0ac4363a 100644
--- a/plugins/inputs/cloudwatch/README.md
+++ b/plugins/inputs/cloudwatch/README.md
@@ -1,4 +1,4 @@
-# Amazon CloudWatch Statistics Input
+# Amazon CloudWatch Statistics Input Plugin
This plugin will pull Metric Statistics from Amazon CloudWatch.
@@ -17,7 +17,7 @@ API endpoint. In the following order the plugin will attempt to authenticate.
```toml
[[inputs.cloudwatch]]
- ## Amazon Region (required)
+ ## Amazon Region
region = "us-east-1"
## Amazon Credentials
@@ -28,12 +28,12 @@ API endpoint. In the following order the plugin will attempt to authenticate.
## 4) environment variables
## 5) shared credentials file
## 6) EC2 Instance Profile
- #access_key = ""
- #secret_key = ""
- #token = ""
- #role_arn = ""
- #profile = ""
- #shared_credential_file = ""
+ # access_key = ""
+ # secret_key = ""
+ # token = ""
+ # role_arn = ""
+ # profile = ""
+ # shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
@@ -54,32 +54,46 @@ API endpoint. In the following order the plugin will attempt to authenticate.
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = "5m"
- ## Override global run interval (optional - defaults to global interval)
- ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid
+ ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"
+ ## Configure the TTL for the internal cache of metrics.
+ # cache_ttl = "1h"
+
## Metric Statistic Namespace (required)
namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
- ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
- ## maximum of 400. Optional - default value is 200.
+ ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
+ ## maximum of 50.
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
- ratelimit = 200
+ # ratelimit = 25
+
+ ## Timeout for http requests made by the cloudwatch client.
+ # timeout = "5s"
+
+ ## Namespace-wide statistic filters. These allow fewer queries to be made to
+ ## cloudwatch.
+ # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
+ # statistic_exclude = []
- ## Metrics to Pull (optional)
+ ## Metrics to Pull
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
- [[inputs.cloudwatch.metrics]]
- names = ["Latency", "RequestCount"]
-
- ## Dimension filters for Metric. These are optional however all dimensions
- ## defined for the metric names must be specified in order to retrieve
- ## the metric statistics.
- [[inputs.cloudwatch.metrics.dimensions]]
- name = "LoadBalancerName"
- value = "p-example"
+ #[[inputs.cloudwatch.metrics]]
+ # names = ["Latency", "RequestCount"]
+ #
+ # ## Statistic filters for Metric. These allow for retrieving specific
+ # ## statistics for an individual metric.
+ # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
+ # # statistic_exclude = []
+ #
+ # ## Dimension filters for Metric. All dimensions defined for the metric names
+ # ## must be specified in order to retrieve the metric statistics.
+ # [[inputs.cloudwatch.metrics.dimensions]]
+ # name = "LoadBalancerName"
+ # value = "p-example"
```
#### Requirements and Terminology
@@ -97,17 +111,21 @@ wildcard dimension is ignored.
Example:
```
-[[inputs.cloudwatch.metrics]]
- names = ["Latency"]
+[[inputs.cloudwatch]]
+ period = "1m"
+ interval = "5m"
- ## Dimension filters for Metric (optional)
- [[inputs.cloudwatch.metrics.dimensions]]
- name = "LoadBalancerName"
- value = "p-example"
+ [[inputs.cloudwatch.metrics]]
+ names = ["Latency"]
- [[inputs.cloudwatch.metrics.dimensions]]
- name = "AvailabilityZone"
- value = "*"
+ ## Dimension filters for Metric (optional)
+ [[inputs.cloudwatch.metrics.dimensions]]
+ name = "LoadBalancerName"
+ value = "p-example"
+
+ [[inputs.cloudwatch.metrics.dimensions]]
+ name = "AvailabilityZone"
+ value = "*"
```
If the following ELBs are available:
@@ -124,9 +142,11 @@ Then 2 metrics will be output:
If the `AvailabilityZone` wildcard dimension was omitted, then a single metric (name: `p-example`)
would be exported containing the aggregate values of the ELB across availability zones.
+To maximize efficiency and savings, consider making fewer requests by increasing `interval` but keeping `period` at the duration you would like metrics to be reported. The above example will request metrics from Cloudwatch every 5 minutes but will output five metrics timestamped one minute apart.
+
#### Restrictions and Limitations
- CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html)
-- CloudWatch API usage incurs cost - see [GetMetricStatistics Pricing](https://aws.amazon.com/cloudwatch/pricing/)
+- CloudWatch API usage incurs cost - see [GetMetricData Pricing](https://aws.amazon.com/cloudwatch/pricing/)
### Measurements & Fields:
@@ -147,7 +167,6 @@ Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wik
- All measurements have the following tags:
- region (CloudWatch Region)
- - unit (CloudWatch Metric Unit)
- {dimension-name} (Cloudwatch Dimension value - one for each metric dimension)
### Troubleshooting:
@@ -161,12 +180,34 @@ aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 --metric-name
If the expected metrics are not returned, you can try getting them manually
for a short period of time:
```
-aws cloudwatch get-metric-statistics --namespace AWS/EC2 --region us-east-1 --period 300 --start-time 2018-07-01T00:00:00Z --end-time 2018-07-01T00:15:00Z --statistics Average --metric-name CPUCreditBalance --dimensions Name=InstanceId,Value=i-deadbeef
+aws cloudwatch get-metric-data \
+ --start-time 2018-07-01T00:00:00Z \
+ --end-time 2018-07-01T00:15:00Z \
+ --metric-data-queries '[
+ {
+ "Id": "avgCPUCreditBalance",
+ "MetricStat": {
+ "Metric": {
+ "Namespace": "AWS/EC2",
+ "MetricName": "CPUCreditBalance",
+ "Dimensions": [
+ {
+ "Name": "InstanceId",
+ "Value": "i-deadbeef"
+ }
+ ]
+ },
+ "Period": 300,
+ "Stat": "Average"
+ },
+ "Label": "avgCPUCreditBalance"
+ }
+]'
```
### Example Output:
```
$ ./telegraf --config telegraf.conf --input-filter cloudwatch --test
-> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
+> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
```
diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go
index 626511e2ff352..042660a50ff3c 100644
--- a/plugins/inputs/cloudwatch/cloudwatch.go
+++ b/plugins/inputs/cloudwatch/cloudwatch.go
@@ -2,66 +2,84 @@ package cloudwatch
import (
"fmt"
+ "net"
+ "net/http"
+ "strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
-
"github.com/aws/aws-sdk-go/service/cloudwatch"
-
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ internalaws "github.com/influxdata/telegraf/config/aws"
+ "github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
- internalaws "github.com/influxdata/telegraf/internal/config/aws"
"github.com/influxdata/telegraf/internal/limiter"
+ "github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/inputs"
)
-type (
- CloudWatch struct {
- Region string `toml:"region"`
- AccessKey string `toml:"access_key"`
- SecretKey string `toml:"secret_key"`
- RoleARN string `toml:"role_arn"`
- Profile string `toml:"profile"`
- Filename string `toml:"shared_credential_file"`
- Token string `toml:"token"`
- EndpointURL string `toml:"endpoint_url"`
-
- Period internal.Duration `toml:"period"`
- Delay internal.Duration `toml:"delay"`
- Namespace string `toml:"namespace"`
- Metrics []*Metric `toml:"metrics"`
- CacheTTL internal.Duration `toml:"cache_ttl"`
- RateLimit int `toml:"ratelimit"`
- client cloudwatchClient
- metricCache *MetricCache
- windowStart time.Time
- windowEnd time.Time
- }
+// CloudWatch contains the configuration and cache for the cloudwatch plugin.
+type CloudWatch struct {
+ Region string `toml:"region"`
+ AccessKey string `toml:"access_key"`
+ SecretKey string `toml:"secret_key"`
+ RoleARN string `toml:"role_arn"`
+ Profile string `toml:"profile"`
+ CredentialPath string `toml:"shared_credential_file"`
+ Token string `toml:"token"`
+ EndpointURL string `toml:"endpoint_url"`
+ StatisticExclude []string `toml:"statistic_exclude"`
+ StatisticInclude []string `toml:"statistic_include"`
+ Timeout config.Duration `toml:"timeout"`
+
+ Period config.Duration `toml:"period"`
+ Delay config.Duration `toml:"delay"`
+ Namespace string `toml:"namespace"`
+ Metrics []*Metric `toml:"metrics"`
+ CacheTTL config.Duration `toml:"cache_ttl"`
+ RateLimit int `toml:"ratelimit"`
+
+ Log telegraf.Logger `toml:"-"`
+
+ client cloudwatchClient
+ statFilter filter.Filter
+ metricCache *metricCache
+ queryDimensions map[string]*map[string]string
+ windowStart time.Time
+ windowEnd time.Time
+}
- Metric struct {
- MetricNames []string `toml:"names"`
- Dimensions []*Dimension `toml:"dimensions"`
- }
+// Metric defines a simplified Cloudwatch metric.
+type Metric struct {
+ StatisticExclude *[]string `toml:"statistic_exclude"`
+ StatisticInclude *[]string `toml:"statistic_include"`
+ MetricNames []string `toml:"names"`
+ Dimensions []*Dimension `toml:"dimensions"`
+}
- Dimension struct {
- Name string `toml:"name"`
- Value string `toml:"value"`
- }
+// Dimension defines a simplified Cloudwatch dimension (provides metric filtering).
+type Dimension struct {
+ Name string `toml:"name"`
+ Value string `toml:"value"`
+}
- MetricCache struct {
- TTL time.Duration
- Fetched time.Time
- Metrics []*cloudwatch.Metric
- }
+// metricCache caches metrics, their filters, and generated queries.
+type metricCache struct {
+ ttl time.Duration
+ built time.Time
+ metrics []filteredMetric
+ queries []*cloudwatch.MetricDataQuery
+}
- cloudwatchClient interface {
- ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error)
- GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error)
- }
-)
+type cloudwatchClient interface {
+ ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error)
+ GetMetricData(*cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error)
+}
+// SampleConfig returns the default configuration of the Cloudwatch input plugin.
func (c *CloudWatch) SampleConfig() string {
return `
## Amazon Region
@@ -75,12 +93,12 @@ func (c *CloudWatch) SampleConfig() string {
## 4) environment variables
## 5) shared credentials file
## 6) EC2 Instance Profile
- #access_key = ""
- #secret_key = ""
- #token = ""
- #role_arn = ""
- #profile = ""
- #shared_credential_file = ""
+ # access_key = ""
+ # secret_key = ""
+ # token = ""
+ # role_arn = ""
+ # profile = ""
+ # shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
@@ -106,45 +124,177 @@ func (c *CloudWatch) SampleConfig() string {
interval = "5m"
## Configure the TTL for the internal cache of metrics.
- ## Defaults to 1 hr if not specified
- #cache_ttl = "10m"
+ # cache_ttl = "1h"
## Metric Statistic Namespace (required)
namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
- ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
- ## maximum of 400. Optional - default value is 200.
+ ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
+ ## maximum of 50.
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
- ratelimit = 200
+ # ratelimit = 25
+
+ ## Timeout for http requests made by the cloudwatch client.
+ # timeout = "5s"
- ## Metrics to Pull (optional)
+ ## Namespace-wide statistic filters. These allow fewer queries to be made to
+ ## cloudwatch.
+ # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
+ # statistic_exclude = []
+
+ ## Metrics to Pull
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
#[[inputs.cloudwatch.metrics]]
# names = ["Latency", "RequestCount"]
#
- # ## Dimension filters for Metric. These are optional however all dimensions
- # ## defined for the metric names must be specified in order to retrieve
- # ## the metric statistics.
+ # ## Statistic filters for Metric. These allow for retrieving specific
+ # ## statistics for an individual metric.
+ # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
+ # # statistic_exclude = []
+ #
+ # ## Dimension filters for Metric. All dimensions defined for the metric names
+ # ## must be specified in order to retrieve the metric statistics.
# [[inputs.cloudwatch.metrics.dimensions]]
# name = "LoadBalancerName"
# value = "p-example"
`
}
+// Description returns a one-sentence description on the Cloudwatch input plugin.
func (c *CloudWatch) Description() string {
return "Pull Metric Statistics from Amazon CloudWatch"
}
-func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) {
- var metrics []*cloudwatch.Metric
+// Gather takes in an accumulator and adds the metrics that the Input
+// gathers. This is called every "interval".
+func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
+ if c.statFilter == nil {
+ var err error
+ // Set config level filter (won't change throughout life of plugin).
+ c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude)
+ if err != nil {
+ return err
+ }
+ }
+
+ if c.client == nil {
+ c.initializeCloudWatch()
+ }
+
+ filteredMetrics, err := getFilteredMetrics(c)
+ if err != nil {
+ return err
+ }
+
+ c.updateWindow(time.Now())
+
+ // Get all of the possible queries so we can send groups of 100.
+ queries, err := c.getDataQueries(filteredMetrics)
+ if err != nil {
+ return err
+ }
+
+ if len(queries) == 0 {
+ return nil
+ }
+
+ // Limit concurrency or we can easily exhaust user connection limit.
+ // See cloudwatch API request limits:
+ // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
+ lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)
+ defer lmtr.Stop()
+ wg := sync.WaitGroup{}
+ rLock := sync.Mutex{}
+
+ results := []*cloudwatch.MetricDataResult{}
+
+ // 100 is the maximum number of metric data queries a `GetMetricData` request can contain.
+ batchSize := 500
+ var batches [][]*cloudwatch.MetricDataQuery
+
+ for batchSize < len(queries) {
+ queries, batches = queries[batchSize:], append(batches, queries[0:batchSize:batchSize])
+ }
+ batches = append(batches, queries)
+
+ for i := range batches {
+ wg.Add(1)
+ <-lmtr.C
+ go func(inm []*cloudwatch.MetricDataQuery) {
+ defer wg.Done()
+ result, err := c.gatherMetrics(c.getDataInputs(inm))
+ if err != nil {
+ acc.AddError(err)
+ return
+ }
+
+ rLock.Lock()
+ results = append(results, result...)
+ rLock.Unlock()
+ }(batches[i])
+ }
+
+ wg.Wait()
+
+ return c.aggregateMetrics(acc, results)
+}
+
+func (c *CloudWatch) initializeCloudWatch() {
+ credentialConfig := &internalaws.CredentialConfig{
+ Region: c.Region,
+ AccessKey: c.AccessKey,
+ SecretKey: c.SecretKey,
+ RoleARN: c.RoleARN,
+ Profile: c.Profile,
+ Filename: c.CredentialPath,
+ Token: c.Token,
+ EndpointURL: c.EndpointURL,
+ }
+ configProvider := credentialConfig.Credentials()
+
+ cfg := &aws.Config{
+ HTTPClient: &http.Client{
+ // use values from DefaultTransport
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ },
+ Timeout: time.Duration(c.Timeout),
+ },
+ }
+
+ loglevel := aws.LogOff
+ c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel))
+}
+
+type filteredMetric struct {
+ metrics []*cloudwatch.Metric
+ statFilter filter.Filter
+}
+
+// getFilteredMetrics returns metrics specified in the config file or metrics listed from Cloudwatch.
+func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) {
+ if c.metricCache != nil && c.metricCache.isValid() {
+ return c.metricCache.metrics, nil
+ }
+
+ fMetrics := []filteredMetric{}
// check for provided metric filter
if c.Metrics != nil {
- metrics = []*cloudwatch.Metric{}
for _, m := range c.Metrics {
- if !hasWilcard(m.Dimensions) {
+ metrics := []*cloudwatch.Metric{}
+ if !hasWildcard(m.Dimensions) {
dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
for k, d := range m.Dimensions {
dimensions[k] = &cloudwatch.Dimension{
@@ -176,234 +326,288 @@ func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) {
}
}
}
+
+ if m.StatisticExclude == nil {
+ m.StatisticExclude = &c.StatisticExclude
+ }
+ if m.StatisticInclude == nil {
+ m.StatisticInclude = &c.StatisticInclude
+ }
+ statFilter, err := filter.NewIncludeExcludeFilter(*m.StatisticInclude, *m.StatisticExclude)
+ if err != nil {
+ return nil, err
+ }
+
+ fMetrics = append(fMetrics, filteredMetric{
+ metrics: metrics,
+ statFilter: statFilter,
+ })
}
} else {
- var err error
- metrics, err = c.fetchNamespaceMetrics()
+ metrics, err := c.fetchNamespaceMetrics()
if err != nil {
return nil, err
}
- }
- return metrics, nil
-}
-func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
- if c.client == nil {
- c.initializeCloudWatch()
+ fMetrics = []filteredMetric{{
+ metrics: metrics,
+ statFilter: c.statFilter,
+ }}
}
- metrics, err := SelectMetrics(c)
- if err != nil {
- return err
+ c.metricCache = &metricCache{
+ metrics: fMetrics,
+ built: time.Now(),
+ ttl: time.Duration(c.CacheTTL),
}
- now := time.Now()
+ return fMetrics, nil
+}
- err = c.updateWindow(now)
- if err != nil {
- return err
+// fetchNamespaceMetrics retrieves available metrics for a given CloudWatch namespace.
+func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) {
+ metrics := []*cloudwatch.Metric{}
+
+ var token *string
+ params := &cloudwatch.ListMetricsInput{
+ Namespace: aws.String(c.Namespace),
+ Dimensions: []*cloudwatch.DimensionFilter{},
+ NextToken: token,
+ MetricName: nil,
}
- // limit concurrency or we can easily exhaust user connection limit
- // see cloudwatch API request limits:
- // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
- lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)
- defer lmtr.Stop()
- var wg sync.WaitGroup
- wg.Add(len(metrics))
- for _, m := range metrics {
- <-lmtr.C
- go func(inm *cloudwatch.Metric) {
- defer wg.Done()
- acc.AddError(c.gatherMetric(acc, inm))
- }(m)
+ for {
+ resp, err := c.client.ListMetrics(params)
+ if err != nil {
+ return nil, err
+ }
+
+ metrics = append(metrics, resp.Metrics...)
+ if resp.NextToken == nil {
+ break
+ }
+
+ params.NextToken = resp.NextToken
}
- wg.Wait()
- return nil
+ return metrics, nil
}
-func (c *CloudWatch) updateWindow(relativeTo time.Time) error {
- windowEnd := relativeTo.Add(-c.Delay.Duration)
+func (c *CloudWatch) updateWindow(relativeTo time.Time) {
+ windowEnd := relativeTo.Add(-time.Duration(c.Delay))
if c.windowEnd.IsZero() {
// this is the first run, no window info, so just get a single period
- c.windowStart = windowEnd.Add(-c.Period.Duration)
+ c.windowStart = windowEnd.Add(-time.Duration(c.Period))
} else {
// subsequent window, start where last window left off
c.windowStart = c.windowEnd
}
c.windowEnd = windowEnd
-
- return nil
}
-func init() {
- inputs.Add("cloudwatch", func() telegraf.Input {
- ttl, _ := time.ParseDuration("1hr")
- return &CloudWatch{
- CacheTTL: internal.Duration{Duration: ttl},
- RateLimit: 200,
- }
- })
-}
+// getDataQueries gets all of the possible queries so we can maximize the request payload.
+func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudwatch.MetricDataQuery, error) {
+ if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() {
+ return c.metricCache.queries, nil
+ }
-/*
- * Initialize CloudWatch client
- */
-func (c *CloudWatch) initializeCloudWatch() error {
- credentialConfig := &internalaws.CredentialConfig{
- Region: c.Region,
- AccessKey: c.AccessKey,
- SecretKey: c.SecretKey,
- RoleARN: c.RoleARN,
- Profile: c.Profile,
- Filename: c.Filename,
- Token: c.Token,
- EndpointURL: c.EndpointURL,
+ c.queryDimensions = map[string]*map[string]string{}
+
+ dataQueries := []*cloudwatch.MetricDataQuery{}
+ for i, filtered := range filteredMetrics {
+ for j, metric := range filtered.metrics {
+ id := strconv.Itoa(j) + "_" + strconv.Itoa(i)
+ dimension := ctod(metric.Dimensions)
+ if filtered.statFilter.Match("average") {
+ c.queryDimensions["average_"+id] = dimension
+ dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
+ Id: aws.String("average_" + id),
+ Label: aws.String(snakeCase(*metric.MetricName + "_average")),
+ MetricStat: &cloudwatch.MetricStat{
+ Metric: metric,
+ Period: aws.Int64(int64(time.Duration(c.Period).Seconds())),
+ Stat: aws.String(cloudwatch.StatisticAverage),
+ },
+ })
+ }
+ if filtered.statFilter.Match("maximum") {
+ c.queryDimensions["maximum_"+id] = dimension
+ dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
+ Id: aws.String("maximum_" + id),
+ Label: aws.String(snakeCase(*metric.MetricName + "_maximum")),
+ MetricStat: &cloudwatch.MetricStat{
+ Metric: metric,
+ Period: aws.Int64(int64(time.Duration(c.Period).Seconds())),
+ Stat: aws.String(cloudwatch.StatisticMaximum),
+ },
+ })
+ }
+ if filtered.statFilter.Match("minimum") {
+ c.queryDimensions["minimum_"+id] = dimension
+ dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
+ Id: aws.String("minimum_" + id),
+ Label: aws.String(snakeCase(*metric.MetricName + "_minimum")),
+ MetricStat: &cloudwatch.MetricStat{
+ Metric: metric,
+ Period: aws.Int64(int64(time.Duration(c.Period).Seconds())),
+ Stat: aws.String(cloudwatch.StatisticMinimum),
+ },
+ })
+ }
+ if filtered.statFilter.Match("sum") {
+ c.queryDimensions["sum_"+id] = dimension
+ dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
+ Id: aws.String("sum_" + id),
+ Label: aws.String(snakeCase(*metric.MetricName + "_sum")),
+ MetricStat: &cloudwatch.MetricStat{
+ Metric: metric,
+ Period: aws.Int64(int64(time.Duration(c.Period).Seconds())),
+ Stat: aws.String(cloudwatch.StatisticSum),
+ },
+ })
+ }
+ if filtered.statFilter.Match("sample_count") {
+ c.queryDimensions["sample_count_"+id] = dimension
+ dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
+ Id: aws.String("sample_count_" + id),
+ Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")),
+ MetricStat: &cloudwatch.MetricStat{
+ Metric: metric,
+ Period: aws.Int64(int64(time.Duration(c.Period).Seconds())),
+ Stat: aws.String(cloudwatch.StatisticSampleCount),
+ },
+ })
+ }
+ }
}
- configProvider := credentialConfig.Credentials()
- c.client = cloudwatch.New(configProvider)
- return nil
-}
+ if len(dataQueries) == 0 {
+ c.Log.Debug("no metrics found to collect")
+ return nil, nil
+ }
-/*
- * Fetch available metrics for given CloudWatch Namespace
- */
-func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) {
- if c.metricCache != nil && c.metricCache.IsValid() {
- return c.metricCache.Metrics, nil
+ if c.metricCache == nil {
+ c.metricCache = &metricCache{
+ queries: dataQueries,
+ built: time.Now(),
+ ttl: time.Duration(c.CacheTTL),
+ }
+ } else {
+ c.metricCache.queries = dataQueries
}
- metrics := []*cloudwatch.Metric{}
+ return dataQueries, nil
+}
- var token *string
- for more := true; more; {
- params := &cloudwatch.ListMetricsInput{
- Namespace: aws.String(c.Namespace),
- Dimensions: []*cloudwatch.DimensionFilter{},
- NextToken: token,
- MetricName: nil,
- }
+// gatherMetrics gets metric data from Cloudwatch.
+func (c *CloudWatch) gatherMetrics(
+ params *cloudwatch.GetMetricDataInput,
+) ([]*cloudwatch.MetricDataResult, error) {
+ results := []*cloudwatch.MetricDataResult{}
- resp, err := c.client.ListMetrics(params)
+ for {
+ resp, err := c.client.GetMetricData(params)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to get metric data: %v", err)
}
- metrics = append(metrics, resp.Metrics...)
-
- token = resp.NextToken
- more = token != nil
- }
-
- c.metricCache = &MetricCache{
- Metrics: metrics,
- Fetched: time.Now(),
- TTL: c.CacheTTL.Duration,
+ results = append(results, resp.MetricDataResults...)
+ if resp.NextToken == nil {
+ break
+ }
+ params.NextToken = resp.NextToken
}
- return metrics, nil
+ return results, nil
}
-/*
- * Gather given Metric and emit any error
- */
-func (c *CloudWatch) gatherMetric(
+func (c *CloudWatch) aggregateMetrics(
acc telegraf.Accumulator,
- metric *cloudwatch.Metric,
+ metricDataResults []*cloudwatch.MetricDataResult,
) error {
- params := c.getStatisticsInput(metric)
- resp, err := c.client.GetMetricStatistics(params)
- if err != nil {
- return err
- }
+ var (
+ grouper = metric.NewSeriesGrouper()
+ namespace = sanitizeMeasurement(c.Namespace)
+ )
- for _, point := range resp.Datapoints {
- tags := map[string]string{
- "region": c.Region,
- "unit": snakeCase(*point.Unit),
- }
+ for _, result := range metricDataResults {
+ tags := map[string]string{}
- for _, d := range metric.Dimensions {
- tags[snakeCase(*d.Name)] = *d.Value
+ if dimensions, ok := c.queryDimensions[*result.Id]; ok {
+ tags = *dimensions
}
+ tags["region"] = c.Region
- // record field for each statistic
- fields := map[string]interface{}{}
-
- if point.Average != nil {
- fields[formatField(*metric.MetricName, cloudwatch.StatisticAverage)] = *point.Average
- }
- if point.Maximum != nil {
- fields[formatField(*metric.MetricName, cloudwatch.StatisticMaximum)] = *point.Maximum
- }
- if point.Minimum != nil {
- fields[formatField(*metric.MetricName, cloudwatch.StatisticMinimum)] = *point.Minimum
- }
- if point.SampleCount != nil {
- fields[formatField(*metric.MetricName, cloudwatch.StatisticSampleCount)] = *point.SampleCount
- }
- if point.Sum != nil {
- fields[formatField(*metric.MetricName, cloudwatch.StatisticSum)] = *point.Sum
+ for i := range result.Values {
+ grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i])
}
+ }
- acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp)
+ for _, metric := range grouper.Metrics() {
+ acc.AddMetric(metric)
}
return nil
}
-/*
- * Formatting helpers
- */
-func formatField(metricName string, statistic string) string {
- return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic))
+func init() {
+ inputs.Add("cloudwatch", func() telegraf.Input {
+ return New()
+ })
+}
+
+// New instance of the cloudwatch plugin
+func New() *CloudWatch {
+ return &CloudWatch{
+ CacheTTL: config.Duration(time.Hour),
+ RateLimit: 25,
+ Timeout: config.Duration(time.Second * 5),
+ }
}
-func formatMeasurement(namespace string) string {
+func sanitizeMeasurement(namespace string) string {
namespace = strings.Replace(namespace, "/", "_", -1)
namespace = snakeCase(namespace)
- return fmt.Sprintf("cloudwatch_%s", namespace)
+ return "cloudwatch_" + namespace
}
func snakeCase(s string) string {
s = internal.SnakeCase(s)
+ s = strings.Replace(s, " ", "_", -1)
s = strings.Replace(s, "__", "_", -1)
return s
}
-/*
- * Map Metric to *cloudwatch.GetMetricStatisticsInput for given timeframe
- */
-func (c *CloudWatch) getStatisticsInput(metric *cloudwatch.Metric) *cloudwatch.GetMetricStatisticsInput {
- input := &cloudwatch.GetMetricStatisticsInput{
- StartTime: aws.Time(c.windowStart),
- EndTime: aws.Time(c.windowEnd),
- MetricName: metric.MetricName,
- Namespace: metric.Namespace,
- Period: aws.Int64(int64(c.Period.Duration.Seconds())),
- Dimensions: metric.Dimensions,
- Statistics: []*string{
- aws.String(cloudwatch.StatisticAverage),
- aws.String(cloudwatch.StatisticMaximum),
- aws.String(cloudwatch.StatisticMinimum),
- aws.String(cloudwatch.StatisticSum),
- aws.String(cloudwatch.StatisticSampleCount)},
+type dimension struct {
+ name string
+ value string
+}
+
+// ctod converts cloudwatch dimensions to regular dimensions.
+func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string {
+ dimensions := map[string]string{}
+ for i := range cDimensions {
+ dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value
+ }
+ return &dimensions
+}
+
+func (c *CloudWatch) getDataInputs(dataQueries []*cloudwatch.MetricDataQuery) *cloudwatch.GetMetricDataInput {
+ return &cloudwatch.GetMetricDataInput{
+ StartTime: aws.Time(c.windowStart),
+ EndTime: aws.Time(c.windowEnd),
+ MetricDataQueries: dataQueries,
}
- return input
}
-/*
- * Check Metric Cache validity
- */
-func (c *MetricCache) IsValid() bool {
- return c.Metrics != nil && time.Since(c.Fetched) < c.TTL
+// isValid checks the validity of the metric cache.
+func (f *metricCache) isValid() bool {
+ return f.metrics != nil && time.Since(f.built) < f.ttl
}
-func hasWilcard(dimensions []*Dimension) bool {
+func hasWildcard(dimensions []*Dimension) bool {
for _, d := range dimensions {
if d.Value == "" || d.Value == "*" {
return true
diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go
index 9449cbeadbff3..2983773ad1bb5 100644
--- a/plugins/inputs/cloudwatch/cloudwatch_test.go
+++ b/plugins/inputs/cloudwatch/cloudwatch_test.go
@@ -6,53 +6,103 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatch"
- "github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/filter"
+ "github.com/influxdata/telegraf/testutil"
)
type mockGatherCloudWatchClient struct{}
func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
- metric := &cloudwatch.Metric{
- Namespace: params.Namespace,
- MetricName: aws.String("Latency"),
- Dimensions: []*cloudwatch.Dimension{
+ return &cloudwatch.ListMetricsOutput{
+ Metrics: []*cloudwatch.Metric{
{
- Name: aws.String("LoadBalancerName"),
- Value: aws.String("p-example"),
+ Namespace: params.Namespace,
+ MetricName: aws.String("Latency"),
+ Dimensions: []*cloudwatch.Dimension{
+ {
+ Name: aws.String("LoadBalancerName"),
+ Value: aws.String("p-example"),
+ },
+ },
},
},
- }
+ }, nil
+}
- result := &cloudwatch.ListMetricsOutput{
- Metrics: []*cloudwatch.Metric{metric},
- }
- return result, nil
+func (m *mockGatherCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) {
+ return &cloudwatch.GetMetricDataOutput{
+ MetricDataResults: []*cloudwatch.MetricDataResult{
+ {
+ Id: aws.String("minimum_0_0"),
+ Label: aws.String("latency_minimum"),
+ StatusCode: aws.String("completed"),
+ Timestamps: []*time.Time{
+ params.EndTime,
+ },
+ Values: []*float64{
+ aws.Float64(0.1),
+ },
+ },
+ {
+ Id: aws.String("maximum_0_0"),
+ Label: aws.String("latency_maximum"),
+ StatusCode: aws.String("completed"),
+ Timestamps: []*time.Time{
+ params.EndTime,
+ },
+ Values: []*float64{
+ aws.Float64(0.3),
+ },
+ },
+ {
+ Id: aws.String("average_0_0"),
+ Label: aws.String("latency_average"),
+ StatusCode: aws.String("completed"),
+ Timestamps: []*time.Time{
+ params.EndTime,
+ },
+ Values: []*float64{
+ aws.Float64(0.2),
+ },
+ },
+ {
+ Id: aws.String("sum_0_0"),
+ Label: aws.String("latency_sum"),
+ StatusCode: aws.String("completed"),
+ Timestamps: []*time.Time{
+ params.EndTime,
+ },
+ Values: []*float64{
+ aws.Float64(123),
+ },
+ },
+ {
+ Id: aws.String("sample_count_0_0"),
+ Label: aws.String("latency_sample_count"),
+ StatusCode: aws.String("completed"),
+ Timestamps: []*time.Time{
+ params.EndTime,
+ },
+ Values: []*float64{
+ aws.Float64(100),
+ },
+ },
+ },
+ }, nil
}
-func (m *mockGatherCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
- dataPoint := &cloudwatch.Datapoint{
- Timestamp: params.EndTime,
- Minimum: aws.Float64(0.1),
- Maximum: aws.Float64(0.3),
- Average: aws.Float64(0.2),
- Sum: aws.Float64(123),
- SampleCount: aws.Float64(100),
- Unit: aws.String("Seconds"),
- }
- result := &cloudwatch.GetMetricStatisticsOutput{
- Label: aws.String("Latency"),
- Datapoints: []*cloudwatch.Datapoint{dataPoint},
- }
- return result, nil
+func TestSnakeCase(t *testing.T) {
+ assert.Equal(t, "cluster_name", snakeCase("Cluster Name"))
+ assert.Equal(t, "broker_id", snakeCase("Broker ID"))
}
func TestGather(t *testing.T) {
duration, _ := time.ParseDuration("1m")
- internalDuration := internal.Duration{
- Duration: duration,
- }
+ internalDuration := config.Duration(duration)
c := &CloudWatch{
Region: "us-east-1",
Namespace: "AWS/ELB",
@@ -64,7 +114,7 @@ func TestGather(t *testing.T) {
var acc testutil.Accumulator
c.client = &mockGatherCloudWatchClient{}
- acc.GatherError(c.Gather)
+ assert.NoError(t, acc.GatherError(c.Gather))
fields := map[string]interface{}{}
fields["latency_minimum"] = 0.1
@@ -74,13 +124,11 @@ func TestGather(t *testing.T) {
fields["latency_sample_count"] = 100.0
tags := map[string]string{}
- tags["unit"] = "seconds"
tags["region"] = "us-east-1"
tags["load_balancer_name"] = "p-example"
assert.True(t, acc.HasMeasurement("cloudwatch_aws_elb"))
acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags)
-
}
type mockSelectMetricsCloudWatchClient struct{}
@@ -132,15 +180,13 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM
return result, nil
}
-func (m *mockSelectMetricsCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
+func (m *mockSelectMetricsCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) {
return nil, nil
}
func TestSelectMetrics(t *testing.T) {
duration, _ := time.ParseDuration("1m")
- internalDuration := internal.Duration{
- Duration: duration,
- }
+ internalDuration := config.Duration(duration)
c := &CloudWatch{
Region: "us-east-1",
Namespace: "AWS/ELB",
@@ -164,11 +210,11 @@ func TestSelectMetrics(t *testing.T) {
},
}
c.client = &mockSelectMetricsCloudWatchClient{}
- metrics, err := SelectMetrics(c)
+ filtered, err := getFilteredMetrics(c)
// We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2
// AZs. We should get 12 metrics.
- assert.Equal(t, 12, len(metrics))
- assert.Nil(t, err)
+ assert.Equal(t, 12, len(filtered[0].metrics))
+ assert.NoError(t, err)
}
func TestGenerateStatisticsInputParams(t *testing.T) {
@@ -183,10 +229,45 @@ func TestGenerateStatisticsInputParams(t *testing.T) {
}
duration, _ := time.ParseDuration("1m")
- internalDuration := internal.Duration{
- Duration: duration,
+ internalDuration := config.Duration(duration)
+
+ c := &CloudWatch{
+ Namespace: "AWS/ELB",
+ Delay: internalDuration,
+ Period: internalDuration,
+ }
+
+ c.initializeCloudWatch()
+
+ now := time.Now()
+
+ c.updateWindow(now)
+
+ statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil)
+ queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}})
+ params := c.getDataInputs(queries)
+
+ assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay)))
+ assert.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay)))
+ require.Len(t, params.MetricDataQueries, 5)
+ assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1)
+ assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60)
+}
+
+func TestGenerateStatisticsInputParamsFiltered(t *testing.T) {
+ d := &cloudwatch.Dimension{
+ Name: aws.String("LoadBalancerName"),
+ Value: aws.String("p-example"),
}
+ m := &cloudwatch.Metric{
+ MetricName: aws.String("Latency"),
+ Dimensions: []*cloudwatch.Dimension{d},
+ }
+
+ duration, _ := time.ParseDuration("1m")
+ internalDuration := config.Duration(duration)
+
c := &CloudWatch{
Namespace: "AWS/ELB",
Delay: internalDuration,
@@ -199,32 +280,32 @@ func TestGenerateStatisticsInputParams(t *testing.T) {
c.updateWindow(now)
- params := c.getStatisticsInput(m)
+ statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil)
+ queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}})
+ params := c.getDataInputs(queries)
- assert.EqualValues(t, *params.EndTime, now.Add(-c.Delay.Duration))
- assert.EqualValues(t, *params.StartTime, now.Add(-c.Period.Duration).Add(-c.Delay.Duration))
- assert.Len(t, params.Dimensions, 1)
- assert.Len(t, params.Statistics, 5)
- assert.EqualValues(t, *params.Period, 60)
+ assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay)))
+ assert.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay)))
+ require.Len(t, params.MetricDataQueries, 2)
+ assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1)
+ assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60)
}
func TestMetricsCacheTimeout(t *testing.T) {
- cache := &MetricCache{
- Metrics: []*cloudwatch.Metric{},
- Fetched: time.Now(),
- TTL: time.Minute,
+ cache := &metricCache{
+ metrics: []filteredMetric{},
+ built: time.Now(),
+ ttl: time.Minute,
}
- assert.True(t, cache.IsValid())
- cache.Fetched = time.Now().Add(-time.Minute)
- assert.False(t, cache.IsValid())
+ assert.True(t, cache.isValid())
+ cache.built = time.Now().Add(-time.Minute)
+ assert.False(t, cache.isValid())
}
func TestUpdateWindow(t *testing.T) {
duration, _ := time.ParseDuration("1m")
- internalDuration := internal.Duration{
- Duration: duration,
- }
+ internalDuration := config.Duration(duration)
c := &CloudWatch{
Namespace: "AWS/ELB",
@@ -242,13 +323,13 @@ func TestUpdateWindow(t *testing.T) {
newStartTime := c.windowEnd
// initial window just has a single period
- assert.EqualValues(t, c.windowEnd, now.Add(-c.Delay.Duration))
- assert.EqualValues(t, c.windowStart, now.Add(-c.Delay.Duration).Add(-c.Period.Duration))
+ assert.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay)))
+ assert.EqualValues(t, c.windowStart, now.Add(-time.Duration(c.Delay)).Add(-time.Duration(c.Period)))
now = time.Now()
c.updateWindow(now)
// subsequent window uses previous end time as start time
- assert.EqualValues(t, c.windowEnd, now.Add(-c.Delay.Duration))
+ assert.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay)))
assert.EqualValues(t, c.windowStart, newStartTime)
}
diff --git a/plugins/inputs/conntrack/README.md b/plugins/inputs/conntrack/README.md
index 0eae4b3c3f659..2e5fb8861dec1 100644
--- a/plugins/inputs/conntrack/README.md
+++ b/plugins/inputs/conntrack/README.md
@@ -1,4 +1,4 @@
-# Conntrack Plugin
+# Conntrack Input Plugin
Collects stats from Netfilter's conntrack-tools.
@@ -34,7 +34,7 @@ For more information on conntrack-tools, see the
"nf_conntrack_count","nf_conntrack_max"]
## Directories to search within for the conntrack files above.
- ## Missing directrories will be ignored.
+ ## Missing directories will be ignored.
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
```
diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go
index 4df01a31f5a4b..bf6c021c80f4a 100644
--- a/plugins/inputs/conntrack/conntrack.go
+++ b/plugins/inputs/conntrack/conntrack.go
@@ -61,7 +61,7 @@ var sampleConfig = `
"nf_conntrack_count","nf_conntrack_max"]
## Directories to search within for the conntrack files above.
- ## Missing directrories will be ignored.
+ ## Missing directories will be ignored.
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
`
diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md
index 2b2368388e39c..8e1ecc094c3a8 100644
--- a/plugins/inputs/consul/README.md
+++ b/plugins/inputs/consul/README.md
@@ -12,7 +12,7 @@ report those stats already using StatsD protocol if needed.
# Gather health check statuses from services registered in Consul
[[inputs.consul]]
## Consul server address
- # address = "localhost"
+ # address = "localhost:8500"
## URI scheme for the Consul server, one of "http", "https"
# scheme = "http"
@@ -44,7 +44,7 @@ report those stats already using StatsD protocol if needed.
- consul_health_checks
- tags:
- - node (node that check/service is registred on)
+ - node (node that check/service is registered on)
- service_name
- check_id
- fields:
diff --git a/plugins/inputs/consul/consul.go b/plugins/inputs/consul/consul.go
index 4b5ee4b1cae11..19ee7a17cd5dd 100644
--- a/plugins/inputs/consul/consul.go
+++ b/plugins/inputs/consul/consul.go
@@ -5,9 +5,8 @@ import (
"strings"
"github.com/hashicorp/consul/api"
-
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -28,7 +27,7 @@ type Consul struct {
var sampleConfig = `
## Consul server address
- # address = "localhost"
+ # address = "localhost:8500"
## URI scheme for the Consul server, one of "http", "https"
# scheme = "http"
diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md
index 13eaa02c8e33a..659b87c3a1fb6 100644
--- a/plugins/inputs/couchbase/README.md
+++ b/plugins/inputs/couchbase/README.md
@@ -1,8 +1,8 @@
-# Telegraf Plugin: Couchbase
+# Couchbase Input Plugin
## Configuration:
-```
+```toml
# Read per-node and per-bucket metrics from Couchbase
[[inputs.couchbase]]
## specify servers via a url matching:
@@ -12,7 +12,7 @@
## http://admin:secret@couchbase-0.example.com:8091/
##
## If no servers are specified, then localhost is used as the host.
- ## If no protocol is specifed, HTTP is used.
+ ## If no protocol is specified, HTTP is used.
## If no port is specified, 8091 is used.
servers = ["http://localhost:8091"]
```
diff --git a/plugins/inputs/cpu/README.md b/plugins/inputs/cpu/README.md
index dfb8561a23ce9..bc86ae898021c 100644
--- a/plugins/inputs/cpu/README.md
+++ b/plugins/inputs/cpu/README.md
@@ -1,12 +1,9 @@
-# Telegraf plugin: CPU
+# CPU Input Plugin
-#### Plugin arguments:
-- **totalcpu** boolean: If true, include `cpu-total` data
-- **percpu** boolean: If true, include data on a per-cpu basis `cpu0, cpu1, etc.`
+The `cpu` plugin gather metrics on the system CPUs.
-
-##### Configuration:
-```
+#### Configuration
+```toml
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
@@ -18,82 +15,53 @@
report_active = false
```
-#### Description
-
-The CPU plugin collects standard CPU metrics as defined in `man proc`. All
-architectures do not support all of these metrics.
+### Metrics
+
+On Linux, consult `man proc` for details on the meanings of these values.
+
+- cpu
+ - tags:
+ - cpu (CPU ID or `cpu-total`)
+ - fields:
+ - time_user (float)
+ - time_system (float)
+ - time_idle (float)
+ - time_active (float)
+ - time_nice (float)
+ - time_iowait (float)
+ - time_irq (float)
+ - time_softirq (float)
+ - time_steal (float)
+ - time_guest (float)
+ - time_guest_nice (float)
+ - usage_user (float, percent)
+ - usage_system (float, percent)
+ - usage_idle (float, percent)
+ - usage_active (float)
+ - usage_nice (float, percent)
+ - usage_iowait (float, percent)
+ - usage_irq (float, percent)
+ - usage_softirq (float, percent)
+ - usage_steal (float, percent)
+ - usage_guest (float, percent)
+ - usage_guest_nice (float, percent)
+
+### Troubleshooting
+
+On Linux systems the `/proc/stat` file is used to gather CPU times.
+Percentages are based on the last 2 samples.
+
+### Example Output
```
-cpu 3357 0 4313 1362393
- The amount of time, measured in units of USER_HZ (1/100ths of a second on
- most architectures, use sysconf(_SC_CLK_TCK) to obtain the right value),
- that the system spent in various states:
-
- user (1) Time spent in user mode.
-
- nice (2) Time spent in user mode with low priority (nice).
-
- system (3) Time spent in system mode.
-
- idle (4) Time spent in the idle task. This value should be USER_HZ times
- the second entry in the /proc/uptime pseudo-file.
-
- iowait (since Linux 2.5.41)
- (5) Time waiting for I/O to complete.
-
- irq (since Linux 2.6.0-test4)
- (6) Time servicing interrupts.
-
- softirq (since Linux 2.6.0-test4)
- (7) Time servicing softirqs.
-
- steal (since Linux 2.6.11)
- (8) Stolen time, which is the time spent in other operating systems
- when running in a virtualized environment
-
- guest (since Linux 2.6.24)
- (9) Time spent running a virtual CPU for guest operating systems
- under the control of the Linux kernel.
-
- guest_nice (since Linux 2.6.33)
- (10) Time spent running a niced guest (virtual CPU for guest operating systems under the control of the Linux kernel).
+cpu,cpu=cpu0,host=loaner time_active=202224.15999999992,time_guest=30250.35,time_guest_nice=0,time_idle=1527035.04,time_iowait=1352,time_irq=0,time_nice=169.28,time_softirq=6281.4,time_steal=0,time_system=40097.14,time_user=154324.34 1568760922000000000
+cpu,cpu=cpu0,host=loaner usage_active=31.249999981810106,usage_guest=2.083333333080696,usage_guest_nice=0,usage_idle=68.7500000181899,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.166666666161392,usage_user=25.000000002273737 1568760922000000000
+cpu,cpu=cpu1,host=loaner time_active=201890.02000000002,time_guest=30508.41,time_guest_nice=0,time_idle=264641.18,time_iowait=210.44,time_irq=0,time_nice=181.75,time_softirq=4537.88,time_steal=0,time_system=39480.7,time_user=157479.25 1568760922000000000
+cpu,cpu=cpu1,host=loaner usage_active=12.500000010610771,usage_guest=2.0833333328280585,usage_guest_nice=0,usage_idle=87.49999998938922,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=2.0833333332070145,usage_steal=0,usage_system=4.166666665656117,usage_user=4.166666666414029 1568760922000000000
+cpu,cpu=cpu2,host=loaner time_active=201382.78999999998,time_guest=30325.8,time_guest_nice=0,time_idle=264686.63,time_iowait=202.77,time_irq=0,time_nice=162.81,time_softirq=3378.34,time_steal=0,time_system=39270.59,time_user=158368.28 1568760922000000000
+cpu,cpu=cpu2,host=loaner usage_active=15.999999993480742,usage_guest=1.9999999999126885,usage_guest_nice=0,usage_idle=84.00000000651926,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=2.0000000002764864,usage_steal=0,usage_system=3.999999999825377,usage_user=7.999999998923158 1568760922000000000
+cpu,cpu=cpu3,host=loaner time_active=198953.51000000007,time_guest=30344.43,time_guest_nice=0,time_idle=265504.09,time_iowait=187.64,time_irq=0,time_nice=197.47,time_softirq=2301.47,time_steal=0,time_system=39313.73,time_user=156953.2 1568760922000000000
+cpu,cpu=cpu3,host=loaner usage_active=10.41666667424579,usage_guest=0,usage_guest_nice=0,usage_idle=89.58333332575421,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.166666666666667,usage_user=6.249999998484175 1568760922000000000
+cpu,cpu=cpu-total,host=loaner time_active=804450.5299999998,time_guest=121429,time_guest_nice=0,time_idle=2321866.96,time_iowait=1952.86,time_irq=0,time_nice=711.32,time_softirq=16499.1,time_steal=0,time_system=158162.17,time_user=627125.08 1568760922000000000
+cpu,cpu=cpu-total,host=loaner usage_active=17.616580305880305,usage_guest=1.036269430422946,usage_guest_nice=0,usage_idle=82.3834196941197,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=1.0362694300459534,usage_steal=0,usage_system=4.145077721691784,usage_user=11.398963731636465 1568760922000000000
```
-
-# Measurements:
-### CPU Time measurements:
-
-Meta:
-- units: CPU Time
-- tags: `cpu= or `
-
-Measurement names:
-- cpu_time_user
-- cpu_time_system
-- cpu_time_idle
-- cpu_time_active (must be explicitly enabled by setting `report_active = true`)
-- cpu_time_nice
-- cpu_time_iowait
-- cpu_time_irq
-- cpu_time_softirq
-- cpu_time_steal
-- cpu_time_guest
-- cpu_time_guest_nice
-
-### CPU Usage Percent Measurements:
-
-Meta:
-- units: percent (out of 100)
-- tags: `cpu= or `
-
-Measurement names:
-- cpu_usage_user
-- cpu_usage_system
-- cpu_usage_idle
-- cpu_usage_active (must be explicitly enabled by setting `report_active = true`)
-- cpu_usage_nice
-- cpu_usage_iowait
-- cpu_usage_irq
-- cpu_usage_softirq
-- cpu_usage_steal
-- cpu_usage_guest
-- cpu_usage_guest_nice
diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go
index 34d7853504d39..bf356ec7b945c 100644
--- a/plugins/inputs/cpu/cpu_test.go
+++ b/plugins/inputs/cpu/cpu_test.go
@@ -55,7 +55,7 @@ func TestCPUStats(t *testing.T) {
err := cs.Gather(&acc)
require.NoError(t, err)
- // Computed values are checked with delta > 0 because of floating point arithmatic
+ // Computed values are checked with delta > 0 because of floating point arithmetic
// imprecision
assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags)
@@ -102,7 +102,7 @@ func TestCPUStats(t *testing.T) {
assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags)
}
-// Asserts that a given accumulator contains a measurment of type float64 with
+// Asserts that a given accumulator contains a measurement of type float64 with
// specific tags within a certain distance of a given expected value. Asserts a failure
// if the measurement is of the wrong type, or if no matching measurements are found
//
@@ -113,7 +113,7 @@ func TestCPUStats(t *testing.T) {
// expectedValue float64 : Value to search for within the measurement
// delta float64 : Maximum acceptable distance of an accumulated value
// from the expectedValue parameter. Useful when
-// floating-point arithmatic imprecision makes looking
+// floating-point arithmetic imprecision makes looking
// for an exact match impractical
// tags map[string]string : Tag set the found measurement must have. Set to nil to
// ignore the tag set.
@@ -225,7 +225,7 @@ func TestCPUTimesDecrease(t *testing.T) {
err := cs.Gather(&acc)
require.NoError(t, err)
- // Computed values are checked with delta > 0 because of floating point arithmatic
+ // Computed values are checked with delta > 0 because of floating point arithmetic
// imprecision
assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags)
diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go
index e37bf996be571..d74c792494378 100644
--- a/plugins/inputs/dcos/dcos.go
+++ b/plugins/inputs/dcos/dcos.go
@@ -13,7 +13,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
diff --git a/plugins/inputs/disk/README.md b/plugins/inputs/disk/README.md
index 2979a5f2effb8..b0a8ac05a6c19 100644
--- a/plugins/inputs/disk/README.md
+++ b/plugins/inputs/disk/README.md
@@ -15,7 +15,7 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details.
# mount_points = ["/"]
## Ignore mount points by filesystem type.
- ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
+ ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
```
#### Docker container
diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go
index 5a30dbecff71e..b2c7e540038bb 100644
--- a/plugins/inputs/disk/disk.go
+++ b/plugins/inputs/disk/disk.go
@@ -13,9 +13,9 @@ type DiskStats struct {
ps system.PS
// Legacy support
- Mountpoints []string
+ Mountpoints []string `toml:"mountpoints"`
- MountPoints []string
+ MountPoints []string `toml:"mount_points"`
IgnoreFS []string `toml:"ignore_fs"`
}
@@ -29,7 +29,7 @@ var diskSampleConfig = `
# mount_points = ["/"]
## Ignore mount points by filesystem type.
- ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
+ ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
`
func (_ *DiskStats) SampleConfig() string {
diff --git a/plugins/inputs/diskio/README.md b/plugins/inputs/diskio/README.md
index 07bc714566749..11e68d6961ee0 100644
--- a/plugins/inputs/diskio/README.md
+++ b/plugins/inputs/diskio/README.md
@@ -64,6 +64,8 @@ docker run --privileged -v /:/hostfs:ro -v /run/udev:/run/udev:ro -e HOST_PROC=/
- io_time (integer, counter, milliseconds)
- weighted_io_time (integer, counter, milliseconds)
- iops_in_progress (integer, gauge)
+ - merged_reads (integer, counter)
+ - merged_writes (integer, counter)
On linux these values correspond to the values in
[`/proc/diskstats`](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats)
@@ -105,6 +107,13 @@ This value counts the number of I/O requests that have been issued to
the device driver but have not yet completed. It does not include I/O
requests that are in the queue but not yet issued to the device driver.
+#### `merged_reads` & `merged_writes`:
+
+Reads and writes which are adjacent to each other may be merged for
+efficiency. Thus two 4K reads may become one 8K read before it is
+ultimately handed to the disk, and so it will be counted (and queued)
+as only one I/O. These fields lets you know how often this was done.
+
### Sample Queries:
#### Calculate percent IO utilization per disk and host:
@@ -115,17 +124,14 @@ SELECT non_negative_derivative(last("io_time"),1ms) FROM "diskio" WHERE time > n
#### Calculate average queue depth:
`iops_in_progress` will give you an instantaneous value. This will give you the average between polling intervals.
```
-SELECT non_negative_derivative(last("weighted_io_time",1ms)) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s)
+SELECT non_negative_derivative(last("weighted_io_time"),1ms) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s)
```
### Example Output:
```
-diskio,name=sda weighted_io_time=8411917i,read_time=7446444i,write_time=971489i,io_time=866197i,write_bytes=5397686272i,iops_in_progress=0i,reads=2970519i,writes=361139i,read_bytes=119528903168i 1502467254359000000
-diskio,name=sda1 reads=2149i,read_bytes=10753536i,write_bytes=20697088i,write_time=346i,weighted_io_time=505i,writes=2110i,read_time=161i,io_time=208i,iops_in_progress=0i 1502467254359000000
-diskio,name=sda2 reads=2968279i,writes=359029i,write_bytes=5376989184i,iops_in_progress=0i,weighted_io_time=8411250i,read_bytes=119517334528i,read_time=7446249i,write_time=971143i,io_time=866010i 1502467254359000000
-diskio,name=sdb writes=99391856i,write_time=466700894i,io_time=630259874i,weighted_io_time=4245949844i,reads=2750773828i,read_bytes=80667939499008i,write_bytes=6329347096576i,read_time=3783042534i,iops_in_progress=2i 1502467254359000000
-diskio,name=centos/root read_time=7472461i,write_time=950014i,iops_in_progress=0i,weighted_io_time=8424447i,writes=298543i,read_bytes=119510105088i,io_time=837421i,reads=2971769i,write_bytes=5192795648i 1502467254359000000
-diskio,name=centos/var_log reads=1065i,writes=69711i,read_time=1083i,write_time=35376i,read_bytes=6828032i,write_bytes=184193536i,io_time=29699i,iops_in_progress=0i,weighted_io_time=36460i 1502467254359000000
-diskio,name=postgresql/pgsql write_time=478267417i,io_time=631098730i,iops_in_progress=2i,weighted_io_time=4263637564i,reads=2750777151i,writes=110044361i,read_bytes=80667939288064i,write_bytes=6329347096576i,read_time=3784499336i 1502467254359000000
+diskio,name=sda1 merged_reads=0i,reads=2353i,writes=10i,write_bytes=2117632i,write_time=49i,io_time=1271i,weighted_io_time=1350i,read_bytes=31350272i,read_time=1303i,iops_in_progress=0i,merged_writes=0i 1578326400000000000
+diskio,name=centos/var_log reads=1063077i,writes=591025i,read_bytes=139325491712i,write_bytes=144233131520i,read_time=650221i,write_time=24368817i,io_time=852490i,weighted_io_time=25037394i,iops_in_progress=1i,merged_reads=0i,merged_writes=0i 1578326400000000000
+diskio,name=sda write_time=49i,io_time=1317i,weighted_io_time=1404i,reads=2495i,read_time=1357i,write_bytes=2117632i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,writes=10i,read_bytes=38956544i 1578326400000000000
+
```
diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go
index e0c6243bb83c0..9c1e20ebdc5de 100644
--- a/plugins/inputs/diskio/diskio.go
+++ b/plugins/inputs/diskio/diskio.go
@@ -2,7 +2,6 @@ package diskio
import (
"fmt"
- "log"
"regexp"
"strings"
@@ -24,6 +23,8 @@ type DiskIO struct {
NameTemplates []string
SkipSerialNumber bool
+ Log telegraf.Logger
+
infoCache map[string]diskInfoCache
deviceFilter filter.Filter
initialized bool
@@ -75,7 +76,7 @@ func (s *DiskIO) init() error {
if hasMeta(device) {
filter, err := filter.Compile(s.Devices)
if err != nil {
- return fmt.Errorf("error compiling device pattern: %v", err)
+ return fmt.Errorf("error compiling device pattern: %s", err.Error())
}
s.deviceFilter = filter
}
@@ -99,19 +100,36 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
diskio, err := s.ps.DiskIO(devices)
if err != nil {
- return fmt.Errorf("error getting disk io info: %s", err)
+ return fmt.Errorf("error getting disk io info: %s", err.Error())
}
for _, io := range diskio {
- if s.deviceFilter != nil && !s.deviceFilter.Match(io.Name) {
- continue
+
+ match := false
+ if s.deviceFilter != nil && s.deviceFilter.Match(io.Name) {
+ match = true
}
tags := map[string]string{}
- tags["name"] = s.diskName(io.Name)
+ var devLinks []string
+ tags["name"], devLinks = s.diskName(io.Name)
+
+ if s.deviceFilter != nil && !match {
+ for _, devLink := range devLinks {
+ if s.deviceFilter.Match(devLink) {
+ match = true
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+ }
+
for t, v := range s.diskTags(io.Name) {
tags[t] = v
}
+
if !s.SkipSerialNumber {
if len(io.SerialNumber) != 0 {
tags["serial"] = io.SerialNumber
@@ -130,6 +148,8 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
"io_time": io.IoTime,
"weighted_io_time": io.WeightedIO,
"iops_in_progress": io.IopsInProgress,
+ "merged_reads": io.MergedReadCount,
+ "merged_writes": io.MergedWriteCount,
}
acc.AddCounter("diskio", fields, tags)
}
@@ -137,15 +157,20 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
return nil
}
-func (s *DiskIO) diskName(devName string) string {
+func (s *DiskIO) diskName(devName string) (string, []string) {
+ di, err := s.diskInfo(devName)
+ devLinks := strings.Split(di["DEVLINKS"], " ")
+ for i, devLink := range devLinks {
+ devLinks[i] = strings.TrimPrefix(devLink, "/dev/")
+ }
+
if len(s.NameTemplates) == 0 {
- return devName
+ return devName, devLinks
}
- di, err := s.diskInfo(devName)
if err != nil {
- log.Printf("W! Error gathering disk info: %s", err)
- return devName
+ s.Log.Warnf("Error gathering disk info: %s", err)
+ return devName, devLinks
}
for _, nt := range s.NameTemplates {
@@ -163,11 +188,11 @@ func (s *DiskIO) diskName(devName string) string {
})
if !miss {
- return name
+ return name, devLinks
}
}
- return devName
+ return devName, devLinks
}
func (s *DiskIO) diskTags(devName string) map[string]string {
@@ -177,7 +202,7 @@ func (s *DiskIO) diskTags(devName string) map[string]string {
di, err := s.diskInfo(devName)
if err != nil {
- log.Printf("W! Error gathering disk info: %s", err)
+ s.Log.Warnf("Error gathering disk info: %s", err)
return nil
}
diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go
index c727f485b1410..f2499ca17c1c2 100644
--- a/plugins/inputs/diskio/diskio_linux.go
+++ b/plugins/inputs/diskio/diskio_linux.go
@@ -11,6 +11,7 @@ import (
)
type diskInfoCache struct {
+ modifiedAt int64 // Unix Nano timestamp of the last modification of the device. This value is used to invalidate the cache
udevDataPath string
values map[string]string
}
@@ -31,7 +32,8 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) {
s.infoCache = map[string]diskInfoCache{}
}
ic, ok := s.infoCache[devName]
- if ok {
+
+ if ok && stat.Mtim.Nano() == ic.modifiedAt {
return ic.values, nil
}
@@ -42,6 +44,7 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) {
di := map[string]string{}
s.infoCache[devName] = diskInfoCache{
+ modifiedAt: stat.Mtim.Nano(),
udevDataPath: udevDataPath,
values: di,
}
diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go
index 9e79be1653ca9..1cb031bdce553 100644
--- a/plugins/inputs/diskio/diskio_linux_test.go
+++ b/plugins/inputs/diskio/diskio_linux_test.go
@@ -88,7 +88,8 @@ func TestDiskIOStats_diskName(t *testing.T) {
s := DiskIO{
NameTemplates: tc.templates,
}
- assert.Equal(t, tc.expected, s.diskName("null"), "Templates: %#v", tc.templates)
+ name, _ := s.diskName("null")
+ assert.Equal(t, tc.expected, name, "Templates: %#v", tc.templates)
}
}
diff --git a/plugins/inputs/diskio/diskio_test.go b/plugins/inputs/diskio/diskio_test.go
index 41c4b53e25614..3ad203de09362 100644
--- a/plugins/inputs/diskio/diskio_test.go
+++ b/plugins/inputs/diskio/diskio_test.go
@@ -31,15 +31,17 @@ func TestDiskIO(t *testing.T) {
result: Result{
stats: map[string]disk.IOCountersStat{
"sda": {
- ReadCount: 888,
- WriteCount: 5341,
- ReadBytes: 100000,
- WriteBytes: 200000,
- ReadTime: 7123,
- WriteTime: 9087,
- Name: "sda",
- IoTime: 123552,
- SerialNumber: "ab-123-ad",
+ ReadCount: 888,
+ WriteCount: 5341,
+ ReadBytes: 100000,
+ WriteBytes: 200000,
+ ReadTime: 7123,
+ WriteTime: 9087,
+ MergedReadCount: 11,
+ MergedWriteCount: 12,
+ Name: "sda",
+ IoTime: 123552,
+ SerialNumber: "ab-123-ad",
},
},
err: nil,
@@ -61,6 +63,8 @@ func TestDiskIO(t *testing.T) {
"io_time": uint64(123552),
"weighted_io_time": uint64(0),
"iops_in_progress": uint64(0),
+ "merged_reads": uint64(11),
+ "merged_writes": uint64(12),
},
},
},
@@ -103,6 +107,7 @@ func TestDiskIO(t *testing.T) {
var acc testutil.Accumulator
diskio := &DiskIO{
+ Log: testutil.Logger{},
ps: &mps,
Devices: tt.devices,
}
diff --git a/plugins/inputs/disque/README.md b/plugins/inputs/disque/README.md
new file mode 100644
index 0000000000000..ad05658cc2b14
--- /dev/null
+++ b/plugins/inputs/disque/README.md
@@ -0,0 +1,38 @@
+# Disque Input Plugin
+
+[Disque](https://github.com/antirez/disque) is an ongoing experiment to build a distributed, in-memory, message broker.
+
+
+### Configuration:
+
+```toml
+[[inputs.disque]]
+ ## An array of URI to gather stats about. Specify an ip or hostname
+ ## with optional port and password.
+ ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
+ ## If no servers are specified, then localhost is used as the host.
+ servers = ["localhost"]
+```
+
+### Metrics
+
+
+- disque
+ - disque_host
+ - uptime_in_seconds
+ - connected_clients
+ - blocked_clients
+ - used_memory
+ - used_memory_rss
+ - used_memory_peak
+ - total_connections_received
+ - total_commands_processed
+ - instantaneous_ops_per_sec
+ - latest_fork_usec
+ - mem_fragmentation_ratio
+ - used_cpu_sys
+ - used_cpu_user
+ - used_cpu_sys_children
+ - used_cpu_user_children
+ - registered_jobs
+ - registered_queues
diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md
index 51152a367b045..dc8ddd90373e9 100644
--- a/plugins/inputs/dns_query/README.md
+++ b/plugins/inputs/dns_query/README.md
@@ -16,7 +16,7 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi
# domains = ["."]
## Query record type.
- ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
+ ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A"
## Dns server port.
diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go
index 3fcf4a0b83427..c5657277073c2 100644
--- a/plugins/inputs/dns_query/dns_query.go
+++ b/plugins/inputs/dns_query/dns_query.go
@@ -1,7 +1,6 @@
package dns_query
import (
- "errors"
"fmt"
"net"
"strconv"
@@ -53,7 +52,7 @@ var sampleConfig = `
# domains = ["."]
## Query record type.
- ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
+ ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A"
## Dns server port.
@@ -162,7 +161,7 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int,
func (d *DnsQuery) parseRecordType() (uint16, error) {
var recordType uint16
- var error error
+ var err error
switch d.RecordType {
case "A":
@@ -188,10 +187,10 @@ func (d *DnsQuery) parseRecordType() (uint16, error) {
case "TXT":
recordType = dns.TypeTXT
default:
- error = errors.New(fmt.Sprintf("Record type %s not recognized", d.RecordType))
+ err = fmt.Errorf("Record type %s not recognized", d.RecordType)
}
- return recordType, error
+ return recordType, err
}
func setResult(result ResultType, fields map[string]interface{}, tags map[string]string) {
diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md
index 39fc7d6a64e29..95394c94e9c44 100644
--- a/plugins/inputs/docker/README.md
+++ b/plugins/inputs/docker/README.md
@@ -26,12 +26,17 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/)
## Deprecated (1.4.0), use container_name_include
container_names = []
+ ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+ source_tag = false
+
## Containers to include and exclude. Collect all if empty. Globs accepted.
container_name_include = []
container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
+ ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+ ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []
@@ -91,6 +96,17 @@ volumes:
- /var/run/docker.sock:/var/run/docker.sock
```
+#### source tag
+
+Selecting the containers measurements can be tricky if you have many containers with the same name.
+To alleviate this issue you can set the below value to `true`
+
+```toml
+source_tag = true
+```
+
+This will cause all measurements to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker.
+
#### Kubernetes Labels
Kubernetes may add many labels to your containers, if they are not needed you
@@ -106,7 +122,7 @@ may prefer to exclude them:
- unit
- engine_host
- server_version
- - fields:
+ + fields:
- n_used_file_descriptors
- n_cpus
- n_containers
@@ -117,32 +133,49 @@ may prefer to exclude them:
- n_goroutines
- n_listener_events
- memory_total
- - pool_blocksize (requires devicemapper storage driver)
+ - pool_blocksize (requires devicemapper storage driver) (deprecated see: `docker_devicemapper`)
The `docker_data` and `docker_metadata` measurements are available only for
some storage drivers such as devicemapper.
-- docker_data
++ docker_data (deprecated see: `docker_devicemapper`)
- tags:
- unit
- engine_host
- server_version
- - fields:
+ + fields:
- available
- total
- used
-- docker_metadata
+- docker_metadata (deprecated see: `docker_devicemapper`)
- tags:
- unit
- engine_host
- server_version
- - fields:
+ + fields:
- available
- total
- used
-- docker_container_mem
+The above measurements for the devicemapper storage driver can now be found in the new `docker_devicemapper` measurement
+
+- docker_devicemapper
+ - tags:
+ - engine_host
+ - server_version
+ - pool_name
+ + fields:
+ - pool_blocksize_bytes
+ - data_space_used_bytes
+ - data_space_total_bytes
+ - data_space_available_bytes
+ - metadata_space_used_bytes
+ - metadata_space_total_bytes
+ - metadata_space_available_bytes
+ - thin_pool_minimum_free_space_bytes
+
++ docker_container_mem
- tags:
- engine_host
- server_version
@@ -150,8 +183,8 @@ some storage drivers such as devicemapper.
- container_name
- container_status
- container_version
- - fields:
- - total_pgmafault
+ + fields:
+ - total_pgmajfault
- cache
- mapped_file
- total_inactive_file
@@ -195,7 +228,7 @@ some storage drivers such as devicemapper.
- container_status
- container_version
- cpu
- - fields:
+ + fields:
- throttling_periods
- throttling_throttled_periods
- throttling_throttled_time
@@ -206,7 +239,7 @@ some storage drivers such as devicemapper.
- usage_percent
- container_id
-- docker_container_net
++ docker_container_net
- tags:
- engine_host
- server_version
@@ -215,7 +248,7 @@ some storage drivers such as devicemapper.
- container_status
- container_version
- network
- - fields:
+ + fields:
- rx_dropped
- rx_bytes
- rx_errors
@@ -273,11 +306,13 @@ status if configured.
- container_status
- container_version
- fields:
+ - container_id
- oomkilled (boolean)
- pid (integer)
- exitcode (integer)
- started_at (integer)
- finished_at (integer)
+ - uptime_ns (integer)
- docker_swarm
- tags:
diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go
index 6d9d563722e91..dafedacafb3f1 100644
--- a/plugins/inputs/docker/docker.go
+++ b/plugins/inputs/docker/docker.go
@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
- "log"
"net/http"
"regexp"
"strconv"
@@ -20,7 +19,8 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/internal/docker"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -44,6 +44,10 @@ type Docker struct {
ContainerStateInclude []string `toml:"container_state_include"`
ContainerStateExclude []string `toml:"container_state_exclude"`
+ IncludeSourceTag bool `toml:"source_tag"`
+
+ Log telegraf.Logger
+
tlsint.ClientConfig
newEnvClient func() (Client, error)
@@ -51,7 +55,7 @@ type Docker struct {
client Client
httpClient *http.Client
- engine_host string
+ engineHost string
serverVersion string
filtersCreated bool
labelFilter filter.Filter
@@ -73,6 +77,7 @@ const (
var (
sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}
+ now = time.Now
)
var sampleConfig = `
@@ -87,6 +92,9 @@ var sampleConfig = `
## Only collect metrics for these containers, collect all if empty
container_names = []
+ ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+ source_tag = false
+
## Containers to include and exclude. Globs accepted.
## Note that an empty array for both will include all containers
container_name_include = []
@@ -94,6 +102,8 @@ var sampleConfig = `
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
+ ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+ ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []
@@ -103,8 +113,10 @@ var sampleConfig = `
## Whether to report for each container per-device blkio (8:0, 8:1...) and
## network (eth0, eth1, ...) stats or not
perdevice = true
+
## Whether to report for each container total blkio and network stats or not
total = false
+
## Which environment variables should we use as a tag
##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
@@ -121,12 +133,15 @@ var sampleConfig = `
# insecure_skip_verify = false
`
+// SampleConfig returns the default Docker TOML configuration.
+func (d *Docker) SampleConfig() string { return sampleConfig }
+
+// Description the metrics returned.
func (d *Docker) Description() string {
return "Read metrics about docker containers"
}
-func (d *Docker) SampleConfig() string { return sampleConfig }
-
+// Gather metrics from the docker server.
func (d *Docker) Gather(acc telegraf.Accumulator) error {
if d.client == nil {
c, err := d.getNewClient()
@@ -184,7 +199,11 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
}
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
defer cancel()
+
containers, err := d.client.ContainerList(ctx, opts)
+ if err == context.DeadlineExceeded {
+ return errListTimeout
+ }
if err != nil {
return err
}
@@ -195,10 +214,8 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
for _, container := range containers {
go func(c types.Container) {
defer wg.Done()
- err := d.gatherContainer(c, acc)
- if err != nil {
- acc.AddError(fmt.Errorf("E! Error gathering container %s stats: %s\n",
- c.Names, err.Error()))
+ if err := d.gatherContainer(c, acc); err != nil {
+ acc.AddError(err)
}
}(container)
}
@@ -210,7 +227,11 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
defer cancel()
+
services, err := d.client.ServiceList(ctx, types.ServiceListOptions{})
+ if err == context.DeadlineExceeded {
+ return errServiceTimeout
+ }
if err != nil {
return err
}
@@ -261,7 +282,7 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
fields["tasks_running"] = running[service.ID]
fields["tasks_desired"] = tasksNoShutdown[service.ID]
} else {
- log.Printf("E! Unknow Replicas Mode")
+ d.Log.Error("Unknown replica mode")
}
// Add metrics
acc.AddFields("docker_swarm",
@@ -279,19 +300,24 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
dataFields := make(map[string]interface{})
metadataFields := make(map[string]interface{})
now := time.Now()
+
// Get info from docker daemon
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
defer cancel()
+
info, err := d.client.Info(ctx)
+ if err == context.DeadlineExceeded {
+ return errInfoTimeout
+ }
if err != nil {
return err
}
- d.engine_host = info.Name
+ d.engineHost = info.Name
d.serverVersion = info.ServerVersion
tags := map[string]string{
- "engine_host": d.engine_host,
+ "engine_host": d.engineHost,
"server_version": d.serverVersion,
}
@@ -306,21 +332,50 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
"n_goroutines": info.NGoroutines,
"n_listener_events": info.NEventsListener,
}
+
// Add metrics
acc.AddFields("docker", fields, tags, now)
acc.AddFields("docker",
map[string]interface{}{"memory_total": info.MemTotal},
tags,
now)
+
// Get storage metrics
tags["unit"] = "bytes"
+
+ var (
+ // "docker_devicemapper" measurement fields
+ poolName string
+ deviceMapperFields = map[string]interface{}{}
+ )
+
for _, rawData := range info.DriverStatus {
+ name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1))
+ if name == "pool_name" {
+ poolName = rawData[1]
+ continue
+ }
+
// Try to convert string to int (bytes)
value, err := parseSize(rawData[1])
if err != nil {
continue
}
- name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1))
+
+ switch name {
+ case "pool_blocksize",
+ "base_device_size",
+ "data_space_used",
+ "data_space_total",
+ "data_space_available",
+ "metadata_space_used",
+ "metadata_space_total",
+ "metadata_space_available",
+ "thin_pool_minimum_free_space":
+ deviceMapperFields[name+"_bytes"] = value
+ }
+
+ // Legacy devicemapper measurements
if name == "pool_blocksize" {
// pool blocksize
acc.AddFields("docker",
@@ -337,20 +392,44 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
metadataFields[fieldName] = value
}
}
+
if len(dataFields) > 0 {
acc.AddFields("docker_data", dataFields, tags, now)
}
+
if len(metadataFields) > 0 {
acc.AddFields("docker_metadata", metadataFields, tags, now)
}
+
+ if len(deviceMapperFields) > 0 {
+ tags := map[string]string{
+ "engine_host": d.engineHost,
+ "server_version": d.serverVersion,
+ }
+
+ if poolName != "" {
+ tags["pool_name"] = poolName
+ }
+
+ acc.AddFields("docker_devicemapper", deviceMapperFields, tags, now)
+ }
+
return nil
}
+func hostnameFromID(id string) string {
+ if len(id) > 12 {
+ return id[0:12]
+ }
+ return id
+}
+
func (d *Docker) gatherContainer(
container types.Container,
acc telegraf.Accumulator,
) error {
var v *types.StatsJSON
+
// Parse container name
var cname string
for _, name := range container.Names {
@@ -366,39 +445,38 @@ func (d *Docker) gatherContainer(
return nil
}
- // the image name sometimes has a version part, or a private repo
- // ie, rabbitmq:3-management or docker.someco.net:4443/rabbitmq:3-management
- imageName := ""
- imageVersion := "unknown"
- i := strings.LastIndex(container.Image, ":") // index of last ':' character
- if i > -1 {
- imageVersion = container.Image[i+1:]
- imageName = container.Image[:i]
- } else {
- imageName = container.Image
- }
+ imageName, imageVersion := docker.ParseImage(container.Image)
tags := map[string]string{
- "engine_host": d.engine_host,
+ "engine_host": d.engineHost,
"server_version": d.serverVersion,
"container_name": cname,
"container_image": imageName,
"container_version": imageVersion,
}
+ if d.IncludeSourceTag {
+ tags["source"] = hostnameFromID(container.ID)
+ }
+
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
defer cancel()
+
r, err := d.client.ContainerStats(ctx, container.ID, false)
+ if err == context.DeadlineExceeded {
+ return errStatsTimeout
+ }
if err != nil {
- return fmt.Errorf("Error getting docker stats: %s", err.Error())
+ return fmt.Errorf("error getting docker stats: %v", err)
}
+
defer r.Body.Close()
dec := json.NewDecoder(r.Body)
if err = dec.Decode(&v); err != nil {
if err == io.EOF {
return nil
}
- return fmt.Errorf("Error decoding: %s", err.Error())
+ return fmt.Errorf("error decoding: %v", err)
}
daemonOSType := r.OSType
@@ -414,19 +492,35 @@ func (d *Docker) gatherContainer(
}
}
+ return d.gatherContainerInspect(container, acc, tags, daemonOSType, v)
+}
+
+func (d *Docker) gatherContainerInspect(
+ container types.Container,
+ acc telegraf.Accumulator,
+ tags map[string]string,
+ daemonOSType string,
+ v *types.StatsJSON,
+) error {
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
+ defer cancel()
+
info, err := d.client.ContainerInspect(ctx, container.ID)
+ if err == context.DeadlineExceeded {
+ return errInspectTimeout
+ }
if err != nil {
- return fmt.Errorf("Error inspecting docker container: %s", err.Error())
+ return fmt.Errorf("error inspecting docker container: %v", err)
}
// Add whitelisted environment variables to tags
if len(d.TagEnvironment) > 0 {
for _, envvar := range info.Config.Env {
for _, configvar := range d.TagEnvironment {
- dock_env := strings.SplitN(envvar, "=", 2)
+ dockEnv := strings.SplitN(envvar, "=", 2)
//check for presence of tag in whitelist
- if len(dock_env) == 2 && len(strings.TrimSpace(dock_env[1])) != 0 && configvar == dock_env[0] {
- tags[dock_env[0]] = dock_env[1]
+ if len(dockEnv) == 2 && len(strings.TrimSpace(dockEnv[1])) != 0 && configvar == dockEnv[0] {
+ tags[dockEnv[0]] = dockEnv[1]
}
}
}
@@ -435,26 +529,39 @@ func (d *Docker) gatherContainer(
if info.State != nil {
tags["container_status"] = info.State.Status
statefields := map[string]interface{}{
- "oomkilled": info.State.OOMKilled,
- "pid": info.State.Pid,
- "exitcode": info.State.ExitCode,
+ "oomkilled": info.State.OOMKilled,
+ "pid": info.State.Pid,
+ "exitcode": info.State.ExitCode,
+ "container_id": container.ID,
}
- container_time, err := time.Parse(time.RFC3339, info.State.StartedAt)
- if err == nil && !container_time.IsZero() {
- statefields["started_at"] = container_time.UnixNano()
+
+ finished, err := time.Parse(time.RFC3339, info.State.FinishedAt)
+ if err == nil && !finished.IsZero() {
+ statefields["finished_at"] = finished.UnixNano()
+ } else {
+ // set finished to now for use in uptime
+ finished = now()
}
- container_time, err = time.Parse(time.RFC3339, info.State.FinishedAt)
- if err == nil && !container_time.IsZero() {
- statefields["finished_at"] = container_time.UnixNano()
+
+ started, err := time.Parse(time.RFC3339, info.State.StartedAt)
+ if err == nil && !started.IsZero() {
+ statefields["started_at"] = started.UnixNano()
+
+ uptime := finished.Sub(started)
+ if finished.Before(started) {
+ uptime = now().Sub(started)
+ }
+ statefields["uptime_ns"] = uptime.Nanoseconds()
}
- acc.AddFields("docker_container_status", statefields, tags, time.Now())
+
+ acc.AddFields("docker_container_status", statefields, tags, now())
if info.State.Health != nil {
healthfields := map[string]interface{}{
"health_status": info.State.Health.Status,
"failing_streak": info.ContainerJSONBase.State.Health.FailingStreak,
}
- acc.AddFields("docker_container_health", healthfields, tags, time.Now())
+ acc.AddFields("docker_container_health", healthfields, tags, now())
}
}
@@ -526,10 +633,10 @@ func parseContainerStats(
memfields["limit"] = stat.MemoryStats.Limit
memfields["max_usage"] = stat.MemoryStats.MaxUsage
- mem := calculateMemUsageUnixNoCache(stat.MemoryStats)
+ mem := CalculateMemUsageUnixNoCache(stat.MemoryStats)
memLimit := float64(stat.MemoryStats.Limit)
memfields["usage"] = uint64(mem)
- memfields["usage_percent"] = calculateMemPercentUnixNoCache(memLimit, mem)
+ memfields["usage_percent"] = CalculateMemPercentUnixNoCache(memLimit, mem)
} else {
memfields["commit_bytes"] = stat.MemoryStats.Commit
memfields["commit_peak_bytes"] = stat.MemoryStats.CommitPeak
@@ -552,7 +659,7 @@ func parseContainerStats(
if daemonOSType != "windows" {
previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage
previousSystem := stat.PreCPUStats.SystemUsage
- cpuPercent := calculateCPUPercentUnix(previousCPU, previousSystem, stat)
+ cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat)
cpufields["usage_percent"] = cpuPercent
} else {
cpuPercent := calculateCPUPercentWindows(stat)
@@ -769,7 +876,7 @@ func sliceContains(in string, sl []string) bool {
func parseSize(sizeStr string) (int64, error) {
matches := sizeRegex.FindStringSubmatch(sizeStr)
if len(matches) != 4 {
- return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ return -1, fmt.Errorf("invalid size: %s", sizeStr)
}
size, err := strconv.ParseFloat(matches[1], 64)
diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go
index ac95b5ccde730..a331479d10ea1 100644
--- a/plugins/inputs/docker/docker_test.go
+++ b/plugins/inputs/docker/docker_test.go
@@ -7,11 +7,12 @@ import (
"sort"
"strings"
"testing"
-
- "github.com/influxdata/telegraf/testutil"
+ "time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@@ -83,7 +84,7 @@ var baseClient = MockClient{
return containerStats(s), nil
},
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
- return containerInspect, nil
+ return containerInspect(), nil
},
ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) {
return ServiceList, nil
@@ -252,6 +253,7 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
+ Log: testutil.Logger{},
newClient: func(string, *tls.Config) (Client, error) {
return &MockClient{
InfoF: func(ctx context.Context) (types.Info, error) {
@@ -264,7 +266,7 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
return containerStatsWindows(), nil
},
ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) {
- return containerInspect, nil
+ return containerInspect(), nil
},
ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) {
return ServiceList, nil
@@ -390,6 +392,7 @@ func TestContainerLabels(t *testing.T) {
}
d := Docker{
+ Log: testutil.Logger{},
newClient: newClientFunc,
LabelInclude: tt.include,
LabelExclude: tt.exclude,
@@ -511,6 +514,7 @@ func TestContainerNames(t *testing.T) {
}
d := Docker{
+ Log: testutil.Logger{},
newClient: newClientFunc,
ContainerInclude: tt.include,
ContainerExclude: tt.exclude,
@@ -538,9 +542,212 @@ func TestContainerNames(t *testing.T) {
}
}
+func FilterMetrics(metrics []telegraf.Metric, f func(telegraf.Metric) bool) []telegraf.Metric {
+ results := []telegraf.Metric{}
+ for _, m := range metrics {
+ if f(m) {
+ results = append(results, m)
+ }
+ }
+ return results
+}
+
+func TestContainerStatus(t *testing.T) {
+ var tests = []struct {
+ name string
+ now func() time.Time
+ inspect types.ContainerJSON
+ expected []telegraf.Metric
+ }{
+ {
+ name: "finished_at is zero value",
+ now: func() time.Time {
+ return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC)
+ },
+ inspect: containerInspect(),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "docker_container_status",
+ map[string]string{
+ "container_name": "etcd",
+ "container_image": "quay.io/coreos/etcd",
+ "container_version": "v2.2.2",
+ "engine_host": "absol",
+ "label1": "test_value_1",
+ "label2": "test_value_2",
+ "server_version": "17.09.0-ce",
+ "container_status": "running",
+ "source": "e2173b9478a6",
+ },
+ map[string]interface{}{
+ "oomkilled": false,
+ "pid": 1234,
+ "exitcode": 0,
+ "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
+ "started_at": time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC).UnixNano(),
+ "uptime_ns": int64(3 * time.Minute),
+ },
+ time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC),
+ ),
+ },
+ },
+ {
+ name: "finished_at is non-zero value",
+ now: func() time.Time {
+ return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC)
+ },
+ inspect: func() types.ContainerJSON {
+ i := containerInspect()
+ i.ContainerJSONBase.State.FinishedAt = "2018-06-14T05:53:53.266176036Z"
+ return i
+ }(),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "docker_container_status",
+ map[string]string{
+ "container_name": "etcd",
+ "container_image": "quay.io/coreos/etcd",
+ "container_version": "v2.2.2",
+ "engine_host": "absol",
+ "label1": "test_value_1",
+ "label2": "test_value_2",
+ "server_version": "17.09.0-ce",
+ "container_status": "running",
+ "source": "e2173b9478a6",
+ },
+ map[string]interface{}{
+ "oomkilled": false,
+ "pid": 1234,
+ "exitcode": 0,
+ "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
+ "started_at": time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC).UnixNano(),
+ "finished_at": time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC).UnixNano(),
+ "uptime_ns": int64(5 * time.Minute),
+ },
+ time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC),
+ ),
+ },
+ },
+ {
+ name: "started_at is zero value",
+ now: func() time.Time {
+ return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC)
+ },
+ inspect: func() types.ContainerJSON {
+ i := containerInspect()
+ i.ContainerJSONBase.State.StartedAt = ""
+ i.ContainerJSONBase.State.FinishedAt = "2018-06-14T05:53:53.266176036Z"
+ return i
+ }(),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "docker_container_status",
+ map[string]string{
+ "container_name": "etcd",
+ "container_image": "quay.io/coreos/etcd",
+ "container_version": "v2.2.2",
+ "engine_host": "absol",
+ "label1": "test_value_1",
+ "label2": "test_value_2",
+ "server_version": "17.09.0-ce",
+ "container_status": "running",
+ "source": "e2173b9478a6",
+ },
+ map[string]interface{}{
+ "oomkilled": false,
+ "pid": 1234,
+ "exitcode": 0,
+ "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
+ "finished_at": time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC).UnixNano(),
+ },
+ time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC),
+ ),
+ },
+ },
+ {
+ name: "container has been restarted",
+ now: func() time.Time {
+ return time.Date(2019, 1, 1, 0, 0, 3, 0, time.UTC)
+ },
+ inspect: func() types.ContainerJSON {
+ i := containerInspect()
+ i.ContainerJSONBase.State.StartedAt = "2019-01-01T00:00:02Z"
+ i.ContainerJSONBase.State.FinishedAt = "2019-01-01T00:00:01Z"
+ return i
+ }(),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "docker_container_status",
+ map[string]string{
+ "container_name": "etcd",
+ "container_image": "quay.io/coreos/etcd",
+ "container_version": "v2.2.2",
+ "engine_host": "absol",
+ "label1": "test_value_1",
+ "label2": "test_value_2",
+ "server_version": "17.09.0-ce",
+ "container_status": "running",
+ "source": "e2173b9478a6",
+ },
+ map[string]interface{}{
+ "oomkilled": false,
+ "pid": 1234,
+ "exitcode": 0,
+ "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
+ "started_at": time.Date(2019, 1, 1, 0, 0, 2, 0, time.UTC).UnixNano(),
+ "finished_at": time.Date(2019, 1, 1, 0, 0, 1, 0, time.UTC).UnixNano(),
+ "uptime_ns": int64(1 * time.Second),
+ },
+ time.Date(2019, 1, 1, 0, 0, 3, 0, time.UTC),
+ ),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var (
+ acc testutil.Accumulator
+ newClientFunc = func(string, *tls.Config) (Client, error) {
+ client := baseClient
+ client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
+ return containerList[:1], nil
+ }
+ client.ContainerInspectF = func(c context.Context, s string) (types.ContainerJSON, error) {
+ return tt.inspect, nil
+ }
+
+ return &client, nil
+ }
+ d = Docker{
+ Log: testutil.Logger{},
+ newClient: newClientFunc,
+ IncludeSourceTag: true,
+ }
+ )
+
+ // mock time
+ if tt.now != nil {
+ now = tt.now
+ }
+ defer func() {
+ now = time.Now
+ }()
+
+ err := d.Gather(&acc)
+ require.NoError(t, err)
+
+ actual := FilterMetrics(acc.GetTelegrafMetrics(), func(m telegraf.Metric) bool {
+ return m.Name() == "docker_container_status"
+ })
+ testutil.RequireMetricsEqual(t, tt.expected, actual)
+ })
+ }
+}
+
func TestDockerGatherInfo(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
+ Log: testutil.Logger{},
newClient: newClient,
TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5",
"ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"},
@@ -568,6 +775,29 @@ func TestDockerGatherInfo(t *testing.T) {
},
)
+ acc.AssertContainsTaggedFields(t,
+ "docker",
+ map[string]interface{}{
+ "memory_total": int64(3840757760),
+ },
+ map[string]string{
+ "engine_host": "absol",
+ "server_version": "17.09.0-ce",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "docker",
+ map[string]interface{}{
+ "pool_blocksize": int64(65540),
+ },
+ map[string]string{
+ "engine_host": "absol",
+ "server_version": "17.09.0-ce",
+ "unit": "bytes",
+ },
+ )
+
acc.AssertContainsTaggedFields(t,
"docker_data",
map[string]interface{}{
@@ -576,11 +806,46 @@ func TestDockerGatherInfo(t *testing.T) {
"available": int64(36530000000),
},
map[string]string{
+ "engine_host": "absol",
+ "server_version": "17.09.0-ce",
+ "unit": "bytes",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "docker_metadata",
+ map[string]interface{}{
+ "used": int64(20970000),
+ "total": int64(2146999999),
+ "available": int64(2126999999),
+ },
+ map[string]string{
+ "engine_host": "absol",
+ "server_version": "17.09.0-ce",
"unit": "bytes",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "docker_devicemapper",
+ map[string]interface{}{
+ "base_device_size_bytes": int64(10740000000),
+ "pool_blocksize_bytes": int64(65540),
+ "data_space_used_bytes": int64(17300000000),
+ "data_space_total_bytes": int64(107400000000),
+ "data_space_available_bytes": int64(36530000000),
+ "metadata_space_used_bytes": int64(20970000),
+ "metadata_space_total_bytes": int64(2146999999),
+ "metadata_space_available_bytes": int64(2126999999),
+ "thin_pool_minimum_free_space_bytes": int64(10740000000),
+ },
+ map[string]string{
"engine_host": "absol",
"server_version": "17.09.0-ce",
+ "pool_name": "docker-8:1-1182287-pool",
},
)
+
acc.AssertContainsTaggedFields(t,
"docker_container_cpu",
map[string]interface{}{
@@ -632,6 +897,7 @@ func TestDockerGatherInfo(t *testing.T) {
func TestDockerGatherSwarmInfo(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
+ Log: testutil.Logger{},
newClient: newClient,
}
@@ -739,6 +1005,7 @@ func TestContainerStateFilter(t *testing.T) {
}
d := Docker{
+ Log: testutil.Logger{},
newClient: newClientFunc,
ContainerStateInclude: tt.include,
ContainerStateExclude: tt.exclude,
@@ -800,6 +1067,7 @@ func TestContainerName(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
d := Docker{
+ Log: testutil.Logger{},
newClient: tt.clientFunc,
}
var acc testutil.Accumulator
@@ -815,3 +1083,37 @@ func TestContainerName(t *testing.T) {
})
}
}
+
+func TestHostnameFromID(t *testing.T) {
+ tests := []struct {
+ name string
+ id string
+ expect string
+ }{
+ {
+ name: "Real ID",
+ id: "565e3a55f5843cfdd4aa5659a1a75e4e78d47f73c3c483f782fe4a26fc8caa07",
+ expect: "565e3a55f584",
+ },
+ {
+ name: "Short ID",
+ id: "shortid123",
+ expect: "shortid123",
+ },
+ {
+ name: "No ID",
+ id: "",
+ expect: "shortid123",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ output := hostnameFromID(test.id)
+ if test.expect != output {
+ t.Logf("Container ID for hostname is wrong. Want: %s, Got: %s", output, test.expect)
+ }
+ })
+ }
+
+}
diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go
index 7302e219def01..d50b80b9a1d1d 100644
--- a/plugins/inputs/docker/docker_testdata.go
+++ b/plugins/inputs/docker/docker_testdata.go
@@ -47,7 +47,7 @@ var info = types.Info{
HTTPSProxy: "",
Labels: []string{},
MemoryLimit: false,
- DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
+ DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Base Device Size", "10.74 GB"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}, {"Thin Pool Minimum Free Space", "10.74GB"}},
NFd: 19,
HTTPProxy: "",
Driver: "devicemapper",
@@ -492,32 +492,34 @@ func containerStatsWindows() types.ContainerStats {
return stat
}
-var containerInspect = types.ContainerJSON{
- Config: &container.Config{
- Env: []string{
- "ENVVAR1=loremipsum",
- "ENVVAR1FOO=loremipsum",
- "ENVVAR2=dolorsitamet",
- "ENVVAR3==ubuntu:10.04",
- "ENVVAR4",
- "ENVVAR5=",
- "ENVVAR6= ",
- "ENVVAR7=ENVVAR8=ENVVAR9",
- "PATH=/bin:/sbin",
+func containerInspect() types.ContainerJSON {
+ return types.ContainerJSON{
+ Config: &container.Config{
+ Env: []string{
+ "ENVVAR1=loremipsum",
+ "ENVVAR1FOO=loremipsum",
+ "ENVVAR2=dolorsitamet",
+ "ENVVAR3==ubuntu:10.04",
+ "ENVVAR4",
+ "ENVVAR5=",
+ "ENVVAR6= ",
+ "ENVVAR7=ENVVAR8=ENVVAR9",
+ "PATH=/bin:/sbin",
+ },
},
- },
- ContainerJSONBase: &types.ContainerJSONBase{
- State: &types.ContainerState{
- Health: &types.Health{
- FailingStreak: 1,
- Status: "Unhealthy",
+ ContainerJSONBase: &types.ContainerJSONBase{
+ State: &types.ContainerState{
+ Health: &types.Health{
+ FailingStreak: 1,
+ Status: "Unhealthy",
+ },
+ Status: "running",
+ OOMKilled: false,
+ Pid: 1234,
+ ExitCode: 0,
+ StartedAt: "2018-06-14T05:48:53.266176036Z",
+ FinishedAt: "0001-01-01T00:00:00Z",
},
- Status: "running",
- OOMKilled: false,
- Pid: 1234,
- ExitCode: 0,
- StartedAt: "2018-06-14T05:48:53.266176036Z",
- FinishedAt: "0001-01-01T00:00:00Z",
},
- },
+ }
}
diff --git a/plugins/inputs/docker/errors.go b/plugins/inputs/docker/errors.go
new file mode 100644
index 0000000000000..f3c0f76a5c328
--- /dev/null
+++ b/plugins/inputs/docker/errors.go
@@ -0,0 +1,11 @@
+package docker
+
+import "errors"
+
+var (
+ errInfoTimeout = errors.New("timeout retrieving docker engine info")
+ errStatsTimeout = errors.New("timeout retrieving container stats")
+ errInspectTimeout = errors.New("timeout retrieving container environment")
+ errListTimeout = errors.New("timeout retrieving container list")
+ errServiceTimeout = errors.New("timeout retrieving swarm service list")
+)
diff --git a/plugins/inputs/docker/stats_helpers.go b/plugins/inputs/docker/stats_helpers.go
index b4c91e2fc9069..93ea2f2196baf 100644
--- a/plugins/inputs/docker/stats_helpers.go
+++ b/plugins/inputs/docker/stats_helpers.go
@@ -4,7 +4,7 @@ package docker
import "github.com/docker/docker/api/types"
-func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
+func CalculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
var (
cpuPercent = 0.0
// calculate the change for the cpu usage of the container in between readings
@@ -39,13 +39,13 @@ func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
return 0.00
}
-// calculateMemUsageUnixNoCache calculate memory usage of the container.
+// CalculateMemUsageUnixNoCache calculate memory usage of the container.
// Page cache is intentionally excluded to avoid misinterpretation of the output.
-func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 {
+func CalculateMemUsageUnixNoCache(mem types.MemoryStats) float64 {
return float64(mem.Usage - mem.Stats["cache"])
}
-func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 {
+func CalculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 {
// MemoryStats.Limit will never be 0 unless the container is not running and we haven't
// got any data from cgroup
if limit != 0 {
diff --git a/plugins/inputs/docker_log/README.md b/plugins/inputs/docker_log/README.md
new file mode 100644
index 0000000000000..d2f0dc6144ff9
--- /dev/null
+++ b/plugins/inputs/docker_log/README.md
@@ -0,0 +1,99 @@
+# Docker Log Input Plugin
+
+The docker log plugin uses the Docker Engine API to get logs on running
+docker containers.
+
+The docker plugin uses the [Official Docker Client][] to gather logs from the
+[Engine API][].
+
+**Note:** This plugin works only for containers with the `local` or
+`json-file` or `journald` logging driver.
+
+[Official Docker Client]: https://github.com/moby/moby/tree/master/client
+[Engine API]: https://docs.docker.com/engine/api/v1.24/
+
+### Configuration
+
+```toml
+[[inputs.docker_log]]
+ ## Docker Endpoint
+ ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+ ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
+ # endpoint = "unix:///var/run/docker.sock"
+
+ ## When true, container logs are read from the beginning; otherwise
+ ## reading begins at the end of the log.
+ # from_beginning = false
+
+ ## Timeout for Docker API calls.
+ # timeout = "5s"
+
+ ## Containers to include and exclude. Globs accepted.
+ ## Note that an empty array for both will include all containers
+ # container_name_include = []
+ # container_name_exclude = []
+
+ ## Container states to include and exclude. Globs accepted.
+ ## When empty only containers in the "running" state will be captured.
+ # container_state_include = []
+ # container_state_exclude = []
+
+ ## docker labels to include and exclude as tags. Globs accepted.
+ ## Note that an empty array for both will include all labels as tags
+ # docker_label_include = []
+ # docker_label_exclude = []
+
+ ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+ source_tag = false
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+```
+
+#### Environment Configuration
+
+When using the `"ENV"` endpoint, the connection is configured using the
+[CLI Docker environment variables][env]
+
+[env]: https://godoc.org/github.com/moby/moby/client#NewEnvClient
+
+### source tag
+
+Selecting the containers can be tricky if you have many containers with the same name.
+To alleviate this issue you can set the below value to `true`
+
+```toml
+source_tag = true
+```
+
+This will cause all data points to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker.
+
+### Metrics
+
+- docker_log
+ - tags:
+ - container_image
+ - container_version
+ - container_name
+ - stream (stdout, stderr, or tty)
+ - source
+ - fields:
+ - container_id
+ - message
+
+### Example Output
+
+```
+docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! [agent] Config: Interval:10s, Quiet:false, Hostname:\"371ee5d3e587\", Flush Interval:10s" 1560913872000000000
+docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Tags enabled: host=371ee5d3e587" 1560913872000000000
+docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded outputs: file" 1560913872000000000
+docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded processors:" 1560913872000000000
+docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded aggregators:" 1560913872000000000
+docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded inputs: net" 1560913872000000000
+docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Using config file: /etc/telegraf/telegraf.conf" 1560913872000000000
+docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Starting Telegraf 1.10.4" 1560913872000000000
+```
diff --git a/plugins/inputs/docker_log/client.go b/plugins/inputs/docker_log/client.go
new file mode 100644
index 0000000000000..7667c6e4d67e5
--- /dev/null
+++ b/plugins/inputs/docker_log/client.go
@@ -0,0 +1,63 @@
+package docker_log
+
+import (
+ "context"
+ "crypto/tls"
+ "io"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ docker "github.com/docker/docker/client"
+)
+
+/*This file is inherited from telegraf docker input plugin*/
+var (
+ version = "1.24"
+ defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
+)
+
+type Client interface {
+ ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
+ ContainerLogs(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
+}
+
+func NewEnvClient() (Client, error) {
+ client, err := docker.NewClientWithOpts(docker.FromEnv)
+ if err != nil {
+ return nil, err
+ }
+ return &SocketClient{client}, nil
+}
+
+func NewClient(host string, tlsConfig *tls.Config) (Client, error) {
+ transport := &http.Transport{
+ TLSClientConfig: tlsConfig,
+ }
+ httpClient := &http.Client{Transport: transport}
+ client, err := docker.NewClientWithOpts(
+ docker.WithHTTPHeaders(defaultHeaders),
+ docker.WithHTTPClient(httpClient),
+ docker.WithVersion(version),
+ docker.WithHost(host))
+
+ if err != nil {
+ return nil, err
+ }
+ return &SocketClient{client}, nil
+}
+
+type SocketClient struct {
+ client *docker.Client
+}
+
+func (c *SocketClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+ return c.client.ContainerList(ctx, options)
+}
+
+func (c *SocketClient) ContainerLogs(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ return c.client.ContainerLogs(ctx, containerID, options)
+}
+func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+ return c.client.ContainerInspect(ctx, containerID)
+}
diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go
new file mode 100644
index 0000000000000..27462ec5a66e7
--- /dev/null
+++ b/plugins/inputs/docker_log/docker_log.go
@@ -0,0 +1,476 @@
+package docker_log
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/pkg/stdcopy"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/filter"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/docker"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+var sampleConfig = `
+ ## Docker Endpoint
+ ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+ ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
+ # endpoint = "unix:///var/run/docker.sock"
+
+ ## When true, container logs are read from the beginning; otherwise
+ ## reading begins at the end of the log.
+ # from_beginning = false
+
+ ## Timeout for Docker API calls.
+ # timeout = "5s"
+
+ ## Containers to include and exclude. Globs accepted.
+ ## Note that an empty array for both will include all containers
+ # container_name_include = []
+ # container_name_exclude = []
+
+ ## Container states to include and exclude. Globs accepted.
+ ## When empty only containers in the "running" state will be captured.
+ # container_state_include = []
+ # container_state_exclude = []
+
+ ## docker labels to include and exclude as tags. Globs accepted.
+ ## Note that an empty array for both will include all labels as tags
+ # docker_label_include = []
+ # docker_label_exclude = []
+
+ ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+ source_tag = false
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+`
+
+const (
+ defaultEndpoint = "unix:///var/run/docker.sock"
+
+ // Maximum bytes of a log line before it will be split, size is mirroring
+ // docker code:
+ // https://github.com/moby/moby/blob/master/daemon/logger/copier.go#L21
+ maxLineBytes = 16 * 1024
+)
+
+var (
+ containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}
+ // ensure *DockerLogs implements telegraf.ServiceInput
+ _ telegraf.ServiceInput = (*DockerLogs)(nil)
+)
+
+type DockerLogs struct {
+ Endpoint string `toml:"endpoint"`
+ FromBeginning bool `toml:"from_beginning"`
+ Timeout internal.Duration `toml:"timeout"`
+ LabelInclude []string `toml:"docker_label_include"`
+ LabelExclude []string `toml:"docker_label_exclude"`
+ ContainerInclude []string `toml:"container_name_include"`
+ ContainerExclude []string `toml:"container_name_exclude"`
+ ContainerStateInclude []string `toml:"container_state_include"`
+ ContainerStateExclude []string `toml:"container_state_exclude"`
+ IncludeSourceTag bool `toml:"source_tag"`
+
+ tlsint.ClientConfig
+
+ newEnvClient func() (Client, error)
+ newClient func(string, *tls.Config) (Client, error)
+
+ client Client
+ labelFilter filter.Filter
+ containerFilter filter.Filter
+ stateFilter filter.Filter
+ opts types.ContainerListOptions
+ wg sync.WaitGroup
+ mu sync.Mutex
+ containerList map[string]context.CancelFunc
+}
+
+func (d *DockerLogs) Description() string {
+ return "Read logging output from the Docker engine"
+}
+
+func (d *DockerLogs) SampleConfig() string {
+ return sampleConfig
+}
+
+func (d *DockerLogs) Init() error {
+ var err error
+ if d.Endpoint == "ENV" {
+ d.client, err = d.newEnvClient()
+ if err != nil {
+ return err
+ }
+ } else {
+ tlsConfig, err := d.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+ d.client, err = d.newClient(d.Endpoint, tlsConfig)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Create filters
+ err = d.createLabelFilters()
+ if err != nil {
+ return err
+ }
+ err = d.createContainerFilters()
+ if err != nil {
+ return err
+ }
+ err = d.createContainerStateFilters()
+ if err != nil {
+ return err
+ }
+
+ filterArgs := filters.NewArgs()
+ for _, state := range containerStates {
+ if d.stateFilter.Match(state) {
+ filterArgs.Add("status", state)
+ }
+ }
+
+ if filterArgs.Len() != 0 {
+ d.opts = types.ContainerListOptions{
+ Filters: filterArgs,
+ }
+ }
+
+ return nil
+}
+
+func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ d.containerList[containerID] = cancel
+ return nil
+}
+
+func (d *DockerLogs) removeFromContainerList(containerID string) error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ delete(d.containerList, containerID)
+ return nil
+}
+
+func (d *DockerLogs) containerInContainerList(containerID string) bool {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ _, ok := d.containerList[containerID]
+ return ok
+}
+
+func (d *DockerLogs) cancelTails() error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ for _, cancel := range d.containerList {
+ cancel()
+ }
+ return nil
+}
+
+func (d *DockerLogs) matchedContainerName(names []string) string {
+ // Check if all container names are filtered; in practice I believe
+ // this array is always of length 1.
+ for _, name := range names {
+ trimmedName := strings.TrimPrefix(name, "/")
+ match := d.containerFilter.Match(trimmedName)
+ if match {
+ return trimmedName
+ }
+ }
+ return ""
+}
+
+func (d *DockerLogs) Gather(acc telegraf.Accumulator) error {
+ ctx := context.Background()
+ acc.SetPrecision(time.Nanosecond)
+
+ ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration)
+ defer cancel()
+ containers, err := d.client.ContainerList(ctx, d.opts)
+ if err != nil {
+ return err
+ }
+
+ for _, container := range containers {
+ if d.containerInContainerList(container.ID) {
+ continue
+ }
+
+ containerName := d.matchedContainerName(container.Names)
+ if containerName == "" {
+ continue
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ d.addToContainerList(container.ID, cancel)
+
+ // Start a new goroutine for every new container that has logs to collect
+ d.wg.Add(1)
+ go func(container types.Container) {
+ defer d.wg.Done()
+ defer d.removeFromContainerList(container.ID)
+
+ err = d.tailContainerLogs(ctx, acc, container, containerName)
+ if err != nil && err != context.Canceled {
+ acc.AddError(err)
+ }
+ }(container)
+ }
+ return nil
+}
+
+func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (bool, error) {
+ ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration)
+ defer cancel()
+ c, err := d.client.ContainerInspect(ctx, container.ID)
+ if err != nil {
+ return false, err
+ }
+ return c.Config.Tty, nil
+}
+
+func (d *DockerLogs) tailContainerLogs(
+ ctx context.Context,
+ acc telegraf.Accumulator,
+ container types.Container,
+ containerName string,
+) error {
+ imageName, imageVersion := docker.ParseImage(container.Image)
+ tags := map[string]string{
+ "container_name": containerName,
+ "container_image": imageName,
+ "container_version": imageVersion,
+ }
+
+ if d.IncludeSourceTag {
+ tags["source"] = hostnameFromID(container.ID)
+ }
+
+ // Add matching container labels as tags
+ for k, label := range container.Labels {
+ if d.labelFilter.Match(k) {
+ tags[k] = label
+ }
+ }
+
+ hasTTY, err := d.hasTTY(ctx, container)
+ if err != nil {
+ return err
+ }
+
+ tail := "0"
+ if d.FromBeginning {
+ tail = "all"
+ }
+
+ logOptions := types.ContainerLogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ Timestamps: true,
+ Details: false,
+ Follow: true,
+ Tail: tail,
+ }
+
+ logReader, err := d.client.ContainerLogs(ctx, container.ID, logOptions)
+ if err != nil {
+ return err
+ }
+
+ // If the container is using a TTY, there is only a single stream
+ // (stdout), and data is copied directly from the container output stream,
+ // no extra multiplexing or headers.
+ //
+ // If the container is *not* using a TTY, streams for stdout and stderr are
+ // multiplexed.
+ if hasTTY {
+ return tailStream(acc, tags, container.ID, logReader, "tty")
+ } else {
+ return tailMultiplexed(acc, tags, container.ID, logReader)
+ }
+}
+
+func parseLine(line []byte) (time.Time, string, error) {
+ parts := bytes.SplitN(line, []byte(" "), 2)
+
+ switch len(parts) {
+ case 1:
+ parts = append(parts, []byte(""))
+ }
+
+ tsString := string(parts[0])
+
+ // Keep any leading space, but remove whitespace from end of line.
+ // This preserves space in, for example, stacktraces, while removing
+ // annoying end of line characters and is similar to how other logging
+ // plugins such as syslog behave.
+ message := bytes.TrimRightFunc(parts[1], unicode.IsSpace)
+
+ ts, err := time.Parse(time.RFC3339Nano, tsString)
+ if err != nil {
+ return time.Time{}, "", fmt.Errorf("error parsing timestamp %q: %v", tsString, err)
+ }
+
+ return ts, string(message), nil
+}
+
+func tailStream(
+ acc telegraf.Accumulator,
+ baseTags map[string]string,
+ containerID string,
+ reader io.ReadCloser,
+ stream string,
+) error {
+ defer reader.Close()
+
+ tags := make(map[string]string, len(baseTags)+1)
+ for k, v := range baseTags {
+ tags[k] = v
+ }
+ tags["stream"] = stream
+
+ r := bufio.NewReaderSize(reader, 64*1024)
+
+ for {
+ line, err := r.ReadBytes('\n')
+
+ if len(line) != 0 {
+ ts, message, err := parseLine(line)
+ if err != nil {
+ acc.AddError(err)
+ } else {
+ acc.AddFields("docker_log", map[string]interface{}{
+ "container_id": containerID,
+ "message": message,
+ }, tags, ts)
+ }
+ }
+
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+}
+
+func tailMultiplexed(
+ acc telegraf.Accumulator,
+ tags map[string]string,
+ containerID string,
+ src io.ReadCloser,
+) error {
+ outReader, outWriter := io.Pipe()
+ errReader, errWriter := io.Pipe()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := tailStream(acc, tags, containerID, outReader, "stdout")
+ if err != nil {
+ acc.AddError(err)
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := tailStream(acc, tags, containerID, errReader, "stderr")
+ if err != nil {
+ acc.AddError(err)
+ }
+ }()
+
+ _, err := stdcopy.StdCopy(outWriter, errWriter, src)
+ outWriter.Close()
+ errWriter.Close()
+ src.Close()
+ wg.Wait()
+ return err
+}
+
+// Start is a noop which is required for a *DockerLogs to implement
+// the telegraf.ServiceInput interface
+func (d *DockerLogs) Start(telegraf.Accumulator) error {
+ return nil
+}
+
+func (d *DockerLogs) Stop() {
+ d.cancelTails()
+ d.wg.Wait()
+}
+
+// Following few functions have been inherited from telegraf docker input plugin
+func (d *DockerLogs) createContainerFilters() error {
+ filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude)
+ if err != nil {
+ return err
+ }
+ d.containerFilter = filter
+ return nil
+}
+
+func (d *DockerLogs) createLabelFilters() error {
+ filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude)
+ if err != nil {
+ return err
+ }
+ d.labelFilter = filter
+ return nil
+}
+
+func (d *DockerLogs) createContainerStateFilters() error {
+ if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 {
+ d.ContainerStateInclude = []string{"running"}
+ }
+ filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude)
+ if err != nil {
+ return err
+ }
+ d.stateFilter = filter
+ return nil
+}
+
+func init() {
+ inputs.Add("docker_log", func() telegraf.Input {
+ return &DockerLogs{
+ Timeout: internal.Duration{Duration: time.Second * 5},
+ Endpoint: defaultEndpoint,
+ newEnvClient: NewEnvClient,
+ newClient: NewClient,
+ containerList: make(map[string]context.CancelFunc),
+ }
+ })
+}
+
+func hostnameFromID(id string) string {
+ if len(id) > 12 {
+ return id[0:12]
+ }
+ return id
+}
diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go
new file mode 100644
index 0000000000000..c8903c9d8ec28
--- /dev/null
+++ b/plugins/inputs/docker_log/docker_log_test.go
@@ -0,0 +1,188 @@
+package docker_log
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/pkg/stdcopy"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+type MockClient struct {
+ ContainerListF func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
+ ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error)
+ ContainerLogsF func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+}
+
+func (c *MockClient) ContainerList(
+ ctx context.Context,
+ options types.ContainerListOptions,
+) ([]types.Container, error) {
+ return c.ContainerListF(ctx, options)
+}
+
+func (c *MockClient) ContainerInspect(
+ ctx context.Context,
+ containerID string,
+) (types.ContainerJSON, error) {
+ return c.ContainerInspectF(ctx, containerID)
+}
+
+func (c *MockClient) ContainerLogs(
+ ctx context.Context,
+ containerID string,
+ options types.ContainerLogsOptions,
+) (io.ReadCloser, error) {
+ return c.ContainerLogsF(ctx, containerID, options)
+}
+
+type Response struct {
+ io.Reader
+}
+
+func (r *Response) Close() error {
+ return nil
+}
+
+func MustParse(layout, value string) time.Time {
+ tm, err := time.Parse(layout, value)
+ if err != nil {
+ panic(err)
+ }
+ return tm
+}
+
+func Test(t *testing.T) {
+ tests := []struct {
+ name string
+ client *MockClient
+ expected []telegraf.Metric
+ }{
+ {
+ name: "no containers",
+ client: &MockClient{
+ ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+ return nil, nil
+ },
+ },
+ },
+ {
+ name: "one container tty",
+ client: &MockClient{
+ ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+ return []types.Container{
+ {
+ ID: "deadbeef",
+ Names: []string{"/telegraf"},
+ Image: "influxdata/telegraf:1.11.0",
+ },
+ }, nil
+ },
+ ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+ return types.ContainerJSON{
+ Config: &container.Config{
+ Tty: true,
+ },
+ }, nil
+ },
+ ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ return &Response{Reader: bytes.NewBuffer([]byte("2020-04-28T18:43:16.432691200Z hello\n"))}, nil
+ },
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "docker_log",
+ map[string]string{
+ "container_name": "telegraf",
+ "container_image": "influxdata/telegraf",
+ "container_version": "1.11.0",
+ "stream": "tty",
+ "source": "deadbeef",
+ },
+ map[string]interface{}{
+ "container_id": "deadbeef",
+ "message": "hello",
+ },
+ MustParse(time.RFC3339Nano, "2020-04-28T18:43:16.432691200Z"),
+ ),
+ },
+ },
+ {
+ name: "one container multiplex",
+ client: &MockClient{
+ ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+ return []types.Container{
+ {
+ ID: "deadbeef",
+ Names: []string{"/telegraf"},
+ Image: "influxdata/telegraf:1.11.0",
+ },
+ }, nil
+ },
+ ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+ return types.ContainerJSON{
+ Config: &container.Config{
+ Tty: false,
+ },
+ }, nil
+ },
+ ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ var buf bytes.Buffer
+ w := stdcopy.NewStdWriter(&buf, stdcopy.Stdout)
+ w.Write([]byte("2020-04-28T18:42:16.432691200Z hello from stdout"))
+ return &Response{Reader: &buf}, nil
+ },
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "docker_log",
+ map[string]string{
+ "container_name": "telegraf",
+ "container_image": "influxdata/telegraf",
+ "container_version": "1.11.0",
+ "stream": "stdout",
+ "source": "deadbeef",
+ },
+ map[string]interface{}{
+ "container_id": "deadbeef",
+ "message": "hello from stdout",
+ },
+ MustParse(time.RFC3339Nano, "2020-04-28T18:42:16.432691200Z"),
+ ),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var acc testutil.Accumulator
+ plugin := &DockerLogs{
+ Timeout: internal.Duration{Duration: time.Second * 5},
+ newClient: func(string, *tls.Config) (Client, error) { return tt.client, nil },
+ containerList: make(map[string]context.CancelFunc),
+ IncludeSourceTag: true,
+ }
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ err = plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ acc.Wait(len(tt.expected))
+ plugin.Stop()
+
+ require.Nil(t, acc.Errors) // no errors during gathering
+
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
+ })
+ }
+}
diff --git a/plugins/inputs/dovecot/README.md b/plugins/inputs/dovecot/README.md
index c853832b69b46..3b6129488dae3 100644
--- a/plugins/inputs/dovecot/README.md
+++ b/plugins/inputs/dovecot/README.md
@@ -8,7 +8,7 @@ the [upgrading steps][upgrading].
### Configuration:
-```
+```toml
# Read metrics about dovecot servers
[[inputs.dovecot]]
## specify dovecot servers via an address:port list
@@ -17,8 +17,10 @@ the [upgrading steps][upgrading].
##
## If no servers are specified, then localhost is used as the host.
servers = ["localhost:24242"]
+
## Type is one of "user", "domain", "ip", or "global"
type = "global"
+
## Wildcard matches like "*.com". An empty string "" is same as "*"
## If type = "ip" filters should be
filters = [""]
diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go
index a621252e5dd4b..66282c43423b2 100644
--- a/plugins/inputs/dovecot/dovecot.go
+++ b/plugins/inputs/dovecot/dovecot.go
@@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"io"
- // "log"
"net"
"strconv"
"strings"
@@ -32,8 +31,10 @@ var sampleConfig = `
##
## If no servers are specified, then localhost is used as the host.
servers = ["localhost:24242"]
+
## Type is one of "user", "domain", "ip", or "global"
type = "global"
+
## Wildcard matches like "*.com". An empty string "" is same as "*"
## If type = "ip" filters should be
filters = [""]
@@ -82,12 +83,12 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error {
_, _, err := net.SplitHostPort(addr)
if err != nil {
- return fmt.Errorf("Error: %s on url %s\n", err, addr)
+ return fmt.Errorf("%q on url %s", err.Error(), addr)
}
c, err := net.DialTimeout("tcp", addr, defaultTimeout)
if err != nil {
- return fmt.Errorf("Unable to connect to dovecot server '%s': %s", addr, err)
+ return fmt.Errorf("enable to connect to dovecot server '%s': %s", addr, err)
}
defer c.Close()
diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md
new file mode 100644
index 0000000000000..9e3188eec30bf
--- /dev/null
+++ b/plugins/inputs/ecs/README.md
@@ -0,0 +1,246 @@
+# Amazon ECS Input Plugin
+
+Amazon ECS, Fargate compatible, input plugin which uses the Amazon ECS metadata and
+stats [v2][task-metadata-endpoint-v2] or [v3][task-metadata-endpoint-v3] API endpoints
+to gather stats on running containers in a Task.
+
+The telegraf container must be run in the same Task as the workload it is
+inspecting.
+
+This is similar to (and reuses a few pieces of) the [Docker][docker-input]
+input plugin, with some ECS specific modifications for AWS metadata and stats
+formats.
+
+The amazon-ecs-agent (though it _is_ a container running on the host) is not
+present in the metadata/stats endpoints.
+
+### Configuration
+
+```toml
+# Read metrics about ECS containers
+[[inputs.ecs]]
+ ## ECS metadata url.
+ ## Metadata v2 API is used if set explicitly. Otherwise,
+ ## v3 metadata endpoint API is used if available.
+ # endpoint_url = ""
+
+ ## Containers to include and exclude. Globs accepted.
+ ## Note that an empty array for both will include all containers
+ # container_name_include = []
+ # container_name_exclude = []
+
+ ## Container states to include and exclude. Globs accepted.
+ ## When empty only containers in the "RUNNING" state will be captured.
+ ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+ ## "RESOURCES_PROVISIONED", "STOPPED".
+ # container_status_include = []
+ # container_status_exclude = []
+
+ ## ecs labels to include and exclude as tags. Globs accepted.
+ ## Note that an empty array for both will include all labels as tags
+ ecs_label_include = [ "com.amazonaws.ecs.*" ]
+ ecs_label_exclude = []
+
+ ## Timeout for queries.
+ # timeout = "5s"
+```
+
+### Configuration (enforce v2 metadata)
+
+```toml
+# Read metrics about ECS containers
+[[inputs.ecs]]
+ ## ECS metadata url.
+ ## Metadata v2 API is used if set explicitly. Otherwise,
+ ## v3 metadata endpoint API is used if available.
+ endpoint_url = "http://169.254.170.2"
+
+ ## Containers to include and exclude. Globs accepted.
+ ## Note that an empty array for both will include all containers
+ # container_name_include = []
+ # container_name_exclude = []
+
+ ## Container states to include and exclude. Globs accepted.
+ ## When empty only containers in the "RUNNING" state will be captured.
+ ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+ ## "RESOURCES_PROVISIONED", "STOPPED".
+ # container_status_include = []
+ # container_status_exclude = []
+
+ ## ecs labels to include and exclude as tags. Globs accepted.
+ ## Note that an empty array for both will include all labels as tags
+ ecs_label_include = [ "com.amazonaws.ecs.*" ]
+ ecs_label_exclude = []
+
+ ## Timeout for queries.
+ # timeout = "5s"
+```
+
+### Metrics
+
+- ecs_task
+ - tags:
+ - cluster
+ - task_arn
+ - family
+ - revision
+ - id
+ - name
+ - fields:
+ - revision (string)
+ - desired_status (string)
+ - known_status (string)
+ - limit_cpu (float)
+ - limit_mem (float)
+
++ ecs_container_mem
+ - tags:
+ - cluster
+ - task_arn
+ - family
+ - revision
+ - id
+ - name
+ - fields:
+ - container_id
+ - active_anon
+ - active_file
+ - cache
+ - hierarchical_memory_limit
+ - inactive_anon
+ - inactive_file
+ - mapped_file
+ - pgfault
+ - pgmajfault
+ - pgpgin
+ - pgpgout
+ - rss
+ - rss_huge
+ - total_active_anon
+ - total_active_file
+ - total_cache
+ - total_inactive_anon
+ - total_inactive_file
+ - total_mapped_file
+ - total_pgfault
+ - total_pgmajfault
+ - total_pgpgin
+ - total_pgpgout
+ - total_rss
+ - total_rss_huge
+ - total_unevictable
+ - total_writeback
+ - unevictable
+ - writeback
+ - fail_count
+ - limit
+ - max_usage
+ - usage
+ - usage_percent
+
+- ecs_container_cpu
+ - tags:
+ - cluster
+ - task_arn
+ - family
+ - revision
+ - id
+ - name
+ - cpu
+ - fields:
+ - container_id
+ - usage_total
+ - usage_in_usermode
+ - usage_in_kernelmode
+ - usage_system
+ - throttling_periods
+ - throttling_throttled_periods
+ - throttling_throttled_time
+ - usage_percent
+ - usage_total
+
++ ecs_container_net
+ - tags:
+ - cluster
+ - task_arn
+ - family
+ - revision
+ - id
+ - name
+ - network
+ - fields:
+ - container_id
+ - rx_packets
+ - rx_dropped
+ - rx_bytes
+ - rx_errors
+ - tx_packets
+ - tx_dropped
+ - tx_bytes
+ - tx_errors
+
+- ecs_container_blkio
+ - tags:
+ - cluster
+ - task_arn
+ - family
+ - revision
+ - id
+ - name
+ - device
+ - fields:
+ - container_id
+ - io_service_bytes_recursive_async
+ - io_service_bytes_recursive_read
+ - io_service_bytes_recursive_sync
+ - io_service_bytes_recursive_total
+ - io_service_bytes_recursive_write
+ - io_serviced_recursive_async
+ - io_serviced_recursive_read
+ - io_serviced_recursive_sync
+ - io_serviced_recursive_total
+ - io_serviced_recursive_write
+
++ ecs_container_meta
+ - tags:
+ - cluster
+ - task_arn
+ - family
+ - revision
+ - id
+ - name
+ - fields:
+ - container_id
+ - docker_name
+ - image
+ - image_id
+ - desired_status
+ - known_status
+ - limit_cpu
+ - limit_mem
+ - created_at
+ - started_at
+ - type
+
+
+### Example Output
+
+```
+ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a revision="2",desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000
+ecs_container_mem,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a active_anon=40960i,active_file=8192i,cache=790528i,pgpgin=1243i,total_pgfault=1298i,total_rss=40960i,limit=1033658368i,max_usage=4825088i,hierarchical_memory_limit=536870912i,rss=40960i,total_active_file=8192i,total_mapped_file=618496i,usage_percent=0.05349543109392212,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",pgfault=1298i,pgmajfault=6i,pgpgout=1040i,total_active_anon=40960i,total_inactive_file=782336i,total_pgpgin=1243i,usage=552960i,inactive_file=782336i,mapped_file=618496i,total_cache=790528i,total_pgpgout=1040i 1542642001000000000
+ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu-total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a usage_in_kernelmode=0i,throttling_throttled_periods=0i,throttling_periods=0i,throttling_throttled_time=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_percent=0,usage_total=26426156i,usage_in_usermode=20000000i,usage_system=2336100000000i 1542642001000000000
+ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu0,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_total=26426156i 1542642001000000000
+ecs_container_net,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,network=eth0,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a rx_errors=0i,rx_packets=36i,tx_errors=0i,tx_bytes=648i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",rx_dropped=0i,rx_bytes=5338i,tx_packets=8i,tx_dropped=0i 1542642001000000000
+ecs_container_net,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,network=eth5,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a rx_errors=0i,tx_packets=9i,rx_packets=26i,tx_errors=0i,rx_bytes=4641i,tx_dropped=0i,tx_bytes=690i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",rx_dropped=0i 1542642001000000000
+ecs_container_net,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,network=total,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a rx_dropped=0i,rx_bytes=9979i,rx_errors=0i,rx_packets=62i,tx_bytes=1338i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",tx_packets=17i,tx_dropped=0i,tx_errors=0i 1542642001000000000
+ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=253:1,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_total=790528i,io_serviced_recursive_sync=10i,io_serviced_recursive_write=0i,io_serviced_recursive_async=0i,io_serviced_recursive_total=10i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_read=790528i,io_service_bytes_recursive_write=0i,io_service_bytes_recursive_async=0i,io_serviced_recursive_read=10i 1542642001000000000
+ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=253:2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_total=790528i,io_serviced_recursive_async=0i,io_serviced_recursive_total=10i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_read=790528i,io_service_bytes_recursive_write=0i,io_service_bytes_recursive_async=0i,io_serviced_recursive_read=10i,io_serviced_recursive_write=0i,io_serviced_recursive_sync=10i 1542642001000000000
+ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=253:4,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_service_bytes_recursive_write=0i,io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_async=0i,io_service_bytes_recursive_total=790528i,io_serviced_recursive_async=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_read=790528i,io_serviced_recursive_read=10i,io_serviced_recursive_write=0i,io_serviced_recursive_sync=10i,io_serviced_recursive_total=10i 1542642001000000000
+ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=202:26368,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_serviced_recursive_read=10i,io_serviced_recursive_write=0i,io_serviced_recursive_sync=10i,io_serviced_recursive_async=0i,io_serviced_recursive_total=10i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_total=790528i,io_service_bytes_recursive_async=0i,io_service_bytes_recursive_read=790528i,io_service_bytes_recursive_write=0i 1542642001000000000
+ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_serviced_recursive_async=0i,io_serviced_recursive_read=40i,io_serviced_recursive_sync=40i,io_serviced_recursive_write=0i,io_serviced_recursive_total=40i,io_service_bytes_recursive_read=3162112i,io_service_bytes_recursive_write=0i,io_service_bytes_recursive_async=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_sync=3162112i,io_service_bytes_recursive_total=3162112i 1542642001000000000
+ecs_container_meta,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a limit_mem=0,type="CNI_PAUSE",container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",docker_name="ecs-nginx-2-internalecspause",limit_cpu=0,known_status="RESOURCES_PROVISIONED",image="amazon/amazon-ecs-pause:0.1.0",image_id="",desired_status="RESOURCES_PROVISIONED" 1542642001000000000
+```
+
+[docker-input]: /plugins/inputs/docker/README.md
+[task-metadata-endpoint-v2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html
+[task-metadata-endpoint-v3] https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html
diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go
new file mode 100644
index 0000000000000..d7ce10cb2a2e0
--- /dev/null
+++ b/plugins/inputs/ecs/client.go
@@ -0,0 +1,169 @@
+package ecs
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/docker/docker/api/types"
+)
+
+var (
+ // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html
+ ecsMetadataPath = "/v2/metadata"
+ ecsMetaStatsPath = "/v2/stats"
+
+ // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html
+ ecsMetadataPathV3 = "/task"
+ ecsMetaStatsPathV3 = "/task/stats"
+)
+
+// Client is the ECS client contract
+type Client interface {
+ Task() (*Task, error)
+ ContainerStats() (map[string]types.StatsJSON, error)
+}
+
+type httpClient interface {
+ Do(req *http.Request) (*http.Response, error)
+}
+
+// NewClient constructs an ECS client with the passed configuration params
+func NewClient(timeout time.Duration, endpoint string, version int) (*EcsClient, error) {
+ if version != 2 && version != 3 {
+ const msg = "expected metadata version 2 or 3, got %d"
+ return nil, fmt.Errorf(msg, version)
+ }
+
+ baseURL, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ c := &http.Client{
+ Timeout: timeout,
+ }
+
+ return &EcsClient{
+ client: c,
+ baseURL: baseURL,
+ taskURL: resolveTaskURL(baseURL, version),
+ statsURL: resolveStatsURL(baseURL, version),
+ version: version,
+ }, nil
+}
+
+func resolveTaskURL(base *url.URL, version int) string {
+ var path string
+ switch version {
+ case 2:
+ path = ecsMetadataPath
+ case 3:
+ path = ecsMetadataPathV3
+ default:
+ // Should never happen.
+ const msg = "resolveTaskURL: unexpected version %d"
+ panic(fmt.Errorf(msg, version))
+ }
+ return resolveURL(base, path)
+}
+
+func resolveStatsURL(base *url.URL, version int) string {
+ var path string
+ switch version {
+ case 2:
+ path = ecsMetaStatsPath
+ case 3:
+ path = ecsMetaStatsPathV3
+ default:
+ // Should never happen.
+ const msg = "resolveStatsURL: unexpected version %d"
+ panic(fmt.Errorf(msg, version))
+ }
+ return resolveURL(base, path)
+}
+
+// resolveURL returns a URL string by concatenating the string representation of base
+// and path. This is consistent with AWS metadata documentation:
+// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html#task-metadata-endpoint-v3-paths
+func resolveURL(base *url.URL, path string) string {
+ return base.String() + path
+}
+
+// EcsClient contains ECS connection config
+type EcsClient struct {
+ client httpClient
+ version int
+ baseURL *url.URL
+ taskURL string
+ statsURL string
+}
+
+// Task calls the ECS metadata endpoint and returns a populated Task
+func (c *EcsClient) Task() (*Task, error) {
+ req, _ := http.NewRequest("GET", c.taskURL, nil)
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+ body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200))
+ return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.taskURL, resp.Status, body)
+ }
+
+ task, err := unmarshalTask(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ return task, nil
+}
+
+// ContainerStats calls the ECS stats endpoint and returns a populated container stats map
+func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) {
+ req, _ := http.NewRequest("GET", c.statsURL, nil)
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return map[string]types.StatsJSON{}, err
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+ body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200))
+ return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body)
+ }
+
+ statsMap, err := unmarshalStats(resp.Body)
+ if err != nil {
+ return map[string]types.StatsJSON{}, err
+ }
+
+ return statsMap, nil
+}
+
+// PollSync executes Task and ContainerStats in parallel. If both succeed, both structs are returned.
+// If either errors, a single error is returned.
+func PollSync(c Client) (*Task, map[string]types.StatsJSON, error) {
+
+ var task *Task
+ var stats map[string]types.StatsJSON
+ var err error
+
+ if stats, err = c.ContainerStats(); err != nil {
+ return nil, nil, err
+ }
+
+ if task, err = c.Task(); err != nil {
+ return nil, nil, err
+ }
+
+ return task, stats, nil
+}
diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go
new file mode 100644
index 0000000000000..333aec80c2709
--- /dev/null
+++ b/plugins/inputs/ecs/client_test.go
@@ -0,0 +1,315 @@
+package ecs
+
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "testing"
+
+ "github.com/docker/docker/api/types"
+ "github.com/stretchr/testify/assert"
+)
+
+type pollMock struct {
+ task func() (*Task, error)
+ stats func() (map[string]types.StatsJSON, error)
+}
+
+func (p *pollMock) Task() (*Task, error) {
+ return p.task()
+}
+
+func (p *pollMock) ContainerStats() (map[string]types.StatsJSON, error) {
+ return p.stats()
+}
+
+func TestEcsClient_PollSync(t *testing.T) {
+
+ tests := []struct {
+ name string
+ mock *pollMock
+ want *Task
+ want1 map[string]types.StatsJSON
+ wantErr bool
+ }{
+ {
+ name: "success",
+ mock: &pollMock{
+ task: func() (*Task, error) {
+ return &validMeta, nil
+ },
+ stats: func() (map[string]types.StatsJSON, error) {
+ return validStats, nil
+ },
+ },
+ want: &validMeta,
+ want1: validStats,
+ },
+ {
+ name: "task err",
+ mock: &pollMock{
+ task: func() (*Task, error) {
+ return nil, errors.New("err")
+ },
+ stats: func() (map[string]types.StatsJSON, error) {
+ return validStats, nil
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "stats err",
+ mock: &pollMock{
+ task: func() (*Task, error) {
+ return &validMeta, nil
+ },
+ stats: func() (map[string]types.StatsJSON, error) {
+ return nil, errors.New("err")
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, got1, err := PollSync(tt.mock)
+
+ if (err != nil) != tt.wantErr {
+ t.Errorf("EcsClient.PollSync() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ assert.Equal(t, tt.want, got, "EcsClient.PollSync() got = %v, want %v", got, tt.want)
+ assert.Equal(t, tt.want1, got1, "EcsClient.PollSync() got1 = %v, want %v", got1, tt.want1)
+ })
+ }
+}
+
+type mockDo struct {
+ do func(req *http.Request) (*http.Response, error)
+}
+
+func (m mockDo) Do(req *http.Request) (*http.Response, error) {
+ return m.do(req)
+}
+
+func TestEcsClient_Task(t *testing.T) {
+ rc, _ := os.Open("testdata/metadata.golden")
+ tests := []struct {
+ name string
+ client httpClient
+ want *Task
+ wantErr bool
+ }{
+ {
+ name: "happy",
+ client: mockDo{
+ do: func(req *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(rc),
+ }, nil
+ },
+ },
+ want: &validMeta,
+ },
+ {
+ name: "do err",
+ client: mockDo{
+ do: func(req *http.Request) (*http.Response, error) {
+ return nil, errors.New("err")
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "malformed 500 resp",
+ client: mockDo{
+ do: func(req *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: http.StatusInternalServerError,
+ Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))),
+ }, nil
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "malformed 200 resp",
+ client: mockDo{
+ do: func(req *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))),
+ }, nil
+ },
+ },
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &EcsClient{
+ client: tt.client,
+ taskURL: "abc",
+ }
+ got, err := c.Task()
+ if (err != nil) != tt.wantErr {
+ t.Errorf("EcsClient.Task() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ assert.Equal(t, tt.want, got, "EcsClient.Task() = %v, want %v", got, tt.want)
+ })
+ }
+}
+
+func TestEcsClient_ContainerStats(t *testing.T) {
+ rc, _ := os.Open("testdata/stats.golden")
+ tests := []struct {
+ name string
+ client httpClient
+ want map[string]types.StatsJSON
+ wantErr bool
+ }{
+ {
+ name: "happy",
+ client: mockDo{
+ do: func(req *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(rc),
+ }, nil
+ },
+ },
+ want: validStats,
+ },
+ {
+ name: "do err",
+ client: mockDo{
+ do: func(req *http.Request) (*http.Response, error) {
+ return nil, errors.New("err")
+ },
+ },
+ want: map[string]types.StatsJSON{},
+ wantErr: true,
+ },
+ {
+ name: "malformed 200 resp",
+ client: mockDo{
+ do: func(req *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))),
+ }, nil
+ },
+ },
+ want: map[string]types.StatsJSON{},
+ wantErr: true,
+ },
+ {
+ name: "malformed 500 resp",
+ client: mockDo{
+ do: func(req *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: http.StatusInternalServerError,
+ Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))),
+ }, nil
+ },
+ },
+ want: nil,
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &EcsClient{
+ client: tt.client,
+ statsURL: "abc",
+ }
+ got, err := c.ContainerStats()
+ if (err != nil) != tt.wantErr {
+ t.Errorf("EcsClient.ContainerStats() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ assert.Equal(t, tt.want, got, "EcsClient.ContainerStats() = %v, want %v", got, tt.want)
+ })
+ }
+}
+
+func TestResolveTaskURL(t *testing.T) {
+ tests := []struct {
+ name string
+ base string
+ ver int
+ exp string
+ }{
+ {
+ name: "default v2 endpoint",
+ base: v2Endpoint,
+ ver: 2,
+ exp: "http://169.254.170.2/v2/metadata",
+ },
+ {
+ name: "custom v2 endpoint",
+ base: "http://192.168.0.1",
+ ver: 2,
+ exp: "http://192.168.0.1/v2/metadata",
+ },
+ {
+ name: "theoretical v3 endpoint",
+ base: "http://169.254.170.2/v3/metadata",
+ ver: 3,
+ exp: "http://169.254.170.2/v3/metadata/task",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ baseURL, err := url.Parse(tt.base)
+ assert.NoError(t, err)
+
+ act := resolveTaskURL(baseURL, tt.ver)
+ assert.Equal(t, tt.exp, act)
+ })
+ }
+}
+
+func TestResolveStatsURL(t *testing.T) {
+ tests := []struct {
+ name string
+ base string
+ ver int
+ exp string
+ }{
+ {
+ name: "default v2 endpoint",
+ base: v2Endpoint,
+ ver: 2,
+ exp: "http://169.254.170.2/v2/stats",
+ },
+ {
+ name: "custom v2 endpoint",
+ base: "http://192.168.0.1",
+ ver: 2,
+ exp: "http://192.168.0.1/v2/stats",
+ },
+ {
+ name: "theoretical v3 endpoint",
+ base: "http://169.254.170.2/v3/metadata",
+ ver: 3,
+ exp: "http://169.254.170.2/v3/metadata/task/stats",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ baseURL, err := url.Parse(tt.base)
+ assert.NoError(t, err)
+
+ act := resolveStatsURL(baseURL, tt.ver)
+ assert.Equal(t, tt.exp, act)
+ })
+ }
+}
diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go
new file mode 100644
index 0000000000000..5fa53d4fd58bc
--- /dev/null
+++ b/plugins/inputs/ecs/ecs.go
@@ -0,0 +1,271 @@
+package ecs
+
+import (
+ "os"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/filter"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Ecs config object
+type Ecs struct {
+ EndpointURL string `toml:"endpoint_url"`
+ Timeout internal.Duration
+
+ ContainerNameInclude []string `toml:"container_name_include"`
+ ContainerNameExclude []string `toml:"container_name_exclude"`
+
+ ContainerStatusInclude []string `toml:"container_status_include"`
+ ContainerStatusExclude []string `toml:"container_status_exclude"`
+
+ LabelInclude []string `toml:"ecs_label_include"`
+ LabelExclude []string `toml:"ecs_label_exclude"`
+
+ newClient func(timeout time.Duration, endpoint string, version int) (*EcsClient, error)
+
+ client Client
+ filtersCreated bool
+ labelFilter filter.Filter
+ containerNameFilter filter.Filter
+ statusFilter filter.Filter
+ metadataVersion int
+}
+
+const (
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ v2Endpoint = "http://169.254.170.2"
+)
+
+var sampleConfig = `
+ ## ECS metadata url.
+ ## Metadata v2 API is used if set explicitly. Otherwise,
+ ## v3 metadata endpoint API is used if available.
+ # endpoint_url = ""
+
+ ## Containers to include and exclude. Globs accepted.
+ ## Note that an empty array for both will include all containers
+ # container_name_include = []
+ # container_name_exclude = []
+
+ ## Container states to include and exclude. Globs accepted.
+ ## When empty only containers in the "RUNNING" state will be captured.
+ ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+ ## "RESOURCES_PROVISIONED", "STOPPED".
+ # container_status_include = []
+ # container_status_exclude = []
+
+ ## ecs labels to include and exclude as tags. Globs accepted.
+ ## Note that an empty array for both will include all labels as tags
+ ecs_label_include = [ "com.amazonaws.ecs.*" ]
+ ecs_label_exclude = []
+
+ ## Timeout for queries.
+ # timeout = "5s"
+`
+
+// Description describes ECS plugin
+func (ecs *Ecs) Description() string {
+ return "Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints."
+}
+
+// SampleConfig returns the ECS example config
+func (ecs *Ecs) SampleConfig() string {
+ return sampleConfig
+}
+
+// Gather is the entrypoint for telegraf metrics collection
+func (ecs *Ecs) Gather(acc telegraf.Accumulator) error {
+ err := initSetup(ecs)
+ if err != nil {
+ return err
+ }
+
+ task, stats, err := PollSync(ecs.client)
+ if err != nil {
+ return err
+ }
+
+ mergeTaskStats(task, stats)
+
+ taskTags := map[string]string{
+ "cluster": task.Cluster,
+ "task_arn": task.TaskARN,
+ "family": task.Family,
+ "revision": task.Revision,
+ }
+
+ // accumulate metrics
+ ecs.accTask(task, taskTags, acc)
+ ecs.accContainers(task, taskTags, acc)
+
+ return nil
+}
+
+func initSetup(ecs *Ecs) error {
+ if ecs.client == nil {
+ resolveEndpoint(ecs)
+
+ c, err := ecs.newClient(ecs.Timeout.Duration, ecs.EndpointURL, ecs.metadataVersion)
+ if err != nil {
+ return err
+ }
+ ecs.client = c
+ }
+
+ // Create filters
+ if !ecs.filtersCreated {
+ err := ecs.createContainerNameFilters()
+ if err != nil {
+ return err
+ }
+ err = ecs.createContainerStatusFilters()
+ if err != nil {
+ return err
+ }
+ err = ecs.createLabelFilters()
+ if err != nil {
+ return err
+ }
+ ecs.filtersCreated = true
+ }
+
+ return nil
+}
+
+func resolveEndpoint(ecs *Ecs) {
+ if ecs.EndpointURL != "" {
+ // Use metadata v2 API since endpoint is set explicitly.
+ ecs.metadataVersion = 2
+ return
+ }
+
+ // Auto-detect metadata endpoint version.
+
+ // Use metadata v3 if available.
+ // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html
+ v3Endpoint := os.Getenv("ECS_CONTAINER_METADATA_URI")
+ if v3Endpoint != "" {
+ ecs.EndpointURL = v3Endpoint
+ ecs.metadataVersion = 3
+ return
+ }
+
+ // Use v2 endpoint if nothing else is available.
+ ecs.EndpointURL = v2Endpoint
+ ecs.metadataVersion = 2
+}
+
+func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumulator) {
+ taskFields := map[string]interface{}{
+ "revision": task.Revision,
+ "desired_status": task.DesiredStatus,
+ "known_status": task.KnownStatus,
+ "limit_cpu": task.Limits["CPU"],
+ "limit_mem": task.Limits["Memory"],
+ }
+
+ acc.AddFields("ecs_task", taskFields, tags, task.PullStoppedAt)
+}
+
+func (ecs *Ecs) accContainers(task *Task, taskTags map[string]string, acc telegraf.Accumulator) {
+ for _, c := range task.Containers {
+ if !ecs.containerNameFilter.Match(c.Name) {
+ continue
+ }
+
+ if !ecs.statusFilter.Match(strings.ToUpper(c.KnownStatus)) {
+ continue
+ }
+
+ // add matching ECS container Labels
+ containerTags := map[string]string{
+ "id": c.ID,
+ "name": c.Name,
+ }
+ for k, v := range c.Labels {
+ if ecs.labelFilter.Match(k) {
+ containerTags[k] = v
+ }
+ }
+ tags := mergeTags(taskTags, containerTags)
+
+ parseContainerStats(c, acc, tags)
+ }
+}
+
+// returns a new map with the same content values as the input map
+func copyTags(in map[string]string) map[string]string {
+ out := make(map[string]string)
+ for k, v := range in {
+ out[k] = v
+ }
+ return out
+}
+
+// returns a new map with the merged content values of the two input maps
+func mergeTags(a map[string]string, b map[string]string) map[string]string {
+ c := copyTags(a)
+ for k, v := range b {
+ c[k] = v
+ }
+ return c
+}
+
+func (ecs *Ecs) createContainerNameFilters() error {
+ filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerNameInclude, ecs.ContainerNameExclude)
+ if err != nil {
+ return err
+ }
+ ecs.containerNameFilter = filter
+ return nil
+}
+
+func (ecs *Ecs) createLabelFilters() error {
+ filter, err := filter.NewIncludeExcludeFilter(ecs.LabelInclude, ecs.LabelExclude)
+ if err != nil {
+ return err
+ }
+ ecs.labelFilter = filter
+ return nil
+}
+
+func (ecs *Ecs) createContainerStatusFilters() error {
+ if len(ecs.ContainerStatusInclude) == 0 && len(ecs.ContainerStatusExclude) == 0 {
+ ecs.ContainerStatusInclude = []string{"RUNNING"}
+ }
+
+ // ECS uses uppercase status names, normalizing for comparison.
+ for i, include := range ecs.ContainerStatusInclude {
+ ecs.ContainerStatusInclude[i] = strings.ToUpper(include)
+ }
+ for i, exclude := range ecs.ContainerStatusExclude {
+ ecs.ContainerStatusExclude[i] = strings.ToUpper(exclude)
+ }
+
+ filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude)
+ if err != nil {
+ return err
+ }
+ ecs.statusFilter = filter
+ return nil
+}
+
+func init() {
+ inputs.Add("ecs", func() telegraf.Input {
+ return &Ecs{
+ EndpointURL: "",
+ Timeout: internal.Duration{Duration: 5 * time.Second},
+ newClient: NewClient,
+ filtersCreated: false,
+ }
+ })
+}
diff --git a/plugins/inputs/ecs/ecs_test.go b/plugins/inputs/ecs/ecs_test.go
new file mode 100644
index 0000000000000..5d64fef01efad
--- /dev/null
+++ b/plugins/inputs/ecs/ecs_test.go
@@ -0,0 +1,831 @@
+package ecs
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/stretchr/testify/assert"
+)
+
+// codified golden objects for tests
+
+// stats
+const pauseStatsKey = "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba"
+const nginxStatsKey = "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299"
+
+var pauseStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.936081344Z")
+var pauseStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.933000984Z")
+
+var nginxStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.93733207Z")
+var nginxStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.934291009Z")
+
+var validStats = map[string]types.StatsJSON{
+ pauseStatsKey: {
+ Stats: types.Stats{
+ Read: pauseStatsRead,
+ PreRead: pauseStatsPreRead,
+ BlkioStats: types.BlkioStats{
+ IoServiceBytesRecursive: []types.BlkioStatEntry{
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Read",
+ Value: 790528,
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Write",
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Sync",
+ Value: 790528,
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Async",
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Total",
+ Value: 790528,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Read",
+ Value: 790528,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Sync",
+ Value: 790528,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Total",
+ Value: 790528,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Read",
+ Value: 790528,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Sync",
+ Value: 790528,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Total",
+ Value: 790528,
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Read",
+ Value: 790528,
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Sync",
+ Value: 790528,
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Total",
+ Value: 790528,
+ },
+ },
+ IoServicedRecursive: []types.BlkioStatEntry{
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Read",
+ Value: 10,
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Write",
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Sync",
+ Value: 10,
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Async",
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Total",
+ Value: 10,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Read",
+ Value: 10,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Sync",
+ Value: 10,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Total",
+ Value: 10,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Read",
+ Value: 10,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Sync",
+ Value: 10,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Total",
+ Value: 10,
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Read",
+ Value: 10,
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Sync",
+ Value: 10,
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 4,
+ Op: "Total",
+ Value: 10,
+ },
+ },
+ },
+ CPUStats: types.CPUStats{
+ CPUUsage: types.CPUUsage{
+ PercpuUsage: []uint64{
+ 26426156,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ },
+ UsageInUsermode: 20000000,
+ TotalUsage: 26426156,
+ },
+ SystemUsage: 2336100000000,
+ OnlineCPUs: 1,
+ ThrottlingData: types.ThrottlingData{},
+ },
+ PreCPUStats: types.CPUStats{
+ CPUUsage: types.CPUUsage{
+ PercpuUsage: []uint64{
+ 26426156,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ },
+ UsageInUsermode: 20000000,
+ TotalUsage: 26426156,
+ },
+ SystemUsage: 2335090000000,
+ OnlineCPUs: 1,
+ ThrottlingData: types.ThrottlingData{},
+ },
+ MemoryStats: types.MemoryStats{
+ Stats: map[string]uint64{
+ "cache": 790528,
+ "mapped_file": 618496,
+ "total_inactive_file": 782336,
+ "pgpgout": 1040,
+ "rss": 40960,
+ "total_mapped_file": 618496,
+ "pgpgin": 1243,
+ "pgmajfault": 6,
+ "total_rss": 40960,
+ "hierarchical_memory_limit": 536870912,
+ "total_pgfault": 1298,
+ "total_active_file": 8192,
+ "active_anon": 40960,
+ "total_active_anon": 40960,
+ "total_pgpgout": 1040,
+ "total_cache": 790528,
+ "active_file": 8192,
+ "pgfault": 1298,
+ "inactive_file": 782336,
+ "total_pgpgin": 1243,
+ "hierarchical_memsw_limit": 9223372036854772000,
+ },
+ MaxUsage: 4825088,
+ Usage: 1343488,
+ Limit: 1033658368,
+ },
+ },
+ Networks: map[string]types.NetworkStats{
+ "eth0": {
+ RxBytes: uint64(5338),
+ RxDropped: uint64(0),
+ RxErrors: uint64(0),
+ RxPackets: uint64(36),
+ TxBytes: uint64(648),
+ TxDropped: uint64(0),
+ TxErrors: uint64(0),
+ TxPackets: uint64(8),
+ },
+ "eth5": {
+ RxBytes: uint64(4641),
+ RxDropped: uint64(0),
+ RxErrors: uint64(0),
+ RxPackets: uint64(26),
+ TxBytes: uint64(690),
+ TxDropped: uint64(0),
+ TxErrors: uint64(0),
+ TxPackets: uint64(9),
+ },
+ },
+ },
+ nginxStatsKey: {
+ Stats: types.Stats{
+ Read: nginxStatsRead,
+ PreRead: nginxStatsPreRead,
+ BlkioStats: types.BlkioStats{
+ IoServiceBytesRecursive: []types.BlkioStatEntry{
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Read",
+ Value: 5730304,
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Write",
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Sync",
+ Value: 5730304,
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Async",
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Total",
+ Value: 5730304,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Read",
+ Value: 5730304,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Sync",
+ Value: 5730304,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Total",
+ Value: 5730304,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Read",
+ Value: 5730304,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Sync",
+ Value: 5730304,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Total",
+ Value: 5730304,
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Read",
+ Value: 5730304,
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Sync",
+ Value: 5730304,
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Total",
+ Value: 5730304,
+ },
+ },
+ IoServicedRecursive: []types.BlkioStatEntry{
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Read",
+ Value: 156,
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Write",
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Sync",
+ Value: 156,
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Async",
+ },
+ {
+ Major: 202,
+ Minor: 26368,
+ Op: "Total",
+ Value: 156,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Read",
+ Value: 156,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Sync",
+ Value: 156,
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 1,
+ Op: "Total",
+ Value: 156,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Read",
+ Value: 156,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Sync",
+ Value: 156,
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 2,
+ Op: "Total",
+ Value: 156,
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Read",
+ Value: 147,
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Write",
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Sync",
+ Value: 147,
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Async",
+ },
+ {
+ Major: 253,
+ Minor: 5,
+ Op: "Total",
+ Value: 147,
+ },
+ },
+ },
+ CPUStats: types.CPUStats{
+ CPUUsage: types.CPUUsage{
+ PercpuUsage: []uint64{
+ 65599511,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ },
+ UsageInUsermode: 40000000,
+ TotalUsage: 65599511,
+ UsageInKernelmode: 10000000,
+ },
+ SystemUsage: 2336100000000,
+ OnlineCPUs: 1,
+ ThrottlingData: types.ThrottlingData{},
+ },
+ PreCPUStats: types.CPUStats{
+ CPUUsage: types.CPUUsage{
+ PercpuUsage: []uint64{
+ 65599511,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ },
+ UsageInUsermode: 40000000,
+ TotalUsage: 65599511,
+ UsageInKernelmode: 10000000,
+ },
+ SystemUsage: 2335090000000,
+ OnlineCPUs: 1,
+ ThrottlingData: types.ThrottlingData{},
+ },
+ MemoryStats: types.MemoryStats{
+ Stats: map[string]uint64{
+ "cache": 5787648,
+ "mapped_file": 3616768,
+ "total_inactive_file": 4321280,
+ "pgpgout": 1674,
+ "rss": 1597440,
+ "total_mapped_file": 3616768,
+ "pgpgin": 3477,
+ "pgmajfault": 40,
+ "total_rss": 1597440,
+ "total_inactive_anon": 4096,
+ "hierarchical_memory_limit": 536870912,
+ "total_pgfault": 2924,
+ "total_active_file": 1462272,
+ "active_anon": 1597440,
+ "total_active_anon": 1597440,
+ "total_pgpgout": 1674,
+ "total_cache": 5787648,
+ "inactive_anon": 4096,
+ "active_file": 1462272,
+ "pgfault": 2924,
+ "inactive_file": 4321280,
+ "total_pgpgin": 3477,
+ "hierarchical_memsw_limit": 9223372036854772000,
+ },
+ MaxUsage: 8667136,
+ Usage: 8179712,
+ Limit: 1033658368,
+ },
+ },
+ },
+}
+
+// meta
+var metaPauseCreated, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:26.641964373Z")
+var metaPauseStarted, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.035698679Z")
+var metaCreated, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.614884084Z")
+var metaStarted, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.975996351Z")
+var metaPullStart, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.197327103Z")
+var metaPullStop, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.609089471Z")
+
+var validMeta = Task{
+ Cluster: "test",
+ TaskARN: "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a",
+ Family: "nginx",
+ Revision: "2",
+ DesiredStatus: "RUNNING",
+ KnownStatus: "RUNNING",
+ Containers: []Container{
+ {
+ ID: pauseStatsKey,
+ Name: "~internal~ecs~pause",
+ DockerName: "ecs-nginx-2-internalecspause",
+ Image: "amazon/amazon-ecs-pause:0.1.0",
+ ImageID: "",
+ Labels: map[string]string{
+ "com.amazonaws.ecs.cluster": "test",
+ "com.amazonaws.ecs.container-name": "~internal~ecs~pause",
+ "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a",
+ "com.amazonaws.ecs.task-definition-family": "nginx",
+ "com.amazonaws.ecs.task-definition-version": "2",
+ },
+ DesiredStatus: "RESOURCES_PROVISIONED",
+ KnownStatus: "RESOURCES_PROVISIONED",
+ Limits: map[string]float64{
+ "CPU": 0,
+ "Memory": 0,
+ },
+ CreatedAt: metaPauseCreated,
+ StartedAt: metaPauseStarted,
+ Type: "CNI_PAUSE",
+ Networks: []Network{
+ {
+ NetworkMode: "awsvpc",
+ IPv4Addresses: []string{
+ "172.31.25.181",
+ },
+ },
+ },
+ },
+ {
+ ID: nginxStatsKey,
+ Name: "nginx",
+ DockerName: "ecs-nginx-2-nginx",
+ Image: "nginx:alpine",
+ ImageID: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ Labels: map[string]string{
+ "com.amazonaws.ecs.cluster": "test",
+ "com.amazonaws.ecs.container-name": "nginx",
+ "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a",
+ "com.amazonaws.ecs.task-definition-family": "nginx",
+ "com.amazonaws.ecs.task-definition-version": "2",
+ },
+ DesiredStatus: "RUNNING",
+ KnownStatus: "RUNNING",
+ Limits: map[string]float64{
+ "CPU": 0,
+ "Memory": 0,
+ },
+ CreatedAt: metaCreated,
+ StartedAt: metaStarted,
+ Type: "NORMAL",
+ Networks: []Network{
+ {
+ NetworkMode: "awsvpc",
+ IPv4Addresses: []string{
+ "172.31.25.181",
+ },
+ },
+ },
+ },
+ },
+ Limits: map[string]float64{
+ "CPU": 0.5,
+ "Memory": 512,
+ },
+ PullStartedAt: metaPullStart,
+ PullStoppedAt: metaPullStop,
+}
+
+func TestResolveEndpoint(t *testing.T) {
+ tests := []struct {
+ name string
+ given Ecs
+ exp Ecs
+ preF func()
+ afterF func()
+ }{
+ {
+ name: "Endpoint is explicitly set => use v2 metadata",
+ given: Ecs{
+ EndpointURL: "192.162.0.1/custom_endpoint",
+ },
+ exp: Ecs{
+ EndpointURL: "192.162.0.1/custom_endpoint",
+ metadataVersion: 2,
+ },
+ },
+ {
+ name: "Endpoint is not set, ECS_CONTAINER_METADATA_URI is not set => use v2 metadata",
+ given: Ecs{
+ EndpointURL: "",
+ },
+ exp: Ecs{
+ EndpointURL: v2Endpoint,
+ metadataVersion: 2,
+ },
+ },
+ {
+ name: "Endpoint is not set, ECS_CONTAINER_METADATA_URI is set => use v3 metadata",
+ preF: func() {
+ os.Setenv("ECS_CONTAINER_METADATA_URI", "v3-endpoint.local")
+ },
+ afterF: func() {
+ os.Unsetenv("ECS_CONTAINER_METADATA_URI")
+ },
+ given: Ecs{
+ EndpointURL: "",
+ },
+ exp: Ecs{
+ EndpointURL: "v3-endpoint.local",
+ metadataVersion: 3,
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if tt.preF != nil {
+ tt.preF()
+ }
+ if tt.afterF != nil {
+ defer tt.afterF()
+ }
+
+ act := tt.given
+ resolveEndpoint(&act)
+ assert.Equal(t, tt.exp, act)
+ })
+ }
+}
diff --git a/plugins/inputs/ecs/stats.go b/plugins/inputs/ecs/stats.go
new file mode 100644
index 0000000000000..d2a8ee5d34cfd
--- /dev/null
+++ b/plugins/inputs/ecs/stats.go
@@ -0,0 +1,295 @@
+package ecs
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs/docker"
+)
+
+func parseContainerStats(c Container, acc telegraf.Accumulator, tags map[string]string) {
+ id := c.ID
+ stats := c.Stats
+ tm := stats.Read
+
+ if tm.Before(time.Unix(0, 0)) {
+ tm = time.Now()
+ }
+
+ metastats(id, c, acc, tags, tm)
+ memstats(id, stats, acc, tags, tm)
+ cpustats(id, stats, acc, tags, tm)
+ netstats(id, stats, acc, tags, tm)
+ blkstats(id, stats, acc, tags, tm)
+}
+
+func metastats(id string, c Container, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+ metafields := map[string]interface{}{
+ "container_id": id,
+ "docker_name": c.DockerName,
+ "image": c.Image,
+ "image_id": c.ImageID,
+ "desired_status": c.DesiredStatus,
+ "known_status": c.KnownStatus,
+ "limit_cpu": c.Limits["CPU"],
+ "limit_mem": c.Limits["Memory"],
+ "created_at": c.CreatedAt,
+ "started_at": c.StartedAt,
+ "type": c.Type,
+ }
+
+ acc.AddFields("ecs_container_meta", metafields, tags, tm)
+}
+
+func memstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+ memfields := map[string]interface{}{
+ "container_id": id,
+ }
+
+ memstats := []string{
+ "active_anon",
+ "active_file",
+ "cache",
+ "hierarchical_memory_limit",
+ "inactive_anon",
+ "inactive_file",
+ "mapped_file",
+ "pgfault",
+ "pgmajfault",
+ "pgpgin",
+ "pgpgout",
+ "rss",
+ "rss_huge",
+ "total_active_anon",
+ "total_active_file",
+ "total_cache",
+ "total_inactive_anon",
+ "total_inactive_file",
+ "total_mapped_file",
+ "total_pgfault",
+ "total_pgmajfault",
+ "total_pgpgin",
+ "total_pgpgout",
+ "total_rss",
+ "total_rss_huge",
+ "total_unevictable",
+ "total_writeback",
+ "unevictable",
+ "writeback",
+ }
+
+ for _, field := range memstats {
+ if value, ok := stats.MemoryStats.Stats[field]; ok {
+ memfields[field] = value
+ }
+ }
+ if stats.MemoryStats.Failcnt != 0 {
+ memfields["fail_count"] = stats.MemoryStats.Failcnt
+ }
+
+ memfields["limit"] = stats.MemoryStats.Limit
+ memfields["max_usage"] = stats.MemoryStats.MaxUsage
+
+ mem := docker.CalculateMemUsageUnixNoCache(stats.MemoryStats)
+ memLimit := float64(stats.MemoryStats.Limit)
+ memfields["usage"] = uint64(mem)
+ memfields["usage_percent"] = docker.CalculateMemPercentUnixNoCache(memLimit, mem)
+
+ acc.AddFields("ecs_container_mem", memfields, tags, tm)
+}
+
+func cpustats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+ cpufields := map[string]interface{}{
+ "usage_total": stats.CPUStats.CPUUsage.TotalUsage,
+ "usage_in_usermode": stats.CPUStats.CPUUsage.UsageInUsermode,
+ "usage_in_kernelmode": stats.CPUStats.CPUUsage.UsageInKernelmode,
+ "usage_system": stats.CPUStats.SystemUsage,
+ "throttling_periods": stats.CPUStats.ThrottlingData.Periods,
+ "throttling_throttled_periods": stats.CPUStats.ThrottlingData.ThrottledPeriods,
+ "throttling_throttled_time": stats.CPUStats.ThrottlingData.ThrottledTime,
+ "container_id": id,
+ }
+
+ previousCPU := stats.PreCPUStats.CPUUsage.TotalUsage
+ previousSystem := stats.PreCPUStats.SystemUsage
+ cpuPercent := docker.CalculateCPUPercentUnix(previousCPU, previousSystem, &stats)
+ cpufields["usage_percent"] = cpuPercent
+
+ cputags := copyTags(tags)
+ cputags["cpu"] = "cpu-total"
+ acc.AddFields("ecs_container_cpu", cpufields, cputags, tm)
+
+ // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs
+ // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
+ var percpuusage []uint64
+ if stats.CPUStats.OnlineCPUs > 0 {
+ percpuusage = stats.CPUStats.CPUUsage.PercpuUsage[:stats.CPUStats.OnlineCPUs]
+ } else {
+ percpuusage = stats.CPUStats.CPUUsage.PercpuUsage
+ }
+
+ for i, percpu := range percpuusage {
+ percputags := copyTags(tags)
+ percputags["cpu"] = fmt.Sprintf("cpu%d", i)
+ fields := map[string]interface{}{
+ "usage_total": percpu,
+ "container_id": id,
+ }
+ acc.AddFields("ecs_container_cpu", fields, percputags, tm)
+ }
+}
+
+func netstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+ totalNetworkStatMap := make(map[string]interface{})
+ for network, netstats := range stats.Networks {
+ netfields := map[string]interface{}{
+ "rx_dropped": netstats.RxDropped,
+ "rx_bytes": netstats.RxBytes,
+ "rx_errors": netstats.RxErrors,
+ "tx_packets": netstats.TxPackets,
+ "tx_dropped": netstats.TxDropped,
+ "rx_packets": netstats.RxPackets,
+ "tx_errors": netstats.TxErrors,
+ "tx_bytes": netstats.TxBytes,
+ "container_id": id,
+ }
+
+ nettags := copyTags(tags)
+ nettags["network"] = network
+ acc.AddFields("ecs_container_net", netfields, nettags, tm)
+
+ for field, value := range netfields {
+ if field == "container_id" {
+ continue
+ }
+
+ var uintV uint64
+ switch v := value.(type) {
+ case uint64:
+ uintV = v
+ case int64:
+ uintV = uint64(v)
+ default:
+ continue
+ }
+
+ _, ok := totalNetworkStatMap[field]
+ if ok {
+ totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + uintV
+ } else {
+ totalNetworkStatMap[field] = uintV
+ }
+ }
+ }
+
+ // totalNetworkStatMap could be empty if container is running with --net=host.
+ if len(totalNetworkStatMap) != 0 {
+ nettags := copyTags(tags)
+ nettags["network"] = "total"
+ totalNetworkStatMap["container_id"] = id
+ acc.AddFields("ecs_container_net", totalNetworkStatMap, nettags, tm)
+ }
+}
+
+func blkstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+ blkioStats := stats.BlkioStats
+ // Make a map of devices to their block io stats
+ deviceStatMap := make(map[string]map[string]interface{})
+
+ for _, metric := range blkioStats.IoServiceBytesRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ _, ok := deviceStatMap[device]
+ if !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+
+ field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IoServicedRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ _, ok := deviceStatMap[device]
+ if !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+
+ field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IoQueuedRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IoServiceTimeRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IoWaitTimeRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IoMergedRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IoTimeRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ deviceStatMap[device]["io_time_recursive"] = metric.Value
+ }
+
+ for _, metric := range blkioStats.SectorsRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ deviceStatMap[device]["sectors_recursive"] = metric.Value
+ }
+
+ totalStatMap := make(map[string]interface{})
+ for device, fields := range deviceStatMap {
+ fields["container_id"] = id
+
+ iotags := copyTags(tags)
+ iotags["device"] = device
+ acc.AddFields("ecs_container_blkio", fields, iotags, tm)
+
+ for field, value := range fields {
+ if field == "container_id" {
+ continue
+ }
+
+ var uintV uint64
+ switch v := value.(type) {
+ case uint64:
+ uintV = v
+ case int64:
+ uintV = uint64(v)
+ default:
+ continue
+ }
+
+ _, ok := totalStatMap[field]
+ if ok {
+ totalStatMap[field] = totalStatMap[field].(uint64) + uintV
+ } else {
+ totalStatMap[field] = uintV
+ }
+
+ }
+ }
+
+ totalStatMap["container_id"] = id
+ iotags := copyTags(tags)
+ iotags["device"] = "total"
+ acc.AddFields("ecs_container_blkio", totalStatMap, iotags, tm)
+}
diff --git a/plugins/inputs/ecs/stats_test.go b/plugins/inputs/ecs/stats_test.go
new file mode 100644
index 0000000000000..04632ac61c208
--- /dev/null
+++ b/plugins/inputs/ecs/stats_test.go
@@ -0,0 +1,226 @@
+package ecs
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func Test_metastats(t *testing.T) {
+ var mockAcc testutil.Accumulator
+
+ tags := map[string]string{
+ "test_tag": "test",
+ }
+ tm := time.Now()
+
+ metastats(nginxStatsKey, validMeta.Containers[1], &mockAcc, tags, tm)
+ mockAcc.AssertContainsTaggedFields(
+ t,
+ "ecs_container_meta",
+ map[string]interface{}{
+ "container_id": nginxStatsKey,
+ "docker_name": "ecs-nginx-2-nginx",
+ "image": "nginx:alpine",
+ "image_id": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "desired_status": "RUNNING",
+ "known_status": "RUNNING",
+ "limit_cpu": float64(0),
+ "limit_mem": float64(0),
+ "created_at": metaCreated,
+ "started_at": metaStarted,
+ "type": "NORMAL",
+ },
+ tags,
+ )
+}
+
+func Test_memstats(t *testing.T) {
+ var mockAcc testutil.Accumulator
+
+ tags := map[string]string{
+ "test_tag": "test",
+ }
+ tm := time.Now()
+
+ memstats(nginxStatsKey, validStats[nginxStatsKey], &mockAcc, tags, tm)
+ mockAcc.AssertContainsTaggedFields(
+ t,
+ "ecs_container_mem",
+ map[string]interface{}{
+ "active_anon": uint64(1597440),
+ "active_file": uint64(1462272),
+ "cache": uint64(5787648),
+ "container_id": nginxStatsKey,
+ "hierarchical_memory_limit": uint64(536870912),
+ "inactive_anon": uint64(4096),
+ "inactive_file": uint64(4321280),
+ "limit": uint64(1033658368),
+ "mapped_file": uint64(3616768),
+ "max_usage": uint64(8667136),
+ "pgmajfault": uint64(40),
+ "pgpgin": uint64(3477),
+ "pgpgout": uint64(1674),
+ "pgfault": uint64(2924),
+ "rss": uint64(1597440),
+ "total_active_anon": uint64(1597440),
+ "total_active_file": uint64(1462272),
+ "total_cache": uint64(5787648),
+ "total_inactive_anon": uint64(4096),
+ "total_inactive_file": uint64(4321280),
+ "total_mapped_file": uint64(3616768),
+ "total_pgfault": uint64(2924),
+ "total_pgpgout": uint64(1674),
+ "total_pgpgin": uint64(3477),
+ "total_rss": uint64(1597440),
+ "usage": uint64(2392064),
+ "usage_percent": float64(0.23141727228778164),
+ },
+ map[string]string{
+ "test_tag": "test",
+ },
+ )
+}
+
+func Test_cpustats(t *testing.T) {
+ var mockAcc testutil.Accumulator
+
+ tags := map[string]string{
+ "test_tag": "test",
+ }
+ tm := time.Now()
+
+ cpustats(nginxStatsKey, validStats[nginxStatsKey], &mockAcc, tags, tm)
+ mockAcc.AssertContainsTaggedFields(
+ t,
+ "ecs_container_cpu",
+ map[string]interface{}{
+ "container_id": nginxStatsKey,
+ "throttling_periods": uint64(0),
+ "throttling_throttled_periods": uint64(0),
+ "throttling_throttled_time": uint64(0),
+ "usage_in_usermode": uint64(40000000),
+ "usage_in_kernelmode": uint64(10000000),
+ "usage_percent": float64(0),
+ "usage_system": uint64(2336100000000),
+ "usage_total": uint64(65599511),
+ },
+ map[string]string{
+ "test_tag": "test",
+ "cpu": "cpu-total",
+ },
+ )
+ mockAcc.AssertContainsTaggedFields(
+ t,
+ "ecs_container_cpu",
+ map[string]interface{}{
+ "container_id": nginxStatsKey,
+ "usage_total": uint64(65599511),
+ },
+ map[string]string{
+ "test_tag": "test",
+ "cpu": "cpu0",
+ },
+ )
+}
+
+func Test_netstats(t *testing.T) {
+ var mockAcc testutil.Accumulator
+
+ tags := map[string]string{
+ "test_tag": "test",
+ }
+ tm := time.Now()
+
+ netstats(pauseStatsKey, validStats[pauseStatsKey], &mockAcc, tags, tm)
+ mockAcc.AssertContainsTaggedFields(
+ t,
+ "ecs_container_net",
+ map[string]interface{}{
+ "container_id": pauseStatsKey,
+ "rx_bytes": uint64(5338),
+ "rx_dropped": uint64(0),
+ "rx_errors": uint64(0),
+ "rx_packets": uint64(36),
+ "tx_bytes": uint64(648),
+ "tx_dropped": uint64(0),
+ "tx_errors": uint64(0),
+ "tx_packets": uint64(8),
+ },
+ map[string]string{
+ "test_tag": "test",
+ "network": "eth0",
+ },
+ )
+ mockAcc.AssertContainsTaggedFields(
+ t,
+ "ecs_container_net",
+ map[string]interface{}{
+ "container_id": pauseStatsKey,
+ "rx_bytes": uint64(4641),
+ "rx_dropped": uint64(0),
+ "rx_errors": uint64(0),
+ "rx_packets": uint64(26),
+ "tx_bytes": uint64(690),
+ "tx_dropped": uint64(0),
+ "tx_errors": uint64(0),
+ "tx_packets": uint64(9),
+ },
+ map[string]string{
+ "test_tag": "test",
+ "network": "eth5",
+ },
+ )
+ mockAcc.AssertContainsTaggedFields(
+ t,
+ "ecs_container_net",
+ map[string]interface{}{
+ "container_id": pauseStatsKey,
+ "rx_bytes": uint64(9979),
+ "rx_dropped": uint64(0),
+ "rx_errors": uint64(0),
+ "rx_packets": uint64(62),
+ "tx_bytes": uint64(1338),
+ "tx_dropped": uint64(0),
+ "tx_errors": uint64(0),
+ "tx_packets": uint64(17),
+ },
+ map[string]string{
+ "test_tag": "test",
+ "network": "total",
+ },
+ )
+}
+
+func Test_blkstats(t *testing.T) {
+ var mockAcc testutil.Accumulator
+
+ tags := map[string]string{
+ "test_tag": "test",
+ }
+ tm := time.Now()
+
+ blkstats(nginxStatsKey, validStats[nginxStatsKey], &mockAcc, tags, tm)
+ mockAcc.AssertContainsTaggedFields(
+ t,
+ "ecs_container_blkio",
+ map[string]interface{}{
+ "container_id": nginxStatsKey,
+ "io_service_bytes_recursive_read": uint64(5730304),
+ "io_service_bytes_recursive_write": uint64(0),
+ "io_service_bytes_recursive_sync": uint64(5730304),
+ "io_service_bytes_recursive_async": uint64(0),
+ "io_service_bytes_recursive_total": uint64(5730304),
+ "io_serviced_recursive_read": uint64(156),
+ "io_serviced_recursive_write": uint64(0),
+ "io_serviced_recursive_sync": uint64(156),
+ "io_serviced_recursive_async": uint64(0),
+ "io_serviced_recursive_total": uint64(156),
+ },
+ map[string]string{
+ "test_tag": "test",
+ "device": "202:26368",
+ },
+ )
+}
diff --git a/plugins/inputs/ecs/testdata/metadata.golden b/plugins/inputs/ecs/testdata/metadata.golden
new file mode 100644
index 0000000000000..6823d7e5e95d7
--- /dev/null
+++ b/plugins/inputs/ecs/testdata/metadata.golden
@@ -0,0 +1,78 @@
+{
+ "Cluster": "test",
+ "TaskARN": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a",
+ "Family": "nginx",
+ "Revision": "2",
+ "DesiredStatus": "RUNNING",
+ "KnownStatus": "RUNNING",
+ "Containers": [
+ {
+ "DockerId": "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",
+ "Name": "~internal~ecs~pause",
+ "DockerName": "ecs-nginx-2-internalecspause",
+ "Image": "amazon/amazon-ecs-pause:0.1.0",
+ "ImageID": "",
+ "Labels": {
+ "com.amazonaws.ecs.cluster": "test",
+ "com.amazonaws.ecs.container-name": "~internal~ecs~pause",
+ "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a",
+ "com.amazonaws.ecs.task-definition-family": "nginx",
+ "com.amazonaws.ecs.task-definition-version": "2"
+ },
+ "DesiredStatus": "RESOURCES_PROVISIONED",
+ "KnownStatus": "RESOURCES_PROVISIONED",
+ "Limits": {
+ "CPU": 0,
+ "Memory": 0
+ },
+ "CreatedAt": "2018-11-19T15:31:26.641964373Z",
+ "StartedAt": "2018-11-19T15:31:27.035698679Z",
+ "Type": "CNI_PAUSE",
+ "Networks": [
+ {
+ "NetworkMode": "awsvpc",
+ "IPv4Addresses": [
+ "172.31.25.181"
+ ]
+ }
+ ]
+ },
+ {
+ "DockerId": "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299",
+ "Name": "nginx",
+ "DockerName": "ecs-nginx-2-nginx",
+ "Image": "nginx:alpine",
+ "ImageID": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "Labels": {
+ "com.amazonaws.ecs.cluster": "test",
+ "com.amazonaws.ecs.container-name": "nginx",
+ "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a",
+ "com.amazonaws.ecs.task-definition-family": "nginx",
+ "com.amazonaws.ecs.task-definition-version": "2"
+ },
+ "DesiredStatus": "RUNNING",
+ "KnownStatus": "RUNNING",
+ "Limits": {
+ "CPU": 0,
+ "Memory": 0
+ },
+ "CreatedAt": "2018-11-19T15:31:27.614884084Z",
+ "StartedAt": "2018-11-19T15:31:27.975996351Z",
+ "Type": "NORMAL",
+ "Networks": [
+ {
+ "NetworkMode": "awsvpc",
+ "IPv4Addresses": [
+ "172.31.25.181"
+ ]
+ }
+ ]
+ }
+ ],
+ "Limits": {
+ "CPU": 0.5,
+ "Memory": 512
+ },
+ "PullStartedAt": "2018-11-19T15:31:27.197327103Z",
+ "PullStoppedAt": "2018-11-19T15:31:27.609089471Z"
+}
\ No newline at end of file
diff --git a/plugins/inputs/ecs/testdata/stats.golden b/plugins/inputs/ecs/testdata/stats.golden
new file mode 100644
index 0000000000000..791f4f0b3b8a3
--- /dev/null
+++ b/plugins/inputs/ecs/testdata/stats.golden
@@ -0,0 +1,663 @@
+{
+ "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba": {
+ "read": "2018-11-19T15:40:00.936081344Z",
+ "preread": "2018-11-19T15:39:59.933000984Z",
+ "num_procs": 0,
+ "pids_stats": {},
+ "networks": {
+ "eth0": {
+ "rx_bytes": 5338,
+ "rx_dropped": 0,
+ "rx_errors": 0,
+ "rx_packets": 36,
+ "tx_bytes": 648,
+ "tx_dropped": 0,
+ "tx_errors": 0,
+ "tx_packets": 8
+ },
+ "eth5": {
+ "rx_bytes": 4641,
+ "rx_dropped": 0,
+ "rx_errors": 0,
+ "rx_packets": 26,
+ "tx_bytes": 690,
+ "tx_dropped": 0,
+ "tx_errors": 0,
+ "tx_packets": 9
+ }
+ },
+ "memory_stats": {
+ "stats": {
+ "cache": 790528,
+ "mapped_file": 618496,
+ "total_inactive_file": 782336,
+ "pgpgout": 1040,
+ "rss": 40960,
+ "total_mapped_file": 618496,
+ "pgpgin": 1243,
+ "pgmajfault": 6,
+ "total_rss": 40960,
+ "hierarchical_memory_limit": 536870912,
+ "total_pgfault": 1298,
+ "total_active_file": 8192,
+ "active_anon": 40960,
+ "total_active_anon": 40960,
+ "total_pgpgout": 1040,
+ "total_cache": 790528,
+ "active_file": 8192,
+ "pgfault": 1298,
+ "inactive_file": 782336,
+ "total_pgpgin": 1243,
+ "hierarchical_memsw_limit": 9223372036854772000
+ },
+ "max_usage": 4825088,
+ "usage": 1343488,
+ "limit": 1033658368
+ },
+ "blkio_stats": {
+ "io_service_bytes_recursive": [
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Read",
+ "value": 790528
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Write"
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Sync",
+ "value": 790528
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Async"
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Total",
+ "value": 790528
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Read",
+ "value": 790528
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Sync",
+ "value": 790528
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Total",
+ "value": 790528
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Read",
+ "value": 790528
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Sync",
+ "value": 790528
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Total",
+ "value": 790528
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Read",
+ "value": 790528
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Sync",
+ "value": 790528
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Total",
+ "value": 790528
+ }
+ ],
+ "io_serviced_recursive": [
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Read",
+ "value": 10
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Write"
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Sync",
+ "value": 10
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Async"
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Total",
+ "value": 10
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Read",
+ "value": 10
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Sync",
+ "value": 10
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Total",
+ "value": 10
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Read",
+ "value": 10
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Sync",
+ "value": 10
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Total",
+ "value": 10
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Read",
+ "value": 10
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Sync",
+ "value": 10
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 4,
+ "op": "Total",
+ "value": 10
+ }
+ ]
+ },
+ "cpu_stats": {
+ "cpu_usage": {
+ "percpu_usage": [
+ 26426156,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ],
+ "usage_in_usermode": 20000000,
+ "total_usage": 26426156
+ },
+ "system_cpu_usage": 2336100000000,
+ "online_cpus": 1,
+ "throttling_data": {}
+ },
+ "precpu_stats": {
+ "cpu_usage": {
+ "percpu_usage": [
+ 26426156,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ],
+ "usage_in_usermode": 20000000,
+ "total_usage": 26426156
+ },
+ "system_cpu_usage": 2335090000000,
+ "online_cpus": 1,
+ "throttling_data": {}
+ },
+ "storage_stats": {}
+ },
+ "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299": {
+ "read": "2018-11-19T15:40:00.93733207Z",
+ "preread": "2018-11-19T15:39:59.934291009Z",
+ "num_procs": 0,
+ "pids_stats": {},
+ "network": {},
+ "memory_stats": {
+ "stats": {
+ "cache": 5787648,
+ "mapped_file": 3616768,
+ "total_inactive_file": 4321280,
+ "pgpgout": 1674,
+ "rss": 1597440,
+ "total_mapped_file": 3616768,
+ "pgpgin": 3477,
+ "pgmajfault": 40,
+ "total_rss": 1597440,
+ "total_inactive_anon": 4096,
+ "hierarchical_memory_limit": 536870912,
+ "total_pgfault": 2924,
+ "total_active_file": 1462272,
+ "active_anon": 1597440,
+ "total_active_anon": 1597440,
+ "total_pgpgout": 1674,
+ "total_cache": 5787648,
+ "inactive_anon": 4096,
+ "active_file": 1462272,
+ "pgfault": 2924,
+ "inactive_file": 4321280,
+ "total_pgpgin": 3477,
+ "hierarchical_memsw_limit": 9223372036854772000
+ },
+ "max_usage": 8667136,
+ "usage": 8179712,
+ "limit": 1033658368
+ },
+ "blkio_stats": {
+ "io_service_bytes_recursive": [
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Read",
+ "value": 5730304
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Write"
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Sync",
+ "value": 5730304
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Async"
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Total",
+ "value": 5730304
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Read",
+ "value": 5730304
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Sync",
+ "value": 5730304
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Total",
+ "value": 5730304
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Read",
+ "value": 5730304
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Sync",
+ "value": 5730304
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Total",
+ "value": 5730304
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Read",
+ "value": 5730304
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Sync",
+ "value": 5730304
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Total",
+ "value": 5730304
+ }
+ ],
+ "io_serviced_recursive": [
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Read",
+ "value": 156
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Write"
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Sync",
+ "value": 156
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Async"
+ },
+ {
+ "major": 202,
+ "minor": 26368,
+ "op": "Total",
+ "value": 156
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Read",
+ "value": 156
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Sync",
+ "value": 156
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 1,
+ "op": "Total",
+ "value": 156
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Read",
+ "value": 156
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Sync",
+ "value": 156
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 2,
+ "op": "Total",
+ "value": 156
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Read",
+ "value": 147
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Write"
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Sync",
+ "value": 147
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Async"
+ },
+ {
+ "major": 253,
+ "minor": 5,
+ "op": "Total",
+ "value": 147
+ }
+ ]
+ },
+ "cpu_stats": {
+ "cpu_usage": {
+ "percpu_usage": [
+ 65599511,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ],
+ "usage_in_usermode": 40000000,
+ "total_usage": 65599511,
+ "usage_in_kernelmode": 10000000
+ },
+ "system_cpu_usage": 2336100000000,
+ "online_cpus": 1,
+ "throttling_data": {}
+ },
+ "precpu_stats": {
+ "cpu_usage": {
+ "percpu_usage": [
+ 65599511,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ],
+ "usage_in_usermode": 40000000,
+ "total_usage": 65599511,
+ "usage_in_kernelmode": 10000000
+ },
+ "system_cpu_usage": 2335090000000,
+ "online_cpus": 1,
+ "throttling_data": {}
+ },
+ "storage_stats": {}
+ }
+}
diff --git a/plugins/inputs/ecs/types.go b/plugins/inputs/ecs/types.go
new file mode 100644
index 0000000000000..0b9b402f6e8d7
--- /dev/null
+++ b/plugins/inputs/ecs/types.go
@@ -0,0 +1,75 @@
+package ecs
+
+import (
+ "encoding/json"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types"
+)
+
+// Task is the ECS task representation
+type Task struct {
+ Cluster string
+ TaskARN string
+ Family string
+ Revision string
+ DesiredStatus string
+ KnownStatus string
+ Containers []Container
+ Limits map[string]float64
+ PullStartedAt time.Time
+ PullStoppedAt time.Time
+}
+
+// Container is the ECS metadata container representation
+type Container struct {
+ ID string `json:"DockerId"`
+ Name string
+ DockerName string
+ Image string
+ ImageID string
+ Labels map[string]string
+ DesiredStatus string
+ KnownStatus string
+ Limits map[string]float64
+ CreatedAt time.Time
+ StartedAt time.Time
+ Stats types.StatsJSON
+ Type string
+ Networks []Network
+}
+
+// Network is a docker network configuration
+type Network struct {
+ NetworkMode string
+ IPv4Addresses []string
+}
+
+func unmarshalTask(r io.Reader) (*Task, error) {
+ task := &Task{}
+ err := json.NewDecoder(r).Decode(task)
+ return task, err
+}
+
+// docker parsers
+func unmarshalStats(r io.Reader) (map[string]types.StatsJSON, error) {
+ var statsMap map[string]types.StatsJSON
+ err := json.NewDecoder(r).Decode(&statsMap)
+ return statsMap, err
+}
+
+// interleaves Stats in to the Container objects in the Task
+func mergeTaskStats(task *Task, stats map[string]types.StatsJSON) {
+ for i, c := range task.Containers {
+ if strings.Trim(c.ID, " ") == "" {
+ continue
+ }
+ stat, ok := stats[c.ID]
+ if !ok {
+ continue
+ }
+ task.Containers[i].Stats = stat
+ }
+}
diff --git a/plugins/inputs/ecs/types_test.go b/plugins/inputs/ecs/types_test.go
new file mode 100644
index 0000000000000..e68e9711e6468
--- /dev/null
+++ b/plugins/inputs/ecs/types_test.go
@@ -0,0 +1,47 @@
+package ecs
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func Test_parseTask(t *testing.T) {
+ r, err := os.Open("testdata/metadata.golden")
+ require.NoError(t, err)
+
+ parsed, err := unmarshalTask(r)
+ require.NoError(t, err)
+
+ require.Equal(t, validMeta, *parsed)
+}
+
+func Test_parseStats(t *testing.T) {
+ r, err := os.Open("testdata/stats.golden")
+ require.NoError(t, err)
+
+ parsed, err := unmarshalStats(r)
+ require.NoError(t, err)
+ require.Equal(t, validStats, parsed)
+}
+
+func Test_mergeTaskStats(t *testing.T) {
+ metadata, err := os.Open("testdata/metadata.golden")
+ require.NoError(t, err)
+
+ parsedMetadata, err := unmarshalTask(metadata)
+ require.NoError(t, err)
+
+ stats, err := os.Open("testdata/stats.golden")
+ require.NoError(t, err)
+
+ parsedStats, err := unmarshalStats(stats)
+ require.NoError(t, err)
+
+ mergeTaskStats(parsedMetadata, parsedStats)
+
+ for _, cont := range parsedMetadata.Containers {
+ require.Equal(t, validStats[cont.ID], cont.Stats)
+ }
+}
diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md
index d8e43da3884d6..54285c3b9e8e3 100644
--- a/plugins/inputs/elasticsearch/README.md
+++ b/plugins/inputs/elasticsearch/README.md
@@ -1,15 +1,32 @@
-# Elasticsearch input plugin
+# Elasticsearch Input Plugin
The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
-[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
-and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)
-or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics.
+[Node Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
+and optionally
+[Cluster-Health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)
+metrics.
-### Configuration:
+In addition, the following optional queries are only made by the master node:
+ [Cluster Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html)
+ [Indices Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html)
+ [Shard Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html)
-```
+Specific Elasticsearch endpoints that are queried:
+- Node: either /_nodes/stats or /_nodes/_local/stats depending on 'local' configuration setting
+- Cluster Heath: /_cluster/health?level=indices
+- Cluster Stats: /_cluster/stats
+- Indices Stats: /_all/_stats
+- Shard Stats: /_all/_stats?level=shards
+
+Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level.
+
+### Configuration
+
+```toml
[[inputs.elasticsearch]]
## specify a list of one or more Elasticsearch servers
+ ## you can add username and password to your url to use basic authentication:
+ ## servers = ["http://user:pass@localhost:9200"]
servers = ["http://localhost:9200"]
## Timeout for HTTP requests to the elastic search server(s)
@@ -17,29 +34,40 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
## When local is true (the default), the node will read only its own stats.
## Set local to false when you want to read the node stats from all nodes
- ## of the cluster.
+ ## of the cluster.
local = true
- ## Set cluster_health to true when you want to also obtain cluster health stats
+ ## Set cluster_health to true when you want to obtain cluster health stats
cluster_health = false
- ## Adjust cluster_health_level when you want to also obtain detailed health stats
+ ## Adjust cluster_health_level when you want to obtain detailed health stats
## The options are
## - indices (default)
## - cluster
# cluster_health_level = "indices"
- ## Set cluster_stats to true when you want to also obtain cluster stats.
+ ## Set cluster_stats to true when you want to obtain cluster stats.
cluster_stats = false
## Only gather cluster_stats from the master node. To work this require local = true
cluster_stats_only_from_master = true
+ ## Indices to collect; can be one or more indices names or _all
+ indices_include = ["_all"]
+
+ ## One of "shards", "cluster", "indices"
+ ## Currently only "shards" is implemented
+ indices_level = "shards"
+
## node_stats is a list of sub-stats that you want to have gathered. Valid options
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
## "breaker". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]
+ ## HTTP Basic Authentication username and password.
+ # username = ""
+ # password = ""
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
@@ -48,306 +76,777 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
# insecure_skip_verify = false
```
-### Status mappings
-
-When reporting health (green/yellow/red), additional field `status_code`
-is reported. Field contains mapping from status:string to status_code:int
-with following rules:
-
-* `green` - 1
-* `yellow` - 2
-* `red` - 3
-* `unknown` - 0
-
-### Measurements & Fields:
-
-field data circuit breaker measurement names:
-- elasticsearch_breakers
- - fielddata_estimated_size_in_bytes value=0
- - fielddata_overhead value=1.03
- - fielddata_tripped value=0
- - fielddata_limit_size_in_bytes value=623326003
- - request_estimated_size_in_bytes value=0
- - request_overhead value=1.0
- - request_tripped value=0
- - request_limit_size_in_bytes value=415550668
- - parent_overhead value=1.0
- - parent_tripped value=0
- - parent_limit_size_in_bytes value=727213670
- - parent_estimated_size_in_bytes value=0
-
-File system information, data path, free disk space, read/write measurement names:
+### Metrics
+
+Emitted when `cluster_health = true`:
+
+- elasticsearch_cluster_health
+ - tags:
+ - name
+ - fields:
+ - active_primary_shards (integer)
+ - active_shards (integer)
+ - active_shards_percent_as_number (float)
+ - delayed_unassigned_shards (integer)
+ - initializing_shards (integer)
+ - number_of_data_nodes (integer)
+ - number_of_in_flight_fetch (integer)
+ - number_of_nodes (integer)
+ - number_of_pending_tasks (integer)
+ - relocating_shards (integer)
+ - status (string, one of green, yellow or red)
+ - status_code (integer, green = 1, yellow = 2, red = 3),
+ - task_max_waiting_in_queue_millis (integer)
+ - timed_out (boolean)
+ - unassigned_shards (integer)
+
+Emitted when `cluster_health = true` and `cluster_health_level = "indices"`:
+
+- elasticsearch_cluster_health_indices
+ - tags:
+ - index
+ - name
+ - fields:
+ - active_primary_shards (integer)
+ - active_shards (integer)
+ - initializing_shards (integer)
+ - number_of_replicas (integer)
+ - number_of_shards (integer)
+ - relocating_shards (integer)
+ - status (string, one of green, yellow or red)
+ - status_code (integer, green = 1, yellow = 2, red = 3),
+ - unassigned_shards (integer)
+
+Emitted when `cluster_stats = true`:
+
+- elasticsearch_clusterstats_indices
+ - tags:
+ - cluster_name
+ - node_name
+ - status
+ - fields:
+ - completion_size_in_bytes (float)
+ - count (float)
+ - docs_count (float)
+ - docs_deleted (float)
+ - fielddata_evictions (float)
+ - fielddata_memory_size_in_bytes (float)
+ - query_cache_cache_count (float)
+ - query_cache_cache_size (float)
+ - query_cache_evictions (float)
+ - query_cache_hit_count (float)
+ - query_cache_memory_size_in_bytes (float)
+ - query_cache_miss_count (float)
+ - query_cache_total_count (float)
+ - segments_count (float)
+ - segments_doc_values_memory_in_bytes (float)
+ - segments_fixed_bit_set_memory_in_bytes (float)
+ - segments_index_writer_memory_in_bytes (float)
+ - segments_max_unsafe_auto_id_timestamp (float)
+ - segments_memory_in_bytes (float)
+ - segments_norms_memory_in_bytes (float)
+ - segments_points_memory_in_bytes (float)
+ - segments_stored_fields_memory_in_bytes (float)
+ - segments_term_vectors_memory_in_bytes (float)
+ - segments_terms_memory_in_bytes (float)
+ - segments_version_map_memory_in_bytes (float)
+ - shards_index_primaries_avg (float)
+ - shards_index_primaries_max (float)
+ - shards_index_primaries_min (float)
+ - shards_index_replication_avg (float)
+ - shards_index_replication_max (float)
+ - shards_index_replication_min (float)
+ - shards_index_shards_avg (float)
+ - shards_index_shards_max (float)
+ - shards_index_shards_min (float)
+ - shards_primaries (float)
+ - shards_replication (float)
+ - shards_total (float)
+ - store_size_in_bytes (float)
+
++ elasticsearch_clusterstats_nodes
+ - tags:
+ - cluster_name
+ - node_name
+ - status
+ - fields:
+ - count_coordinating_only (float)
+ - count_data (float)
+ - count_ingest (float)
+ - count_master (float)
+ - count_total (float)
+ - fs_available_in_bytes (float)
+ - fs_free_in_bytes (float)
+ - fs_total_in_bytes (float)
+ - jvm_max_uptime_in_millis (float)
+ - jvm_mem_heap_max_in_bytes (float)
+ - jvm_mem_heap_used_in_bytes (float)
+ - jvm_threads (float)
+ - jvm_versions_0_count (float)
+ - jvm_versions_0_version (string)
+ - jvm_versions_0_vm_name (string)
+ - jvm_versions_0_vm_vendor (string)
+ - jvm_versions_0_vm_version (string)
+ - network_types_http_types_security4 (float)
+ - network_types_transport_types_security4 (float)
+ - os_allocated_processors (float)
+ - os_available_processors (float)
+ - os_mem_free_in_bytes (float)
+ - os_mem_free_percent (float)
+ - os_mem_total_in_bytes (float)
+ - os_mem_used_in_bytes (float)
+ - os_mem_used_percent (float)
+ - os_names_0_count (float)
+ - os_names_0_name (string)
+ - os_pretty_names_0_count (float)
+ - os_pretty_names_0_pretty_name (string)
+ - process_cpu_percent (float)
+ - process_open_file_descriptors_avg (float)
+ - process_open_file_descriptors_max (float)
+ - process_open_file_descriptors_min (float)
+ - versions_0 (string)
+
+Emitted when the appropriate `node_stats` options are set.
+
+- elasticsearch_transport
+ - tags:
+ - cluster_name
+ - node_attribute_ml.enabled
+ - node_attribute_ml.machine_memory
+ - node_attribute_ml.max_open_jobs
+ - node_attribute_xpack.installed
+ - node_host
+ - node_id
+ - node_name
+ - fields:
+ - rx_count (float)
+ - rx_size_in_bytes (float)
+ - server_open (float)
+ - tx_count (float)
+ - tx_size_in_bytes (float)
+
++ elasticsearch_breakers
+ - tags:
+ - cluster_name
+ - node_attribute_ml.enabled
+ - node_attribute_ml.machine_memory
+ - node_attribute_ml.max_open_jobs
+ - node_attribute_xpack.installed
+ - node_host
+ - node_id
+ - node_name
+ - fields:
+ - accounting_estimated_size_in_bytes (float)
+ - accounting_limit_size_in_bytes (float)
+ - accounting_overhead (float)
+ - accounting_tripped (float)
+ - fielddata_estimated_size_in_bytes (float)
+ - fielddata_limit_size_in_bytes (float)
+ - fielddata_overhead (float)
+ - fielddata_tripped (float)
+ - in_flight_requests_estimated_size_in_bytes (float)
+ - in_flight_requests_limit_size_in_bytes (float)
+ - in_flight_requests_overhead (float)
+ - in_flight_requests_tripped (float)
+ - parent_estimated_size_in_bytes (float)
+ - parent_limit_size_in_bytes (float)
+ - parent_overhead (float)
+ - parent_tripped (float)
+ - request_estimated_size_in_bytes (float)
+ - request_limit_size_in_bytes (float)
+ - request_overhead (float)
+ - request_tripped (float)
+
- elasticsearch_fs
- - timestamp value=1436460392946
- - total_free_in_bytes value=16909316096
- - total_available_in_bytes value=15894814720
- - total_total_in_bytes value=19507089408
+ - tags:
+ - cluster_name
+ - node_attribute_ml.enabled
+ - node_attribute_ml.machine_memory
+ - node_attribute_ml.max_open_jobs
+ - node_attribute_xpack.installed
+ - node_host
+ - node_id
+ - node_name
+ - fields:
+ - data_0_available_in_bytes (float)
+ - data_0_free_in_bytes (float)
+ - data_0_total_in_bytes (float)
+ - io_stats_devices_0_operations (float)
+ - io_stats_devices_0_read_kilobytes (float)
+ - io_stats_devices_0_read_operations (float)
+ - io_stats_devices_0_write_kilobytes (float)
+ - io_stats_devices_0_write_operations (float)
+ - io_stats_total_operations (float)
+ - io_stats_total_read_kilobytes (float)
+ - io_stats_total_read_operations (float)
+ - io_stats_total_write_kilobytes (float)
+ - io_stats_total_write_operations (float)
+ - timestamp (float)
+ - total_available_in_bytes (float)
+ - total_free_in_bytes (float)
+ - total_total_in_bytes (float)
+
++ elasticsearch_http
+ - tags:
+ - cluster_name
+ - node_attribute_ml.enabled
+ - node_attribute_ml.machine_memory
+ - node_attribute_ml.max_open_jobs
+ - node_attribute_xpack.installed
+ - node_host
+ - node_id
+ - node_name
+ - fields:
+ - current_open (float)
+ - total_opened (float)
-indices size, document count, indexing and deletion times, search times,
-field cache size, merges and flushes measurement names:
- elasticsearch_indices
- - id_cache_memory_size_in_bytes value=0
- - completion_size_in_bytes value=0
- - suggest_total value=0
- - suggest_time_in_millis value=0
- - suggest_current value=0
- - query_cache_memory_size_in_bytes value=0
- - query_cache_evictions value=0
- - query_cache_hit_count value=0
- - query_cache_miss_count value=0
- - store_size_in_bytes value=37715234
- - store_throttle_time_in_millis value=215
- - merges_current_docs value=0
- - merges_current_size_in_bytes value=0
- - merges_total value=133
- - merges_total_time_in_millis value=21060
- - merges_total_docs value=203672
- - merges_total_size_in_bytes value=142900226
- - merges_current value=0
- - filter_cache_memory_size_in_bytes value=7384
- - filter_cache_evictions value=0
- - indexing_index_total value=84790
- - indexing_index_time_in_millis value=29680
- - indexing_index_current value=0
- - indexing_noop_update_total value=0
- - indexing_throttle_time_in_millis value=0
- - indexing_delete_tota value=13879
- - indexing_delete_time_in_millis value=1139
- - indexing_delete_current value=0
- - get_exists_time_in_millis value=0
- - get_missing_total value=1
- - get_missing_time_in_millis value=2
- - get_current value=0
- - get_total value=1
- - get_time_in_millis value=2
- - get_exists_total value=0
- - refresh_total value=1076
- - refresh_total_time_in_millis value=20078
- - percolate_current value=0
- - percolate_memory_size_in_bytes value=-1
- - percolate_queries value=0
- - percolate_total value=0
- - percolate_time_in_millis value=0
- - translog_operations value=17702
- - translog_size_in_bytes value=17
- - recovery_current_as_source value=0
- - recovery_current_as_target value=0
- - recovery_throttle_time_in_millis value=0
- - docs_count value=29652
- - docs_deleted value=5229
- - flush_total_time_in_millis value=2401
- - flush_total value=115
- - fielddata_memory_size_in_bytes value=12996
- - fielddata_evictions value=0
- - search_fetch_current value=0
- - search_open_contexts value=0
- - search_query_total value=1452
- - search_query_time_in_millis value=5695
- - search_query_current value=0
- - search_fetch_total value=414
- - search_fetch_time_in_millis value=146
- - warmer_current value=0
- - warmer_total value=2319
- - warmer_total_time_in_millis value=448
- - segments_count value=134
- - segments_memory_in_bytes value=1285212
- - segments_index_writer_memory_in_bytes value=0
- - segments_index_writer_max_memory_in_bytes value=172368955
- - segments_version_map_memory_in_bytes value=611844
- - segments_fixed_bit_set_memory_in_bytes value=0
-
-HTTP connection measurement names:
-- elasticsearch_http
- - current_open value=3
- - total_opened value=3
-
-JVM stats, memory pool information, garbage collection, buffer pools measurement names:
-- elasticsearch_jvm
- - timestamp value=1436460392945
- - uptime_in_millis value=202245
- - mem_non_heap_used_in_bytes value=39634576
- - mem_non_heap_committed_in_bytes value=40841216
- - mem_pools_young_max_in_bytes value=279183360
- - mem_pools_young_peak_used_in_bytes value=71630848
- - mem_pools_young_peak_max_in_bytes value=279183360
- - mem_pools_young_used_in_bytes value=32685760
- - mem_pools_survivor_peak_used_in_bytes value=8912888
- - mem_pools_survivor_peak_max_in_bytes value=34865152
- - mem_pools_survivor_used_in_bytes value=8912880
- - mem_pools_survivor_max_in_bytes value=34865152
- - mem_pools_old_peak_max_in_bytes value=724828160
- - mem_pools_old_used_in_bytes value=11110928
- - mem_pools_old_max_in_bytes value=724828160
- - mem_pools_old_peak_used_in_bytes value=14354608
- - mem_heap_used_in_bytes value=52709568
- - mem_heap_used_percent value=5
- - mem_heap_committed_in_bytes value=259522560
- - mem_heap_max_in_bytes value=1038876672
- - threads_peak_count value=45
- - threads_count value=44
- - gc_collectors_young_collection_count value=2
- - gc_collectors_young_collection_time_in_millis value=98
- - gc_collectors_old_collection_count value=1
- - gc_collectors_old_collection_time_in_millis value=24
- - buffer_pools_direct_count value=40
- - buffer_pools_direct_used_in_bytes value=6304239
- - buffer_pools_direct_total_capacity_in_bytes value=6304239
- - buffer_pools_mapped_count value=0
- - buffer_pools_mapped_used_in_bytes value=0
- - buffer_pools_mapped_total_capacity_in_bytes value=0
-
-TCP information measurement names:
-- elasticsearch_network
- - tcp_in_errs value=0
- - tcp_passive_opens value=16
- - tcp_curr_estab value=29
- - tcp_in_segs value=113
- - tcp_out_segs value=97
- - tcp_retrans_segs value=0
- - tcp_attempt_fails value=0
- - tcp_active_opens value=13
- - tcp_estab_resets value=0
- - tcp_out_rsts value=0
-
-Operating system stats, load average, cpu, mem, swap measurement names:
+ - tags:
+ - cluster_name
+ - node_attribute_ml.enabled
+ - node_attribute_ml.machine_memory
+ - node_attribute_ml.max_open_jobs
+ - node_attribute_xpack.installed
+ - node_host
+ - node_id
+ - node_name
+ - fields:
+ - completion_size_in_bytes (float)
+ - docs_count (float)
+ - docs_deleted (float)
+ - fielddata_evictions (float)
+ - fielddata_memory_size_in_bytes (float)
+ - flush_periodic (float)
+ - flush_total (float)
+ - flush_total_time_in_millis (float)
+ - get_current (float)
+ - get_exists_time_in_millis (float)
+ - get_exists_total (float)
+ - get_missing_time_in_millis (float)
+ - get_missing_total (float)
+ - get_time_in_millis (float)
+ - get_total (float)
+ - indexing_delete_current (float)
+ - indexing_delete_time_in_millis (float)
+ - indexing_delete_total (float)
+ - indexing_index_current (float)
+ - indexing_index_failed (float)
+ - indexing_index_time_in_millis (float)
+ - indexing_index_total (float)
+ - indexing_noop_update_total (float)
+ - indexing_throttle_time_in_millis (float)
+ - merges_current (float)
+ - merges_current_docs (float)
+ - merges_current_size_in_bytes (float)
+ - merges_total (float)
+ - merges_total_auto_throttle_in_bytes (float)
+ - merges_total_docs (float)
+ - merges_total_size_in_bytes (float)
+ - merges_total_stopped_time_in_millis (float)
+ - merges_total_throttled_time_in_millis (float)
+ - merges_total_time_in_millis (float)
+ - query_cache_cache_count (float)
+ - query_cache_cache_size (float)
+ - query_cache_evictions (float)
+ - query_cache_hit_count (float)
+ - query_cache_memory_size_in_bytes (float)
+ - query_cache_miss_count (float)
+ - query_cache_total_count (float)
+ - recovery_current_as_source (float)
+ - recovery_current_as_target (float)
+ - recovery_throttle_time_in_millis (float)
+ - refresh_listeners (float)
+ - refresh_total (float)
+ - refresh_total_time_in_millis (float)
+ - request_cache_evictions (float)
+ - request_cache_hit_count (float)
+ - request_cache_memory_size_in_bytes (float)
+ - request_cache_miss_count (float)
+ - search_fetch_current (float)
+ - search_fetch_time_in_millis (float)
+ - search_fetch_total (float)
+ - search_open_contexts (float)
+ - search_query_current (float)
+ - search_query_time_in_millis (float)
+ - search_query_total (float)
+ - search_scroll_current (float)
+ - search_scroll_time_in_millis (float)
+ - search_scroll_total (float)
+ - search_suggest_current (float)
+ - search_suggest_time_in_millis (float)
+ - search_suggest_total (float)
+ - segments_count (float)
+ - segments_doc_values_memory_in_bytes (float)
+ - segments_fixed_bit_set_memory_in_bytes (float)
+ - segments_index_writer_memory_in_bytes (float)
+ - segments_max_unsafe_auto_id_timestamp (float)
+ - segments_memory_in_bytes (float)
+ - segments_norms_memory_in_bytes (float)
+ - segments_points_memory_in_bytes (float)
+ - segments_stored_fields_memory_in_bytes (float)
+ - segments_term_vectors_memory_in_bytes (float)
+ - segments_terms_memory_in_bytes (float)
+ - segments_version_map_memory_in_bytes (float)
+ - store_size_in_bytes (float)
+ - translog_earliest_last_modified_age (float)
+ - translog_operations (float)
+ - translog_size_in_bytes (float)
+ - translog_uncommitted_operations (float)
+ - translog_uncommitted_size_in_bytes (float)
+ - warmer_current (float)
+ - warmer_total (float)
+ - warmer_total_time_in_millis (float)
+
++ elasticsearch_jvm
+ - tags:
+ - cluster_name
+ - node_attribute_ml.enabled
+ - node_attribute_ml.machine_memory
+ - node_attribute_ml.max_open_jobs
+ - node_attribute_xpack.installed
+ - node_host
+ - node_id
+ - node_name
+ - fields:
+ - buffer_pools_direct_count (float)
+ - buffer_pools_direct_total_capacity_in_bytes (float)
+ - buffer_pools_direct_used_in_bytes (float)
+ - buffer_pools_mapped_count (float)
+ - buffer_pools_mapped_total_capacity_in_bytes (float)
+ - buffer_pools_mapped_used_in_bytes (float)
+ - classes_current_loaded_count (float)
+ - classes_total_loaded_count (float)
+ - classes_total_unloaded_count (float)
+ - gc_collectors_old_collection_count (float)
+ - gc_collectors_old_collection_time_in_millis (float)
+ - gc_collectors_young_collection_count (float)
+ - gc_collectors_young_collection_time_in_millis (float)
+ - mem_heap_committed_in_bytes (float)
+ - mem_heap_max_in_bytes (float)
+ - mem_heap_used_in_bytes (float)
+ - mem_heap_used_percent (float)
+ - mem_non_heap_committed_in_bytes (float)
+ - mem_non_heap_used_in_bytes (float)
+ - mem_pools_old_max_in_bytes (float)
+ - mem_pools_old_peak_max_in_bytes (float)
+ - mem_pools_old_peak_used_in_bytes (float)
+ - mem_pools_old_used_in_bytes (float)
+ - mem_pools_survivor_max_in_bytes (float)
+ - mem_pools_survivor_peak_max_in_bytes (float)
+ - mem_pools_survivor_peak_used_in_bytes (float)
+ - mem_pools_survivor_used_in_bytes (float)
+ - mem_pools_young_max_in_bytes (float)
+ - mem_pools_young_peak_max_in_bytes (float)
+ - mem_pools_young_peak_used_in_bytes (float)
+ - mem_pools_young_used_in_bytes (float)
+ - threads_count (float)
+ - threads_peak_count (float)
+ - timestamp (float)
+ - uptime_in_millis (float)
+
- elasticsearch_os
- - swap_used_in_bytes value=0
- - swap_free_in_bytes value=487997440
- - timestamp value=1436460392944
- - uptime_in_millis value=25092
- - cpu_sys value=0
- - cpu_user value=0
- - cpu_idle value=99
- - cpu_usage value=0
- - cpu_stolen value=0
- - mem_free_percent value=74
- - mem_used_percent value=25
- - mem_actual_free_in_bytes value=1565470720
- - mem_actual_used_in_bytes value=534159360
- - mem_free_in_bytes value=477761536
- - mem_used_in_bytes value=1621868544
-
-Process statistics, memory consumption, cpu usage, open file descriptors measurement names:
-- elasticsearch_process
- - mem_resident_in_bytes value=246382592
- - mem_share_in_bytes value=18747392
- - mem_total_virtual_in_bytes value=4747890688
- - timestamp value=1436460392945
- - open_file_descriptors value=160
- - cpu_total_in_millis value=15480
- - cpu_percent value=2
- - cpu_sys_in_millis value=1870
- - cpu_user_in_millis value=13610
-
-Statistics about each thread pool, including current size, queue and rejected tasks measurement names:
+ - tags:
+ - cluster_name
+ - node_attribute_ml.enabled
+ - node_attribute_ml.machine_memory
+ - node_attribute_ml.max_open_jobs
+ - node_attribute_xpack.installed
+ - node_host
+ - node_id
+ - node_name
+ - fields:
+ - cgroup_cpu_cfs_period_micros (float)
+ - cgroup_cpu_cfs_quota_micros (float)
+ - cgroup_cpu_stat_number_of_elapsed_periods (float)
+ - cgroup_cpu_stat_number_of_times_throttled (float)
+ - cgroup_cpu_stat_time_throttled_nanos (float)
+ - cgroup_cpuacct_usage_nanos (float)
+ - cpu_load_average_15m (float)
+ - cpu_load_average_1m (float)
+ - cpu_load_average_5m (float)
+ - cpu_percent (float)
+ - mem_free_in_bytes (float)
+ - mem_free_percent (float)
+ - mem_total_in_bytes (float)
+ - mem_used_in_bytes (float)
+ - mem_used_percent (float)
+ - swap_free_in_bytes (float)
+ - swap_total_in_bytes (float)
+ - swap_used_in_bytes (float)
+ - timestamp (float)
+
++ elasticsearch_process
+ - tags:
+ - cluster_name
+ - node_attribute_ml.enabled
+ - node_attribute_ml.machine_memory
+ - node_attribute_ml.max_open_jobs
+ - node_attribute_xpack.installed
+ - node_host
+ - node_id
+ - node_name
+ - fields:
+ - cpu_percent (float)
+ - cpu_total_in_millis (float)
+ - max_file_descriptors (float)
+ - mem_total_virtual_in_bytes (float)
+ - open_file_descriptors (float)
+ - timestamp (float)
+
- elasticsearch_thread_pool
- - merge_threads value=6
- - merge_queue value=4
- - merge_active value=5
- - merge_rejected value=2
- - merge_largest value=5
- - merge_completed value=1
- - bulk_threads value=4
- - bulk_queue value=5
- - bulk_active value=7
- - bulk_rejected value=3
- - bulk_largest value=1
- - bulk_completed value=4
- - warmer_threads value=2
- - warmer_queue value=7
- - warmer_active value=3
- - warmer_rejected value=2
- - warmer_largest value=3
- - warmer_completed value=1
- - get_largest value=2
- - get_completed value=1
- - get_threads value=1
- - get_queue value=8
- - get_active value=4
- - get_rejected value=3
- - index_threads value=6
- - index_queue value=8
- - index_active value=4
- - index_rejected value=2
- - index_largest value=3
- - index_completed value=6
- - suggest_threads value=2
- - suggest_queue value=7
- - suggest_active value=2
- - suggest_rejected value=1
- - suggest_largest value=8
- - suggest_completed value=3
- - fetch_shard_store_queue value=7
- - fetch_shard_store_active value=4
- - fetch_shard_store_rejected value=2
- - fetch_shard_store_largest value=4
- - fetch_shard_store_completed value=1
- - fetch_shard_store_threads value=1
- - management_threads value=2
- - management_queue value=3
- - management_active value=1
- - management_rejected value=6
- - management_largest value=2
- - management_completed value=22
- - percolate_queue value=23
- - percolate_active value=13
- - percolate_rejected value=235
- - percolate_largest value=23
- - percolate_completed value=33
- - percolate_threads value=123
- - listener_active value=4
- - listener_rejected value=8
- - listener_largest value=1
- - listener_completed value=1
- - listener_threads value=1
- - listener_queue value=2
- - search_rejected value=7
- - search_largest value=2
- - search_completed value=4
- - search_threads value=5
- - search_queue value=7
- - search_active value=2
- - fetch_shard_started_threads value=3
- - fetch_shard_started_queue value=1
- - fetch_shard_started_active value=5
- - fetch_shard_started_rejected value=6
- - fetch_shard_started_largest value=4
- - fetch_shard_started_completed value=54
- - refresh_rejected value=4
- - refresh_largest value=8
- - refresh_completed value=3
- - refresh_threads value=23
- - refresh_queue value=7
- - refresh_active value=3
- - optimize_threads value=3
- - optimize_queue value=4
- - optimize_active value=1
- - optimize_rejected value=2
- - optimize_largest value=7
- - optimize_completed value=3
- - snapshot_largest value=1
- - snapshot_completed value=0
- - snapshot_threads value=8
- - snapshot_queue value=5
- - snapshot_active value=6
- - snapshot_rejected value=2
- - generic_threads value=1
- - generic_queue value=4
- - generic_active value=6
- - generic_rejected value=3
- - generic_largest value=2
- - generic_completed value=27
- - flush_threads value=3
- - flush_queue value=8
- - flush_active value=0
- - flush_rejected value=1
- - flush_largest value=5
- - flush_completed value=3
-
-Transport statistics about sent and received bytes in cluster communication measurement names:
-- elasticsearch_transport
- - server_open value=13
- - rx_count value=6
- - rx_size_in_bytes value=1380
- - tx_count value=6
- - tx_size_in_bytes value=1380
+ - tags:
+ - cluster_name
+ - node_attribute_ml.enabled
+ - node_attribute_ml.machine_memory
+ - node_attribute_ml.max_open_jobs
+ - node_attribute_xpack.installed
+ - node_host
+ - node_id
+ - node_name
+ - fields:
+ - analyze_active (float)
+ - analyze_completed (float)
+ - analyze_largest (float)
+ - analyze_queue (float)
+ - analyze_rejected (float)
+ - analyze_threads (float)
+ - ccr_active (float)
+ - ccr_completed (float)
+ - ccr_largest (float)
+ - ccr_queue (float)
+ - ccr_rejected (float)
+ - ccr_threads (float)
+ - fetch_shard_started_active (float)
+ - fetch_shard_started_completed (float)
+ - fetch_shard_started_largest (float)
+ - fetch_shard_started_queue (float)
+ - fetch_shard_started_rejected (float)
+ - fetch_shard_started_threads (float)
+ - fetch_shard_store_active (float)
+ - fetch_shard_store_completed (float)
+ - fetch_shard_store_largest (float)
+ - fetch_shard_store_queue (float)
+ - fetch_shard_store_rejected (float)
+ - fetch_shard_store_threads (float)
+ - flush_active (float)
+ - flush_completed (float)
+ - flush_largest (float)
+ - flush_queue (float)
+ - flush_rejected (float)
+ - flush_threads (float)
+ - force_merge_active (float)
+ - force_merge_completed (float)
+ - force_merge_largest (float)
+ - force_merge_queue (float)
+ - force_merge_rejected (float)
+ - force_merge_threads (float)
+ - generic_active (float)
+ - generic_completed (float)
+ - generic_largest (float)
+ - generic_queue (float)
+ - generic_rejected (float)
+ - generic_threads (float)
+ - get_active (float)
+ - get_completed (float)
+ - get_largest (float)
+ - get_queue (float)
+ - get_rejected (float)
+ - get_threads (float)
+ - index_active (float)
+ - index_completed (float)
+ - index_largest (float)
+ - index_queue (float)
+ - index_rejected (float)
+ - index_threads (float)
+ - listener_active (float)
+ - listener_completed (float)
+ - listener_largest (float)
+ - listener_queue (float)
+ - listener_rejected (float)
+ - listener_threads (float)
+ - management_active (float)
+ - management_completed (float)
+ - management_largest (float)
+ - management_queue (float)
+ - management_rejected (float)
+ - management_threads (float)
+ - ml_autodetect_active (float)
+ - ml_autodetect_completed (float)
+ - ml_autodetect_largest (float)
+ - ml_autodetect_queue (float)
+ - ml_autodetect_rejected (float)
+ - ml_autodetect_threads (float)
+ - ml_datafeed_active (float)
+ - ml_datafeed_completed (float)
+ - ml_datafeed_largest (float)
+ - ml_datafeed_queue (float)
+ - ml_datafeed_rejected (float)
+ - ml_datafeed_threads (float)
+ - ml_utility_active (float)
+ - ml_utility_completed (float)
+ - ml_utility_largest (float)
+ - ml_utility_queue (float)
+ - ml_utility_rejected (float)
+ - ml_utility_threads (float)
+ - refresh_active (float)
+ - refresh_completed (float)
+ - refresh_largest (float)
+ - refresh_queue (float)
+ - refresh_rejected (float)
+ - refresh_threads (float)
+ - rollup_indexing_active (float)
+ - rollup_indexing_completed (float)
+ - rollup_indexing_largest (float)
+ - rollup_indexing_queue (float)
+ - rollup_indexing_rejected (float)
+ - rollup_indexing_threads (float)
+ - search_active (float)
+ - search_completed (float)
+ - search_largest (float)
+ - search_queue (float)
+ - search_rejected (float)
+ - search_threads (float)
+ - search_throttled_active (float)
+ - search_throttled_completed (float)
+ - search_throttled_largest (float)
+ - search_throttled_queue (float)
+ - search_throttled_rejected (float)
+ - search_throttled_threads (float)
+ - security-token-key_active (float)
+ - security-token-key_completed (float)
+ - security-token-key_largest (float)
+ - security-token-key_queue (float)
+ - security-token-key_rejected (float)
+ - security-token-key_threads (float)
+ - snapshot_active (float)
+ - snapshot_completed (float)
+ - snapshot_largest (float)
+ - snapshot_queue (float)
+ - snapshot_rejected (float)
+ - snapshot_threads (float)
+ - warmer_active (float)
+ - warmer_completed (float)
+ - warmer_largest (float)
+ - warmer_queue (float)
+ - warmer_rejected (float)
+ - warmer_threads (float)
+ - watcher_active (float)
+ - watcher_completed (float)
+ - watcher_largest (float)
+ - watcher_queue (float)
+ - watcher_rejected (float)
+ - watcher_threads (float)
+ - write_active (float)
+ - write_completed (float)
+ - write_largest (float)
+ - write_queue (float)
+ - write_rejected (float)
+ - write_threads (float)
+
+Emitted when the appropriate `indices_stats` options are set.
+
+- elasticsearch_indices_stats_(primaries|total)
+ - tags:
+ - index_name
+ - fields:
+ - completion_size_in_bytes (float)
+ - docs_count (float)
+ - docs_deleted (float)
+ - fielddata_evictions (float)
+ - fielddata_memory_size_in_bytes (float)
+ - flush_periodic (float)
+ - flush_total (float)
+ - flush_total_time_in_millis (float)
+ - get_current (float)
+ - get_exists_time_in_millis (float)
+ - get_exists_total (float)
+ - get_missing_time_in_millis (float)
+ - get_missing_total (float)
+ - get_time_in_millis (float)
+ - get_total (float)
+ - indexing_delete_current (float)
+ - indexing_delete_time_in_millis (float)
+ - indexing_delete_total (float)
+ - indexing_index_current (float)
+ - indexing_index_failed (float)
+ - indexing_index_time_in_millis (float)
+ - indexing_index_total (float)
+ - indexing_is_throttled (float)
+ - indexing_noop_update_total (float)
+ - indexing_throttle_time_in_millis (float)
+ - merges_current (float)
+ - merges_current_docs (float)
+ - merges_current_size_in_bytes (float)
+ - merges_total (float)
+ - merges_total_auto_throttle_in_bytes (float)
+ - merges_total_docs (float)
+ - merges_total_size_in_bytes (float)
+ - merges_total_stopped_time_in_millis (float)
+ - merges_total_throttled_time_in_millis (float)
+ - merges_total_time_in_millis (float)
+ - query_cache_cache_count (float)
+ - query_cache_cache_size (float)
+ - query_cache_evictions (float)
+ - query_cache_hit_count (float)
+ - query_cache_memory_size_in_bytes (float)
+ - query_cache_miss_count (float)
+ - query_cache_total_count (float)
+ - recovery_current_as_source (float)
+ - recovery_current_as_target (float)
+ - recovery_throttle_time_in_millis (float)
+ - refresh_external_total (float)
+ - refresh_external_total_time_in_millis (float)
+ - refresh_listeners (float)
+ - refresh_total (float)
+ - refresh_total_time_in_millis (float)
+ - request_cache_evictions (float)
+ - request_cache_hit_count (float)
+ - request_cache_memory_size_in_bytes (float)
+ - request_cache_miss_count (float)
+ - search_fetch_current (float)
+ - search_fetch_time_in_millis (float)
+ - search_fetch_total (float)
+ - search_open_contexts (float)
+ - search_query_current (float)
+ - search_query_time_in_millis (float)
+ - search_query_total (float)
+ - search_scroll_current (float)
+ - search_scroll_time_in_millis (float)
+ - search_scroll_total (float)
+ - search_suggest_current (float)
+ - search_suggest_time_in_millis (float)
+ - search_suggest_total (float)
+ - segments_count (float)
+ - segments_doc_values_memory_in_bytes (float)
+ - segments_fixed_bit_set_memory_in_bytes (float)
+ - segments_index_writer_memory_in_bytes (float)
+ - segments_max_unsafe_auto_id_timestamp (float)
+ - segments_memory_in_bytes (float)
+ - segments_norms_memory_in_bytes (float)
+ - segments_points_memory_in_bytes (float)
+ - segments_stored_fields_memory_in_bytes (float)
+ - segments_term_vectors_memory_in_bytes (float)
+ - segments_terms_memory_in_bytes (float)
+ - segments_version_map_memory_in_bytes (float)
+ - store_size_in_bytes (float)
+ - translog_earliest_last_modified_age (float)
+ - translog_operations (float)
+ - translog_size_in_bytes (float)
+ - translog_uncommitted_operations (float)
+ - translog_uncommitted_size_in_bytes (float)
+ - warmer_current (float)
+ - warmer_total (float)
+ - warmer_total_time_in_millis (float)
+
+Emitted when the appropriate `shards_stats` options are set.
+
+- elasticsearch_indices_stats_shards_total
+ - fields:
+ - failed (float)
+ - successful (float)
+ - total (float)
+
+- elasticsearch_indices_stats_shards
+ - tags:
+ - index_name
+ - node_name
+ - shard_name
+ - type
+ - fields:
+ - commit_generation (float)
+ - commit_num_docs (float)
+ - completion_size_in_bytes (float)
+ - docs_count (float)
+ - docs_deleted (float)
+ - fielddata_evictions (float)
+ - fielddata_memory_size_in_bytes (float)
+ - flush_periodic (float)
+ - flush_total (float)
+ - flush_total_time_in_millis (float)
+ - get_current (float)
+ - get_exists_time_in_millis (float)
+ - get_exists_total (float)
+ - get_missing_time_in_millis (float)
+ - get_missing_total (float)
+ - get_time_in_millis (float)
+ - get_total (float)
+ - indexing_delete_current (float)
+ - indexing_delete_time_in_millis (float)
+ - indexing_delete_total (float)
+ - indexing_index_current (float)
+ - indexing_index_failed (float)
+ - indexing_index_time_in_millis (float)
+ - indexing_index_total (float)
+ - indexing_is_throttled (bool)
+ - indexing_noop_update_total (float)
+ - indexing_throttle_time_in_millis (float)
+ - merges_current (float)
+ - merges_current_docs (float)
+ - merges_current_size_in_bytes (float)
+ - merges_total (float)
+ - merges_total_auto_throttle_in_bytes (float)
+ - merges_total_docs (float)
+ - merges_total_size_in_bytes (float)
+ - merges_total_stopped_time_in_millis (float)
+ - merges_total_throttled_time_in_millis (float)
+ - merges_total_time_in_millis (float)
+ - query_cache_cache_count (float)
+ - query_cache_cache_size (float)
+ - query_cache_evictions (float)
+ - query_cache_hit_count (float)
+ - query_cache_memory_size_in_bytes (float)
+ - query_cache_miss_count (float)
+ - query_cache_total_count (float)
+ - recovery_current_as_source (float)
+ - recovery_current_as_target (float)
+ - recovery_throttle_time_in_millis (float)
+ - refresh_external_total (float)
+ - refresh_external_total_time_in_millis (float)
+ - refresh_listeners (float)
+ - refresh_total (float)
+ - refresh_total_time_in_millis (float)
+ - request_cache_evictions (float)
+ - request_cache_hit_count (float)
+ - request_cache_memory_size_in_bytes (float)
+ - request_cache_miss_count (float)
+ - retention_leases_primary_term (float)
+ - retention_leases_version (float)
+ - routing_state (int) (UNASSIGNED = 1, INITIALIZING = 2, STARTED = 3, RELOCATING = 4, other = 0)
+ - search_fetch_current (float)
+ - search_fetch_time_in_millis (float)
+ - search_fetch_total (float)
+ - search_open_contexts (float)
+ - search_query_current (float)
+ - search_query_time_in_millis (float)
+ - search_query_total (float)
+ - search_scroll_current (float)
+ - search_scroll_time_in_millis (float)
+ - search_scroll_total (float)
+ - search_suggest_current (float)
+ - search_suggest_time_in_millis (float)
+ - search_suggest_total (float)
+ - segments_count (float)
+ - segments_doc_values_memory_in_bytes (float)
+ - segments_fixed_bit_set_memory_in_bytes (float)
+ - segments_index_writer_memory_in_bytes (float)
+ - segments_max_unsafe_auto_id_timestamp (float)
+ - segments_memory_in_bytes (float)
+ - segments_norms_memory_in_bytes (float)
+ - segments_points_memory_in_bytes (float)
+ - segments_stored_fields_memory_in_bytes (float)
+ - segments_term_vectors_memory_in_bytes (float)
+ - segments_terms_memory_in_bytes (float)
+ - segments_version_map_memory_in_bytes (float)
+ - seq_no_global_checkpoint (float)
+ - seq_no_local_checkpoint (float)
+ - seq_no_max_seq_no (float)
+ - shard_path_is_custom_data_path (bool)
+ - store_size_in_bytes (float)
+ - translog_earliest_last_modified_age (float)
+ - translog_operations (float)
+ - translog_size_in_bytes (float)
+ - translog_uncommitted_operations (float)
+ - translog_uncommitted_size_in_bytes (float)
+ - warmer_current (float)
+ - warmer_total (float)
+ - warmer_total_time_in_millis (float)
diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go
index 479bfcfdadefc..b6dfd2a81b11f 100644
--- a/plugins/inputs/elasticsearch/elasticsearch.go
+++ b/plugins/inputs/elasticsearch/elasticsearch.go
@@ -6,13 +6,14 @@ import (
"io/ioutil"
"net/http"
"regexp"
+ "sort"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
)
@@ -27,6 +28,7 @@ const statsPathLocal = "/_nodes/_local/stats"
type nodeStat struct {
Host string `json:"host"`
Name string `json:"name"`
+ Roles []string `json:"roles"`
Attributes map[string]string `json:"attributes"`
Indices interface{} `json:"indices"`
OS interface{} `json:"os"`
@@ -40,30 +42,32 @@ type nodeStat struct {
}
type clusterHealth struct {
- ClusterName string `json:"cluster_name"`
- Status string `json:"status"`
- TimedOut bool `json:"timed_out"`
- NumberOfNodes int `json:"number_of_nodes"`
- NumberOfDataNodes int `json:"number_of_data_nodes"`
ActivePrimaryShards int `json:"active_primary_shards"`
ActiveShards int `json:"active_shards"`
- RelocatingShards int `json:"relocating_shards"`
+ ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
+ ClusterName string `json:"cluster_name"`
+ DelayedUnassignedShards int `json:"delayed_unassigned_shards"`
InitializingShards int `json:"initializing_shards"`
- UnassignedShards int `json:"unassigned_shards"`
+ NumberOfDataNodes int `json:"number_of_data_nodes"`
+ NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"`
+ NumberOfNodes int `json:"number_of_nodes"`
NumberOfPendingTasks int `json:"number_of_pending_tasks"`
+ RelocatingShards int `json:"relocating_shards"`
+ Status string `json:"status"`
TaskMaxWaitingInQueueMillis int `json:"task_max_waiting_in_queue_millis"`
- ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
+ TimedOut bool `json:"timed_out"`
+ UnassignedShards int `json:"unassigned_shards"`
Indices map[string]indexHealth `json:"indices"`
}
type indexHealth struct {
- Status string `json:"status"`
- NumberOfShards int `json:"number_of_shards"`
- NumberOfReplicas int `json:"number_of_replicas"`
ActivePrimaryShards int `json:"active_primary_shards"`
ActiveShards int `json:"active_shards"`
- RelocatingShards int `json:"relocating_shards"`
InitializingShards int `json:"initializing_shards"`
+ NumberOfReplicas int `json:"number_of_replicas"`
+ NumberOfShards int `json:"number_of_shards"`
+ RelocatingShards int `json:"relocating_shards"`
+ Status string `json:"status"`
UnassignedShards int `json:"unassigned_shards"`
}
@@ -75,10 +79,10 @@ type clusterStats struct {
Nodes interface{} `json:"nodes"`
}
-type catMaster struct {
- NodeID string `json:"id"`
- NodeIP string `json:"ip"`
- NodeName string `json:"node"`
+type indexStat struct {
+ Primaries interface{} `json:"primaries"`
+ Total interface{} `json:"total"`
+ Shards map[string][]interface{} `json:"shards"`
}
const sampleConfig = `
@@ -110,11 +114,21 @@ const sampleConfig = `
## Only gather cluster_stats from the master node. To work this require local = true
cluster_stats_only_from_master = true
+ ## Indices to collect; can be one or more indices names or _all
+ indices_include = ["_all"]
+
+ ## One of "shards", "cluster", "indices"
+ indices_level = "shards"
+
## node_stats is a list of sub-stats that you want to have gathered. Valid options
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
## "breaker". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]
+ ## HTTP Basic Authentication username and password.
+ # username = ""
+ # password = ""
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
@@ -126,25 +140,37 @@ const sampleConfig = `
// Elasticsearch is a plugin to read stats from one or many Elasticsearch
// servers.
type Elasticsearch struct {
- Local bool
- Servers []string
- HttpTimeout internal.Duration
- ClusterHealth bool
- ClusterHealthLevel string
- ClusterStats bool
- ClusterStatsOnlyFromMaster bool
- NodeStats []string
+ Local bool `toml:"local"`
+ Servers []string `toml:"servers"`
+ HTTPTimeout internal.Duration `toml:"http_timeout"`
+ ClusterHealth bool `toml:"cluster_health"`
+ ClusterHealthLevel string `toml:"cluster_health_level"`
+ ClusterStats bool `toml:"cluster_stats"`
+ ClusterStatsOnlyFromMaster bool `toml:"cluster_stats_only_from_master"`
+ IndicesInclude []string `toml:"indices_include"`
+ IndicesLevel string `toml:"indices_level"`
+ NodeStats []string `toml:"node_stats"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
tls.ClientConfig
- client *http.Client
- catMasterResponseTokens []string
- isMaster bool
+ client *http.Client
+ serverInfo map[string]serverInfo
+ serverInfoMutex sync.Mutex
+}
+type serverInfo struct {
+ nodeID string
+ masterID string
+}
+
+func (i serverInfo) isMaster() bool {
+ return i.nodeID == i.masterID
}
// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
return &Elasticsearch{
- HttpTimeout: internal.Duration{Duration: time.Second * 5},
+ HTTPTimeout: internal.Duration{Duration: time.Second * 5},
ClusterStatsOnlyFromMaster: true,
ClusterHealthLevel: "indices",
}
@@ -163,6 +189,21 @@ func mapHealthStatusToCode(s string) int {
return 0
}
+// perform shard status mapping
+func mapShardStatusToCode(s string) int {
+ switch strings.ToUpper(s) {
+ case "UNASSIGNED":
+ return 1
+ case "INITIALIZING":
+ return 2
+ case "STARTED":
+ return 3
+ case "RELOCATING":
+ return 4
+ }
+ return 0
+}
+
// SampleConfig returns sample configuration for this plugin.
func (e *Elasticsearch) SampleConfig() string {
return sampleConfig
@@ -177,7 +218,7 @@ func (e *Elasticsearch) Description() string {
// Accumulator.
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
if e.client == nil {
- client, err := e.createHttpClient()
+ client, err := e.createHTTPClient()
if err != nil {
return err
@@ -185,25 +226,49 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
e.client = client
}
- var wg sync.WaitGroup
- wg.Add(len(e.Servers))
+ if e.ClusterStats || len(e.IndicesInclude) > 0 || len(e.IndicesLevel) > 0 {
+ var wgC sync.WaitGroup
+ wgC.Add(len(e.Servers))
- for _, serv := range e.Servers {
- go func(s string, acc telegraf.Accumulator) {
- defer wg.Done()
- url := e.nodeStatsUrl(s)
- e.isMaster = false
+ e.serverInfo = make(map[string]serverInfo)
+ for _, serv := range e.Servers {
+ go func(s string, acc telegraf.Accumulator) {
+ defer wgC.Done()
+ info := serverInfo{}
+
+ var err error
+
+ // Gather node ID
+ if info.nodeID, err = e.gatherNodeID(s + "/_nodes/_local/name"); err != nil {
+ acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
+ return
+ }
- if e.ClusterStats {
// get cat/master information here so NodeStats can determine
// whether this node is the Master
- if err := e.setCatMaster(s + "/_cat/master"); err != nil {
+ if info.masterID, err = e.getCatMaster(s + "/_cat/master"); err != nil {
acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
return
}
- }
- // Always gather node states
+ e.serverInfoMutex.Lock()
+ e.serverInfo[s] = info
+ e.serverInfoMutex.Unlock()
+
+ }(serv, acc)
+ }
+ wgC.Wait()
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(len(e.Servers))
+
+ for _, serv := range e.Servers {
+ go func(s string, acc telegraf.Accumulator) {
+ defer wg.Done()
+ url := e.nodeStatsURL(s)
+
+ // Always gather node stats
if err := e.gatherNodeStats(url, acc); err != nil {
acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
return
@@ -220,12 +285,26 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
}
}
- if e.ClusterStats && (e.isMaster || !e.ClusterStatsOnlyFromMaster || !e.Local) {
+ if e.ClusterStats && (e.serverInfo[s].isMaster() || !e.ClusterStatsOnlyFromMaster || !e.Local) {
if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil {
acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
return
}
}
+
+ if len(e.IndicesInclude) > 0 && (e.serverInfo[s].isMaster() || !e.ClusterStatsOnlyFromMaster || !e.Local) {
+ if e.IndicesLevel != "shards" {
+ if err := e.gatherIndicesStats(s+"/"+strings.Join(e.IndicesInclude, ",")+"/_stats", acc); err != nil {
+ acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
+ return
+ }
+ } else {
+ if err := e.gatherIndicesStats(s+"/"+strings.Join(e.IndicesInclude, ",")+"/_stats?level=shards", acc); err != nil {
+ acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
+ return
+ }
+ }
+ }
}(serv, acc)
}
@@ -233,30 +312,30 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
return nil
}
-func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
+func (e *Elasticsearch) createHTTPClient() (*http.Client, error) {
tlsCfg, err := e.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
tr := &http.Transport{
- ResponseHeaderTimeout: e.HttpTimeout.Duration,
+ ResponseHeaderTimeout: e.HTTPTimeout.Duration,
TLSClientConfig: tlsCfg,
}
client := &http.Client{
Transport: tr,
- Timeout: e.HttpTimeout.Duration,
+ Timeout: e.HTTPTimeout.Duration,
}
return client, nil
}
-func (e *Elasticsearch) nodeStatsUrl(baseUrl string) string {
+func (e *Elasticsearch) nodeStatsURL(baseURL string) string {
var url string
if e.Local {
- url = baseUrl + statsPathLocal
+ url = baseURL + statsPathLocal
} else {
- url = baseUrl + statsPath
+ url = baseURL + statsPath
}
if len(e.NodeStats) == 0 {
@@ -266,26 +345,39 @@ func (e *Elasticsearch) nodeStatsUrl(baseUrl string) string {
return fmt.Sprintf("%s/%s", url, strings.Join(e.NodeStats, ","))
}
+func (e *Elasticsearch) gatherNodeID(url string) (string, error) {
+ nodeStats := &struct {
+ ClusterName string `json:"cluster_name"`
+ Nodes map[string]*nodeStat `json:"nodes"`
+ }{}
+ if err := e.gatherJSONData(url, nodeStats); err != nil {
+ return "", err
+ }
+
+ // Only 1 should be returned
+ for id := range nodeStats.Nodes {
+ return id, nil
+ }
+ return "", nil
+}
+
func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
nodeStats := &struct {
ClusterName string `json:"cluster_name"`
Nodes map[string]*nodeStat `json:"nodes"`
}{}
- if err := e.gatherJsonData(url, nodeStats); err != nil {
+ if err := e.gatherJSONData(url, nodeStats); err != nil {
return err
}
for id, n := range nodeStats.Nodes {
+ sort.Strings(n.Roles)
tags := map[string]string{
"node_id": id,
"node_host": n.Host,
"node_name": n.Name,
"cluster_name": nodeStats.ClusterName,
- }
-
- if e.ClusterStats {
- // check for master
- e.isMaster = (id == e.catMasterResponseTokens[0])
+ "node_roles": strings.Join(n.Roles, ","),
}
for k, v := range n.Attributes {
@@ -325,24 +417,26 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator) error {
healthStats := &clusterHealth{}
- if err := e.gatherJsonData(url, healthStats); err != nil {
+ if err := e.gatherJSONData(url, healthStats); err != nil {
return err
}
measurementTime := time.Now()
clusterFields := map[string]interface{}{
- "status": healthStats.Status,
- "status_code": mapHealthStatusToCode(healthStats.Status),
- "timed_out": healthStats.TimedOut,
- "number_of_nodes": healthStats.NumberOfNodes,
- "number_of_data_nodes": healthStats.NumberOfDataNodes,
"active_primary_shards": healthStats.ActivePrimaryShards,
"active_shards": healthStats.ActiveShards,
- "relocating_shards": healthStats.RelocatingShards,
+ "active_shards_percent_as_number": healthStats.ActiveShardsPercentAsNumber,
+ "delayed_unassigned_shards": healthStats.DelayedUnassignedShards,
"initializing_shards": healthStats.InitializingShards,
- "unassigned_shards": healthStats.UnassignedShards,
+ "number_of_data_nodes": healthStats.NumberOfDataNodes,
+ "number_of_in_flight_fetch": healthStats.NumberOfInFlightFetch,
+ "number_of_nodes": healthStats.NumberOfNodes,
"number_of_pending_tasks": healthStats.NumberOfPendingTasks,
+ "relocating_shards": healthStats.RelocatingShards,
+ "status": healthStats.Status,
+ "status_code": mapHealthStatusToCode(healthStats.Status),
"task_max_waiting_in_queue_millis": healthStats.TaskMaxWaitingInQueueMillis,
- "active_shards_percent_as_number": healthStats.ActiveShardsPercentAsNumber,
+ "timed_out": healthStats.TimedOut,
+ "unassigned_shards": healthStats.UnassignedShards,
}
acc.AddFields(
"elasticsearch_cluster_health",
@@ -353,20 +447,20 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator
for name, health := range healthStats.Indices {
indexFields := map[string]interface{}{
- "status": health.Status,
- "status_code": mapHealthStatusToCode(health.Status),
- "number_of_shards": health.NumberOfShards,
- "number_of_replicas": health.NumberOfReplicas,
"active_primary_shards": health.ActivePrimaryShards,
"active_shards": health.ActiveShards,
- "relocating_shards": health.RelocatingShards,
"initializing_shards": health.InitializingShards,
+ "number_of_replicas": health.NumberOfReplicas,
+ "number_of_shards": health.NumberOfShards,
+ "relocating_shards": health.RelocatingShards,
+ "status": health.Status,
+ "status_code": mapHealthStatusToCode(health.Status),
"unassigned_shards": health.UnassignedShards,
}
acc.AddFields(
- "elasticsearch_indices",
+ "elasticsearch_cluster_health_indices",
indexFields,
- map[string]string{"index": name},
+ map[string]string{"index": name, "name": healthStats.ClusterName},
measurementTime,
)
}
@@ -375,7 +469,7 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator
func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
clusterStats := &clusterStats{}
- if err := e.gatherJsonData(url, clusterStats); err != nil {
+ if err := e.gatherJSONData(url, clusterStats); err != nil {
return err
}
now := time.Now()
@@ -403,31 +497,146 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator)
return nil
}
-func (e *Elasticsearch) setCatMaster(url string) error {
- r, err := e.client.Get(url)
- if err != nil {
+func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator) error {
+ indicesStats := &struct {
+ Shards map[string]interface{} `json:"_shards"`
+ All map[string]interface{} `json:"_all"`
+ Indices map[string]indexStat `json:"indices"`
+ }{}
+
+ if err := e.gatherJSONData(url, indicesStats); err != nil {
return err
}
+ now := time.Now()
+
+ // Total Shards Stats
+ shardsStats := map[string]interface{}{}
+ for k, v := range indicesStats.Shards {
+ shardsStats[k] = v
+ }
+ acc.AddFields("elasticsearch_indices_stats_shards_total", shardsStats, map[string]string{}, now)
+
+ // All Stats
+ for m, s := range indicesStats.All {
+ // parse Json, ignoring strings and bools
+ jsonParser := jsonparser.JSONFlattener{}
+ err := jsonParser.FullFlattenJSON("_", s, true, true)
+ if err != nil {
+ return err
+ }
+ acc.AddFields("elasticsearch_indices_stats_"+m, jsonParser.Fields, map[string]string{"index_name": "_all"}, now)
+ }
+
+ // Individual Indices stats
+ for id, index := range indicesStats.Indices {
+ indexTag := map[string]string{"index_name": id}
+ stats := map[string]interface{}{
+ "primaries": index.Primaries,
+ "total": index.Total,
+ }
+ for m, s := range stats {
+ f := jsonparser.JSONFlattener{}
+ // parse Json, getting strings and bools
+ err := f.FullFlattenJSON("", s, true, true)
+ if err != nil {
+ return err
+ }
+ acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now)
+ }
+
+ if e.IndicesLevel == "shards" {
+ for shardNumber, shards := range index.Shards {
+ for _, shard := range shards {
+
+ // Get Shard Stats
+ flattened := jsonparser.JSONFlattener{}
+ err := flattened.FullFlattenJSON("", shard, true, true)
+ if err != nil {
+ return err
+ }
+
+ // determine shard tag and primary/replica designation
+ shardType := "replica"
+ if flattened.Fields["routing_primary"] == true {
+ shardType = "primary"
+ }
+ delete(flattened.Fields, "routing_primary")
+
+ routingState, ok := flattened.Fields["routing_state"].(string)
+ if ok {
+ flattened.Fields["routing_state"] = mapShardStatusToCode(routingState)
+ }
+
+ routingNode, _ := flattened.Fields["routing_node"].(string)
+ shardTags := map[string]string{
+ "index_name": id,
+ "node_id": routingNode,
+ "shard_name": string(shardNumber),
+ "type": shardType,
+ }
+
+ for key, field := range flattened.Fields {
+ switch field.(type) {
+ case string, bool:
+ delete(flattened.Fields, key)
+ }
+ }
+
+ acc.AddFields("elasticsearch_indices_stats_shards",
+ flattened.Fields,
+ shardTags,
+ now)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (e *Elasticsearch) getCatMaster(url string) (string, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return "", err
+ }
+
+ if e.Username != "" || e.Password != "" {
+ req.SetBasicAuth(e.Username, e.Password)
+ }
+
+ r, err := e.client.Do(req)
+ if err != nil {
+ return "", err
+ }
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
// to let the underlying transport close the connection and re-establish a new one for
// future calls.
- return fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK)
+ return "", fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK)
}
response, err := ioutil.ReadAll(r.Body)
if err != nil {
- return err
+ return "", err
}
- e.catMasterResponseTokens = strings.Split(string(response), " ")
+ masterID := strings.Split(string(response), " ")[0]
- return nil
+ return masterID, nil
}
-func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error {
- r, err := e.client.Get(url)
+func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+
+ if e.Username != "" || e.Password != "" {
+ req.SetBasicAuth(e.Username, e.Password)
+ }
+
+ r, err := e.client.Do(req)
if err != nil {
return err
}
diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go
index 1616bfeb23e7e..ad91c898a1a5c 100644
--- a/plugins/inputs/elasticsearch/elasticsearch_test.go
+++ b/plugins/inputs/elasticsearch/elasticsearch_test.go
@@ -9,6 +9,7 @@ import (
"github.com/influxdata/telegraf/testutil"
"fmt"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -20,8 +21,12 @@ func defaultTags() map[string]string {
"node_id": "SDFsfSDFsdfFSDSDfSFDSDF",
"node_name": "test.host.com",
"node_host": "test",
+ "node_roles": "data,ingest,master",
}
}
+func defaultServerInfo() serverInfo {
+ return serverInfo{nodeID: "", masterID: "SDFsfSDFsdfFSDSDfSFDSDF"}
+}
type transportMock struct {
statusCode int
@@ -49,8 +54,8 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
func (t *transportMock) CancelRequest(_ *http.Request) {
}
-func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) {
- if es.isMaster != expected {
+func checkIsMaster(es *Elasticsearch, server string, expected bool, t *testing.T) {
+ if es.serverInfo[server].isMaster() != expected {
msg := fmt.Sprintf("IsMaster set incorrectly")
assert.Fail(t, msg)
}
@@ -65,7 +70,7 @@ func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
- acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags)
+ acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHTTPExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
}
@@ -73,13 +78,15 @@ func TestGather(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+ es.serverInfo = make(map[string]serverInfo)
+ es.serverInfo["http://example.com:9200"] = defaultServerInfo()
var acc testutil.Accumulator
if err := acc.GatherError(es.Gather); err != nil {
t.Fatal(err)
}
- checkIsMaster(es, false, t)
+ checkIsMaster(es, es.Servers[0], false, t)
checkNodeStatsResult(t, &acc)
}
@@ -88,13 +95,15 @@ func TestGatherIndividualStats(t *testing.T) {
es.Servers = []string{"http://example.com:9200"}
es.NodeStats = []string{"jvm", "process"}
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess)
+ es.serverInfo = make(map[string]serverInfo)
+ es.serverInfo["http://example.com:9200"] = defaultServerInfo()
var acc testutil.Accumulator
if err := acc.GatherError(es.Gather); err != nil {
t.Fatal(err)
}
- checkIsMaster(es, false, t)
+ checkIsMaster(es, es.Servers[0], false, t)
tags := defaultTags()
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
@@ -104,7 +113,7 @@ func TestGatherIndividualStats(t *testing.T) {
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
- acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags)
+ acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHTTPExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
}
@@ -112,13 +121,15 @@ func TestGatherNodeStats(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+ es.serverInfo = make(map[string]serverInfo)
+ es.serverInfo["http://example.com:9200"] = defaultServerInfo()
var acc testutil.Accumulator
if err := es.gatherNodeStats("junk", &acc); err != nil {
t.Fatal(err)
}
- checkIsMaster(es, false, t)
+ checkIsMaster(es, es.Servers[0], false, t)
checkNodeStatsResult(t, &acc)
}
@@ -128,21 +139,23 @@ func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) {
es.ClusterHealth = true
es.ClusterHealthLevel = ""
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
+ es.serverInfo = make(map[string]serverInfo)
+ es.serverInfo["http://example.com:9200"] = defaultServerInfo()
var acc testutil.Accumulator
require.NoError(t, es.gatherClusterHealth("junk", &acc))
- checkIsMaster(es, false, t)
+ checkIsMaster(es, es.Servers[0], false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
- acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
+ acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
v1IndexExpected,
map[string]string{"index": "v1"})
- acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
+ acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
v2IndexExpected,
map[string]string{"index": "v2"})
}
@@ -153,21 +166,23 @@ func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) {
es.ClusterHealth = true
es.ClusterHealthLevel = "cluster"
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
+ es.serverInfo = make(map[string]serverInfo)
+ es.serverInfo["http://example.com:9200"] = defaultServerInfo()
var acc testutil.Accumulator
require.NoError(t, es.gatherClusterHealth("junk", &acc))
- checkIsMaster(es, false, t)
+ checkIsMaster(es, es.Servers[0], false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
- acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
+ acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
v1IndexExpected,
map[string]string{"index": "v1"})
- acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
+ acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
v2IndexExpected,
map[string]string{"index": "v2"})
}
@@ -178,23 +193,25 @@ func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) {
es.ClusterHealth = true
es.ClusterHealthLevel = "indices"
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices)
+ es.serverInfo = make(map[string]serverInfo)
+ es.serverInfo["http://example.com:9200"] = defaultServerInfo()
var acc testutil.Accumulator
require.NoError(t, es.gatherClusterHealth("junk", &acc))
- checkIsMaster(es, false, t)
+ checkIsMaster(es, es.Servers[0], false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
- acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
+ acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
v1IndexExpected,
- map[string]string{"index": "v1"})
+ map[string]string{"index": "v1", "name": "elasticsearch_telegraf"})
- acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
+ acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
v2IndexExpected,
- map[string]string{"index": "v2"})
+ map[string]string{"index": "v2", "name": "elasticsearch_telegraf"})
}
func TestGatherClusterStatsMaster(t *testing.T) {
@@ -202,13 +219,18 @@ func TestGatherClusterStatsMaster(t *testing.T) {
es := newElasticsearchWithClient()
es.ClusterStats = true
es.Servers = []string{"http://example.com:9200"}
+ es.serverInfo = make(map[string]serverInfo)
+ info := serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""}
// first get catMaster
es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult)
- require.NoError(t, es.setCatMaster("junk"))
+ masterID, err := es.getCatMaster("junk")
+ require.NoError(t, err)
+ info.masterID = masterID
+ es.serverInfo["http://example.com:9200"] = info
IsMasterResultTokens := strings.Split(string(IsMasterResult), " ")
- if es.catMasterResponseTokens[0] != IsMasterResultTokens[0] {
+ if masterID != IsMasterResultTokens[0] {
msg := fmt.Sprintf("catmaster is incorrect")
assert.Fail(t, msg)
}
@@ -221,7 +243,7 @@ func TestGatherClusterStatsMaster(t *testing.T) {
t.Fatal(err)
}
- checkIsMaster(es, true, t)
+ checkIsMaster(es, es.Servers[0], true, t)
checkNodeStatsResult(t, &acc)
// now test the clusterstats method
@@ -243,13 +265,16 @@ func TestGatherClusterStatsNonMaster(t *testing.T) {
es := newElasticsearchWithClient()
es.ClusterStats = true
es.Servers = []string{"http://example.com:9200"}
+ es.serverInfo = make(map[string]serverInfo)
+ es.serverInfo["http://example.com:9200"] = serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""}
// first get catMaster
es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult)
- require.NoError(t, es.setCatMaster("junk"))
+ masterID, err := es.getCatMaster("junk")
+ require.NoError(t, err)
IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ")
- if es.catMasterResponseTokens[0] != IsNotMasterResultTokens[0] {
+ if masterID != IsNotMasterResultTokens[0] {
msg := fmt.Sprintf("catmaster is incorrect")
assert.Fail(t, msg)
}
@@ -263,10 +288,67 @@ func TestGatherClusterStatsNonMaster(t *testing.T) {
}
// ensure flag is clear so Cluster Stats would not be done
- checkIsMaster(es, false, t)
+ checkIsMaster(es, es.Servers[0], false, t)
checkNodeStatsResult(t, &acc)
}
+func TestGatherClusterIndicesStats(t *testing.T) {
+ es := newElasticsearchWithClient()
+ es.IndicesInclude = []string{"_all"}
+ es.Servers = []string{"http://example.com:9200"}
+ es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesResponse)
+ es.serverInfo = make(map[string]serverInfo)
+ es.serverInfo["http://example.com:9200"] = defaultServerInfo()
+
+ var acc testutil.Accumulator
+ if err := es.gatherIndicesStats("junk", &acc); err != nil {
+ t.Fatal(err)
+ }
+
+ acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
+ clusterIndicesExpected,
+ map[string]string{"index_name": "twitter"})
+}
+
+func TestGatherClusterIndiceShardsStats(t *testing.T) {
+ es := newElasticsearchWithClient()
+ es.IndicesLevel = "shards"
+ es.Servers = []string{"http://example.com:9200"}
+ es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesShardsResponse)
+ es.serverInfo = make(map[string]serverInfo)
+ es.serverInfo["http://example.com:9200"] = defaultServerInfo()
+
+ var acc testutil.Accumulator
+ if err := es.gatherIndicesStats("junk", &acc); err != nil {
+ t.Fatal(err)
+ }
+
+ acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
+ clusterIndicesExpected,
+ map[string]string{"index_name": "twitter"})
+
+ primaryTags := map[string]string{
+ "index_name": "twitter",
+ "node_id": "oqvR8I1dTpONvwRM30etww",
+ "shard_name": "0",
+ "type": "primary",
+ }
+
+ acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_shards",
+ clusterIndicesPrimaryShardsExpected,
+ primaryTags)
+
+ replicaTags := map[string]string{
+ "index_name": "twitter",
+ "node_id": "oqvR8I1dTpONvwRM30etww",
+ "shard_name": "1",
+ "type": "replica",
+ }
+ acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_shards",
+ clusterIndicesReplicaShardsExpected,
+ replicaTags)
+}
+
func newElasticsearchWithClient() *Elasticsearch {
es := NewElasticsearch()
es.client = &http.Client{}
diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go
index 622abeaf87b44..a04fe1521e999 100644
--- a/plugins/inputs/elasticsearch/testdata_test.go
+++ b/plugins/inputs/elasticsearch/testdata_test.go
@@ -7,11 +7,13 @@ const clusterHealthResponse = `
"timed_out": false,
"number_of_nodes": 3,
"number_of_data_nodes": 3,
+ "number_of_in_flight_fetch": 0,
"active_primary_shards": 5,
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
+ "delayed_unassigned_shards": 0,
"number_of_pending_tasks": 0,
"task_max_waiting_in_queue_millis": 0,
"active_shards_percent_as_number": 100.0
@@ -25,11 +27,13 @@ const clusterHealthResponseWithIndices = `
"timed_out": false,
"number_of_nodes": 3,
"number_of_data_nodes": 3,
+ "number_of_in_flight_fetch": 0,
"active_primary_shards": 5,
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
+ "delayed_unassigned_shards": 0,
"number_of_pending_tasks": 0,
"task_max_waiting_in_queue_millis": 0,
"active_shards_percent_as_number": 100.0,
@@ -64,11 +68,13 @@ var clusterHealthExpected = map[string]interface{}{
"timed_out": false,
"number_of_nodes": 3,
"number_of_data_nodes": 3,
+ "number_of_in_flight_fetch": 0,
"active_primary_shards": 5,
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
+ "delayed_unassigned_shards": 0,
"number_of_pending_tasks": 0,
"task_max_waiting_in_queue_millis": 0,
"active_shards_percent_as_number": 100.0,
@@ -111,6 +117,11 @@ const nodeStatsResponse = `
"inet[/127.0.0.1:9300]",
"NONE"
],
+ "roles": [
+ "master",
+ "data",
+ "ingest"
+ ],
"attributes": {
"master": "true"
},
@@ -511,16 +522,8 @@ const nodeStatsResponse = `
"tripped": 0
}
}
- }
- }
-}
-`
-
-const nodeStatsResponseJVMProcess = `
-{
- "cluster_name": "es-testcluster",
- "nodes": {
- "SDFsfSDFsdfFSDSDfSFDSDF": {
+ },
+ "SDFsfSDFsdfFSDSDfSPOJUY": {
"timestamp": 1436365550135,
"name": "test.host.com",
"transport_address": "inet[/127.0.0.1:9300]",
@@ -532,6 +535,137 @@ const nodeStatsResponseJVMProcess = `
"attributes": {
"master": "true"
},
+ "indices": {
+ "docs": {
+ "count": 29652,
+ "deleted": 5229
+ },
+ "store": {
+ "size_in_bytes": 37715234,
+ "throttle_time_in_millis": 215
+ },
+ "indexing": {
+ "index_total": 84790,
+ "index_time_in_millis": 29680,
+ "index_current": 0,
+ "delete_total": 13879,
+ "delete_time_in_millis": 1139,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 1,
+ "time_in_millis": 2,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 1,
+ "missing_time_in_millis": 2,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 1452,
+ "query_time_in_millis": 5695,
+ "query_current": 0,
+ "fetch_total": 414,
+ "fetch_time_in_millis": 146,
+ "fetch_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 133,
+ "total_time_in_millis": 21060,
+ "total_docs": 203672,
+ "total_size_in_bytes": 142900226
+ },
+ "refresh": {
+ "total": 1076,
+ "total_time_in_millis": 20078
+ },
+ "flush": {
+ "total": 115,
+ "total_time_in_millis": 2401
+ },
+ "warmer": {
+ "current": 0,
+ "total": 2319,
+ "total_time_in_millis": 448
+ },
+ "filter_cache": {
+ "memory_size_in_bytes": 7384,
+ "evictions": 0
+ },
+ "id_cache": {
+ "memory_size_in_bytes": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 12996,
+ "evictions": 0
+ },
+ "percolate": {
+ "total": 0,
+ "time_in_millis": 0,
+ "current": 0,
+ "memory_size_in_bytes": -1,
+ "memory_size": "-1b",
+ "queries": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 134,
+ "memory_in_bytes": 1285212,
+ "index_writer_memory_in_bytes": 0,
+ "index_writer_max_memory_in_bytes": 172368955,
+ "version_map_memory_in_bytes": 611844,
+ "fixed_bit_set_memory_in_bytes": 0
+ },
+ "translog": {
+ "operations": 17702,
+ "size_in_bytes": 17
+ },
+ "suggest": {
+ "total": 0,
+ "time_in_millis": 0,
+ "current": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ }
+ },
+ "os": {
+ "timestamp": 1436460392944,
+ "load_average": [
+ 0.01,
+ 0.04,
+ 0.05
+ ],
+ "mem": {
+ "free_in_bytes": 477761536,
+ "used_in_bytes": 1621868544,
+ "free_percent": 74,
+ "used_percent": 25,
+ "actual_free_in_bytes": 1565470720,
+ "actual_used_in_bytes": 534159360
+ },
+ "swap": {
+ "used_in_bytes": 0,
+ "free_in_bytes": 487997440
+ }
+ },
"process": {
"timestamp": 1436460392945,
"open_file_descriptors": 160,
@@ -604,268 +738,560 @@ const nodeStatsResponseJVMProcess = `
"total_capacity_in_bytes": 0
}
}
- }
- }
- }
-}
-`
-
-var nodestatsIndicesExpected = map[string]interface{}{
- "id_cache_memory_size_in_bytes": float64(0),
- "completion_size_in_bytes": float64(0),
- "suggest_total": float64(0),
- "suggest_time_in_millis": float64(0),
- "suggest_current": float64(0),
- "query_cache_memory_size_in_bytes": float64(0),
- "query_cache_evictions": float64(0),
- "query_cache_hit_count": float64(0),
- "query_cache_miss_count": float64(0),
- "store_size_in_bytes": float64(37715234),
- "store_throttle_time_in_millis": float64(215),
- "merges_current_docs": float64(0),
- "merges_current_size_in_bytes": float64(0),
- "merges_total": float64(133),
- "merges_total_time_in_millis": float64(21060),
- "merges_total_docs": float64(203672),
- "merges_total_size_in_bytes": float64(142900226),
- "merges_current": float64(0),
- "filter_cache_memory_size_in_bytes": float64(7384),
- "filter_cache_evictions": float64(0),
- "indexing_index_total": float64(84790),
- "indexing_index_time_in_millis": float64(29680),
- "indexing_index_current": float64(0),
- "indexing_noop_update_total": float64(0),
- "indexing_throttle_time_in_millis": float64(0),
- "indexing_delete_total": float64(13879),
- "indexing_delete_time_in_millis": float64(1139),
- "indexing_delete_current": float64(0),
- "get_exists_time_in_millis": float64(0),
- "get_missing_total": float64(1),
- "get_missing_time_in_millis": float64(2),
- "get_current": float64(0),
- "get_total": float64(1),
- "get_time_in_millis": float64(2),
- "get_exists_total": float64(0),
- "refresh_total": float64(1076),
- "refresh_total_time_in_millis": float64(20078),
- "percolate_current": float64(0),
- "percolate_memory_size_in_bytes": float64(-1),
- "percolate_queries": float64(0),
- "percolate_total": float64(0),
- "percolate_time_in_millis": float64(0),
- "translog_operations": float64(17702),
- "translog_size_in_bytes": float64(17),
- "recovery_current_as_source": float64(0),
- "recovery_current_as_target": float64(0),
- "recovery_throttle_time_in_millis": float64(0),
- "docs_count": float64(29652),
- "docs_deleted": float64(5229),
- "flush_total_time_in_millis": float64(2401),
- "flush_total": float64(115),
- "fielddata_memory_size_in_bytes": float64(12996),
- "fielddata_evictions": float64(0),
- "search_fetch_current": float64(0),
- "search_open_contexts": float64(0),
- "search_query_total": float64(1452),
- "search_query_time_in_millis": float64(5695),
- "search_query_current": float64(0),
- "search_fetch_total": float64(414),
- "search_fetch_time_in_millis": float64(146),
- "warmer_current": float64(0),
- "warmer_total": float64(2319),
- "warmer_total_time_in_millis": float64(448),
- "segments_count": float64(134),
- "segments_memory_in_bytes": float64(1285212),
- "segments_index_writer_memory_in_bytes": float64(0),
- "segments_index_writer_max_memory_in_bytes": float64(172368955),
- "segments_version_map_memory_in_bytes": float64(611844),
- "segments_fixed_bit_set_memory_in_bytes": float64(0),
-}
-
-var nodestatsOsExpected = map[string]interface{}{
- "load_average_0": float64(0.01),
- "load_average_1": float64(0.04),
- "load_average_2": float64(0.05),
- "swap_used_in_bytes": float64(0),
- "swap_free_in_bytes": float64(487997440),
- "timestamp": float64(1436460392944),
- "mem_free_percent": float64(74),
- "mem_used_percent": float64(25),
- "mem_actual_free_in_bytes": float64(1565470720),
- "mem_actual_used_in_bytes": float64(534159360),
- "mem_free_in_bytes": float64(477761536),
- "mem_used_in_bytes": float64(1621868544),
-}
-
-var nodestatsProcessExpected = map[string]interface{}{
- "mem_total_virtual_in_bytes": float64(4747890688),
- "timestamp": float64(1436460392945),
- "open_file_descriptors": float64(160),
- "cpu_total_in_millis": float64(15480),
- "cpu_percent": float64(2),
- "cpu_sys_in_millis": float64(1870),
- "cpu_user_in_millis": float64(13610),
-}
-
-var nodestatsJvmExpected = map[string]interface{}{
- "timestamp": float64(1436460392945),
- "uptime_in_millis": float64(202245),
- "mem_non_heap_used_in_bytes": float64(39634576),
- "mem_non_heap_committed_in_bytes": float64(40841216),
- "mem_pools_young_max_in_bytes": float64(279183360),
- "mem_pools_young_peak_used_in_bytes": float64(71630848),
- "mem_pools_young_peak_max_in_bytes": float64(279183360),
- "mem_pools_young_used_in_bytes": float64(32685760),
- "mem_pools_survivor_peak_used_in_bytes": float64(8912888),
- "mem_pools_survivor_peak_max_in_bytes": float64(34865152),
- "mem_pools_survivor_used_in_bytes": float64(8912880),
- "mem_pools_survivor_max_in_bytes": float64(34865152),
- "mem_pools_old_peak_max_in_bytes": float64(724828160),
- "mem_pools_old_used_in_bytes": float64(11110928),
- "mem_pools_old_max_in_bytes": float64(724828160),
- "mem_pools_old_peak_used_in_bytes": float64(14354608),
- "mem_heap_used_in_bytes": float64(52709568),
- "mem_heap_used_percent": float64(5),
- "mem_heap_committed_in_bytes": float64(259522560),
- "mem_heap_max_in_bytes": float64(1038876672),
- "threads_peak_count": float64(45),
- "threads_count": float64(44),
- "gc_collectors_young_collection_count": float64(2),
- "gc_collectors_young_collection_time_in_millis": float64(98),
- "gc_collectors_old_collection_count": float64(1),
- "gc_collectors_old_collection_time_in_millis": float64(24),
- "buffer_pools_direct_count": float64(40),
- "buffer_pools_direct_used_in_bytes": float64(6304239),
- "buffer_pools_direct_total_capacity_in_bytes": float64(6304239),
- "buffer_pools_mapped_count": float64(0),
- "buffer_pools_mapped_used_in_bytes": float64(0),
- "buffer_pools_mapped_total_capacity_in_bytes": float64(0),
+ },
+ "thread_pool": {
+ "percolate": {
+ "threads": 123,
+ "queue": 23,
+ "active": 13,
+ "rejected": 235,
+ "largest": 23,
+ "completed": 33
+ },
+ "fetch_shard_started": {
+ "threads": 3,
+ "queue": 1,
+ "active": 5,
+ "rejected": 6,
+ "largest": 4,
+ "completed": 54
+ },
+ "listener": {
+ "threads": 1,
+ "queue": 2,
+ "active": 4,
+ "rejected": 8,
+ "largest": 1,
+ "completed": 1
+ },
+ "index": {
+ "threads": 6,
+ "queue": 8,
+ "active": 4,
+ "rejected": 2,
+ "largest": 3,
+ "completed": 6
+ },
+ "refresh": {
+ "threads": 23,
+ "queue": 7,
+ "active": 3,
+ "rejected": 4,
+ "largest": 8,
+ "completed": 3
+ },
+ "suggest": {
+ "threads": 2,
+ "queue": 7,
+ "active": 2,
+ "rejected": 1,
+ "largest": 8,
+ "completed": 3
+ },
+ "generic": {
+ "threads": 1,
+ "queue": 4,
+ "active": 6,
+ "rejected": 3,
+ "largest": 2,
+ "completed": 27
+ },
+ "warmer": {
+ "threads": 2,
+ "queue": 7,
+ "active": 3,
+ "rejected": 2,
+ "largest": 3,
+ "completed": 1
+ },
+ "search": {
+ "threads": 5,
+ "queue": 7,
+ "active": 2,
+ "rejected": 7,
+ "largest": 2,
+ "completed": 4
+ },
+ "flush": {
+ "threads": 3,
+ "queue": 8,
+ "active": 0,
+ "rejected": 1,
+ "largest": 5,
+ "completed": 3
+ },
+ "optimize": {
+ "threads": 3,
+ "queue": 4,
+ "active": 1,
+ "rejected": 2,
+ "largest": 7,
+ "completed": 3
+ },
+ "fetch_shard_store": {
+ "threads": 1,
+ "queue": 7,
+ "active": 4,
+ "rejected": 2,
+ "largest": 4,
+ "completed": 1
+ },
+ "management": {
+ "threads": 2,
+ "queue": 3,
+ "active": 1,
+ "rejected": 6,
+ "largest": 2,
+ "completed": 22
+ },
+ "get": {
+ "threads": 1,
+ "queue": 8,
+ "active": 4,
+ "rejected": 3,
+ "largest": 2,
+ "completed": 1
+ },
+ "merge": {
+ "threads": 6,
+ "queue": 4,
+ "active": 5,
+ "rejected": 2,
+ "largest": 5,
+ "completed": 1
+ },
+ "bulk": {
+ "threads": 4,
+ "queue": 5,
+ "active": 7,
+ "rejected": 3,
+ "largest": 1,
+ "completed": 4
+ },
+ "snapshot": {
+ "threads": 8,
+ "queue": 5,
+ "active": 6,
+ "rejected": 2,
+ "largest": 1,
+ "completed": 0
+ }
+ },
+ "fs": {
+ "timestamp": 1436460392946,
+ "total": {
+ "total_in_bytes": 19507089408,
+ "free_in_bytes": 16909316096,
+ "available_in_bytes": 15894814720
+ },
+ "data": [
+ {
+ "path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0",
+ "mount": "/usr/share/elasticsearch/data",
+ "type": "ext4",
+ "total_in_bytes": 19507089408,
+ "free_in_bytes": 16909316096,
+ "available_in_bytes": 15894814720
+ }
+ ]
+ },
+ "transport": {
+ "server_open": 13,
+ "rx_count": 6,
+ "rx_size_in_bytes": 1380,
+ "tx_count": 6,
+ "tx_size_in_bytes": 1380
+ },
+ "http": {
+ "current_open": 3,
+ "total_opened": 3
+ },
+ "breakers": {
+ "fielddata": {
+ "limit_size_in_bytes": 623326003,
+ "limit_size": "594.4mb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1.03,
+ "tripped": 0
+ },
+ "request": {
+ "limit_size_in_bytes": 415550668,
+ "limit_size": "396.2mb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1.0,
+ "tripped": 0
+ },
+ "parent": {
+ "limit_size_in_bytes": 727213670,
+ "limit_size": "693.5mb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1.0,
+ "tripped": 0
+ }
+ }
+ }
+ }
}
+`
-var nodestatsThreadPoolExpected = map[string]interface{}{
- "merge_threads": float64(6),
- "merge_queue": float64(4),
- "merge_active": float64(5),
- "merge_rejected": float64(2),
- "merge_largest": float64(5),
- "merge_completed": float64(1),
- "bulk_threads": float64(4),
- "bulk_queue": float64(5),
- "bulk_active": float64(7),
- "bulk_rejected": float64(3),
- "bulk_largest": float64(1),
- "bulk_completed": float64(4),
- "warmer_threads": float64(2),
- "warmer_queue": float64(7),
- "warmer_active": float64(3),
- "warmer_rejected": float64(2),
- "warmer_largest": float64(3),
- "warmer_completed": float64(1),
- "get_largest": float64(2),
- "get_completed": float64(1),
- "get_threads": float64(1),
- "get_queue": float64(8),
- "get_active": float64(4),
- "get_rejected": float64(3),
- "index_threads": float64(6),
- "index_queue": float64(8),
- "index_active": float64(4),
- "index_rejected": float64(2),
- "index_largest": float64(3),
- "index_completed": float64(6),
- "suggest_threads": float64(2),
- "suggest_queue": float64(7),
- "suggest_active": float64(2),
- "suggest_rejected": float64(1),
- "suggest_largest": float64(8),
- "suggest_completed": float64(3),
- "fetch_shard_store_queue": float64(7),
- "fetch_shard_store_active": float64(4),
- "fetch_shard_store_rejected": float64(2),
- "fetch_shard_store_largest": float64(4),
- "fetch_shard_store_completed": float64(1),
- "fetch_shard_store_threads": float64(1),
- "management_threads": float64(2),
- "management_queue": float64(3),
- "management_active": float64(1),
- "management_rejected": float64(6),
- "management_largest": float64(2),
- "management_completed": float64(22),
- "percolate_queue": float64(23),
- "percolate_active": float64(13),
- "percolate_rejected": float64(235),
- "percolate_largest": float64(23),
- "percolate_completed": float64(33),
- "percolate_threads": float64(123),
- "listener_active": float64(4),
- "listener_rejected": float64(8),
- "listener_largest": float64(1),
- "listener_completed": float64(1),
- "listener_threads": float64(1),
- "listener_queue": float64(2),
- "search_rejected": float64(7),
- "search_largest": float64(2),
- "search_completed": float64(4),
- "search_threads": float64(5),
- "search_queue": float64(7),
- "search_active": float64(2),
- "fetch_shard_started_threads": float64(3),
- "fetch_shard_started_queue": float64(1),
- "fetch_shard_started_active": float64(5),
- "fetch_shard_started_rejected": float64(6),
- "fetch_shard_started_largest": float64(4),
- "fetch_shard_started_completed": float64(54),
- "refresh_rejected": float64(4),
- "refresh_largest": float64(8),
- "refresh_completed": float64(3),
- "refresh_threads": float64(23),
- "refresh_queue": float64(7),
- "refresh_active": float64(3),
- "optimize_threads": float64(3),
- "optimize_queue": float64(4),
- "optimize_active": float64(1),
- "optimize_rejected": float64(2),
- "optimize_largest": float64(7),
- "optimize_completed": float64(3),
- "snapshot_largest": float64(1),
- "snapshot_completed": float64(0),
- "snapshot_threads": float64(8),
- "snapshot_queue": float64(5),
- "snapshot_active": float64(6),
- "snapshot_rejected": float64(2),
- "generic_threads": float64(1),
- "generic_queue": float64(4),
- "generic_active": float64(6),
- "generic_rejected": float64(3),
- "generic_largest": float64(2),
- "generic_completed": float64(27),
- "flush_threads": float64(3),
- "flush_queue": float64(8),
- "flush_active": float64(0),
- "flush_rejected": float64(1),
- "flush_largest": float64(5),
- "flush_completed": float64(3),
+const nodeStatsResponseJVMProcess = `
+{
+ "cluster_name": "es-testcluster",
+ "nodes": {
+ "SDFsfSDFsdfFSDSDfSFDSDF": {
+ "timestamp": 1436365550135,
+ "name": "test.host.com",
+ "transport_address": "inet[/127.0.0.1:9300]",
+ "host": "test",
+ "ip": [
+ "inet[/127.0.0.1:9300]",
+ "NONE"
+ ],
+ "roles": [
+ "master",
+ "data",
+ "ingest"
+ ],
+ "attributes": {
+ "master": "true"
+ },
+ "process": {
+ "timestamp": 1436460392945,
+ "open_file_descriptors": 160,
+ "cpu": {
+ "percent": 2,
+ "sys_in_millis": 1870,
+ "user_in_millis": 13610,
+ "total_in_millis": 15480
+ },
+ "mem": {
+ "total_virtual_in_bytes": 4747890688
+ }
+ },
+ "jvm": {
+ "timestamp": 1436460392945,
+ "uptime_in_millis": 202245,
+ "mem": {
+ "heap_used_in_bytes": 52709568,
+ "heap_used_percent": 5,
+ "heap_committed_in_bytes": 259522560,
+ "heap_max_in_bytes": 1038876672,
+ "non_heap_used_in_bytes": 39634576,
+ "non_heap_committed_in_bytes": 40841216,
+ "pools": {
+ "young": {
+ "used_in_bytes": 32685760,
+ "max_in_bytes": 279183360,
+ "peak_used_in_bytes": 71630848,
+ "peak_max_in_bytes": 279183360
+ },
+ "survivor": {
+ "used_in_bytes": 8912880,
+ "max_in_bytes": 34865152,
+ "peak_used_in_bytes": 8912888,
+ "peak_max_in_bytes": 34865152
+ },
+ "old": {
+ "used_in_bytes": 11110928,
+ "max_in_bytes": 724828160,
+ "peak_used_in_bytes": 14354608,
+ "peak_max_in_bytes": 724828160
+ }
+ }
+ },
+ "threads": {
+ "count": 44,
+ "peak_count": 45
+ },
+ "gc": {
+ "collectors": {
+ "young": {
+ "collection_count": 2,
+ "collection_time_in_millis": 98
+ },
+ "old": {
+ "collection_count": 1,
+ "collection_time_in_millis": 24
+ }
+ }
+ },
+ "buffer_pools": {
+ "direct": {
+ "count": 40,
+ "used_in_bytes": 6304239,
+ "total_capacity_in_bytes": 6304239
+ },
+ "mapped": {
+ "count": 0,
+ "used_in_bytes": 0,
+ "total_capacity_in_bytes": 0
+ }
+ }
+ }
+ }
+ }
}
+`
-var nodestatsFsExpected = map[string]interface{}{
- "data_0_total_in_bytes": float64(19507089408),
- "data_0_free_in_bytes": float64(16909316096),
- "data_0_available_in_bytes": float64(15894814720),
- "timestamp": float64(1436460392946),
- "total_free_in_bytes": float64(16909316096),
- "total_available_in_bytes": float64(15894814720),
- "total_total_in_bytes": float64(19507089408),
+var nodestatsIndicesExpected = map[string]interface{}{
+ "id_cache_memory_size_in_bytes": float64(0),
+ "completion_size_in_bytes": float64(0),
+ "suggest_total": float64(0),
+ "suggest_time_in_millis": float64(0),
+ "suggest_current": float64(0),
+ "query_cache_memory_size_in_bytes": float64(0),
+ "query_cache_evictions": float64(0),
+ "query_cache_hit_count": float64(0),
+ "query_cache_miss_count": float64(0),
+ "store_size_in_bytes": float64(37715234),
+ "store_throttle_time_in_millis": float64(215),
+ "merges_current_docs": float64(0),
+ "merges_current_size_in_bytes": float64(0),
+ "merges_total": float64(133),
+ "merges_total_time_in_millis": float64(21060),
+ "merges_total_docs": float64(203672),
+ "merges_total_size_in_bytes": float64(142900226),
+ "merges_current": float64(0),
+ "filter_cache_memory_size_in_bytes": float64(7384),
+ "filter_cache_evictions": float64(0),
+ "indexing_index_total": float64(84790),
+ "indexing_index_time_in_millis": float64(29680),
+ "indexing_index_current": float64(0),
+ "indexing_noop_update_total": float64(0),
+ "indexing_throttle_time_in_millis": float64(0),
+ "indexing_delete_total": float64(13879),
+ "indexing_delete_time_in_millis": float64(1139),
+ "indexing_delete_current": float64(0),
+ "get_exists_time_in_millis": float64(0),
+ "get_missing_total": float64(1),
+ "get_missing_time_in_millis": float64(2),
+ "get_current": float64(0),
+ "get_total": float64(1),
+ "get_time_in_millis": float64(2),
+ "get_exists_total": float64(0),
+ "refresh_total": float64(1076),
+ "refresh_total_time_in_millis": float64(20078),
+ "percolate_current": float64(0),
+ "percolate_memory_size_in_bytes": float64(-1),
+ "percolate_queries": float64(0),
+ "percolate_total": float64(0),
+ "percolate_time_in_millis": float64(0),
+ "translog_operations": float64(17702),
+ "translog_size_in_bytes": float64(17),
+ "recovery_current_as_source": float64(0),
+ "recovery_current_as_target": float64(0),
+ "recovery_throttle_time_in_millis": float64(0),
+ "docs_count": float64(29652),
+ "docs_deleted": float64(5229),
+ "flush_total_time_in_millis": float64(2401),
+ "flush_total": float64(115),
+ "fielddata_memory_size_in_bytes": float64(12996),
+ "fielddata_evictions": float64(0),
+ "search_fetch_current": float64(0),
+ "search_open_contexts": float64(0),
+ "search_query_total": float64(1452),
+ "search_query_time_in_millis": float64(5695),
+ "search_query_current": float64(0),
+ "search_fetch_total": float64(414),
+ "search_fetch_time_in_millis": float64(146),
+ "warmer_current": float64(0),
+ "warmer_total": float64(2319),
+ "warmer_total_time_in_millis": float64(448),
+ "segments_count": float64(134),
+ "segments_memory_in_bytes": float64(1285212),
+ "segments_index_writer_memory_in_bytes": float64(0),
+ "segments_index_writer_max_memory_in_bytes": float64(172368955),
+ "segments_version_map_memory_in_bytes": float64(611844),
+ "segments_fixed_bit_set_memory_in_bytes": float64(0),
}
-var nodestatsTransportExpected = map[string]interface{}{
- "server_open": float64(13),
- "rx_count": float64(6),
- "rx_size_in_bytes": float64(1380),
- "tx_count": float64(6),
- "tx_size_in_bytes": float64(1380),
+var nodestatsOsExpected = map[string]interface{}{
+ "load_average_0": float64(0.01),
+ "load_average_1": float64(0.04),
+ "load_average_2": float64(0.05),
+ "swap_used_in_bytes": float64(0),
+ "swap_free_in_bytes": float64(487997440),
+ "timestamp": float64(1436460392944),
+ "mem_free_percent": float64(74),
+ "mem_used_percent": float64(25),
+ "mem_actual_free_in_bytes": float64(1565470720),
+ "mem_actual_used_in_bytes": float64(534159360),
+ "mem_free_in_bytes": float64(477761536),
+ "mem_used_in_bytes": float64(1621868544),
}
-var nodestatsHttpExpected = map[string]interface{}{
+var nodestatsProcessExpected = map[string]interface{}{
+ "mem_total_virtual_in_bytes": float64(4747890688),
+ "timestamp": float64(1436460392945),
+ "open_file_descriptors": float64(160),
+ "cpu_total_in_millis": float64(15480),
+ "cpu_percent": float64(2),
+ "cpu_sys_in_millis": float64(1870),
+ "cpu_user_in_millis": float64(13610),
+}
+
+var nodestatsJvmExpected = map[string]interface{}{
+ "timestamp": float64(1436460392945),
+ "uptime_in_millis": float64(202245),
+ "mem_non_heap_used_in_bytes": float64(39634576),
+ "mem_non_heap_committed_in_bytes": float64(40841216),
+ "mem_pools_young_max_in_bytes": float64(279183360),
+ "mem_pools_young_peak_used_in_bytes": float64(71630848),
+ "mem_pools_young_peak_max_in_bytes": float64(279183360),
+ "mem_pools_young_used_in_bytes": float64(32685760),
+ "mem_pools_survivor_peak_used_in_bytes": float64(8912888),
+ "mem_pools_survivor_peak_max_in_bytes": float64(34865152),
+ "mem_pools_survivor_used_in_bytes": float64(8912880),
+ "mem_pools_survivor_max_in_bytes": float64(34865152),
+ "mem_pools_old_peak_max_in_bytes": float64(724828160),
+ "mem_pools_old_used_in_bytes": float64(11110928),
+ "mem_pools_old_max_in_bytes": float64(724828160),
+ "mem_pools_old_peak_used_in_bytes": float64(14354608),
+ "mem_heap_used_in_bytes": float64(52709568),
+ "mem_heap_used_percent": float64(5),
+ "mem_heap_committed_in_bytes": float64(259522560),
+ "mem_heap_max_in_bytes": float64(1038876672),
+ "threads_peak_count": float64(45),
+ "threads_count": float64(44),
+ "gc_collectors_young_collection_count": float64(2),
+ "gc_collectors_young_collection_time_in_millis": float64(98),
+ "gc_collectors_old_collection_count": float64(1),
+ "gc_collectors_old_collection_time_in_millis": float64(24),
+ "buffer_pools_direct_count": float64(40),
+ "buffer_pools_direct_used_in_bytes": float64(6304239),
+ "buffer_pools_direct_total_capacity_in_bytes": float64(6304239),
+ "buffer_pools_mapped_count": float64(0),
+ "buffer_pools_mapped_used_in_bytes": float64(0),
+ "buffer_pools_mapped_total_capacity_in_bytes": float64(0),
+}
+
+var nodestatsThreadPoolExpected = map[string]interface{}{
+ "merge_threads": float64(6),
+ "merge_queue": float64(4),
+ "merge_active": float64(5),
+ "merge_rejected": float64(2),
+ "merge_largest": float64(5),
+ "merge_completed": float64(1),
+ "bulk_threads": float64(4),
+ "bulk_queue": float64(5),
+ "bulk_active": float64(7),
+ "bulk_rejected": float64(3),
+ "bulk_largest": float64(1),
+ "bulk_completed": float64(4),
+ "warmer_threads": float64(2),
+ "warmer_queue": float64(7),
+ "warmer_active": float64(3),
+ "warmer_rejected": float64(2),
+ "warmer_largest": float64(3),
+ "warmer_completed": float64(1),
+ "get_largest": float64(2),
+ "get_completed": float64(1),
+ "get_threads": float64(1),
+ "get_queue": float64(8),
+ "get_active": float64(4),
+ "get_rejected": float64(3),
+ "index_threads": float64(6),
+ "index_queue": float64(8),
+ "index_active": float64(4),
+ "index_rejected": float64(2),
+ "index_largest": float64(3),
+ "index_completed": float64(6),
+ "suggest_threads": float64(2),
+ "suggest_queue": float64(7),
+ "suggest_active": float64(2),
+ "suggest_rejected": float64(1),
+ "suggest_largest": float64(8),
+ "suggest_completed": float64(3),
+ "fetch_shard_store_queue": float64(7),
+ "fetch_shard_store_active": float64(4),
+ "fetch_shard_store_rejected": float64(2),
+ "fetch_shard_store_largest": float64(4),
+ "fetch_shard_store_completed": float64(1),
+ "fetch_shard_store_threads": float64(1),
+ "management_threads": float64(2),
+ "management_queue": float64(3),
+ "management_active": float64(1),
+ "management_rejected": float64(6),
+ "management_largest": float64(2),
+ "management_completed": float64(22),
+ "percolate_queue": float64(23),
+ "percolate_active": float64(13),
+ "percolate_rejected": float64(235),
+ "percolate_largest": float64(23),
+ "percolate_completed": float64(33),
+ "percolate_threads": float64(123),
+ "listener_active": float64(4),
+ "listener_rejected": float64(8),
+ "listener_largest": float64(1),
+ "listener_completed": float64(1),
+ "listener_threads": float64(1),
+ "listener_queue": float64(2),
+ "search_rejected": float64(7),
+ "search_largest": float64(2),
+ "search_completed": float64(4),
+ "search_threads": float64(5),
+ "search_queue": float64(7),
+ "search_active": float64(2),
+ "fetch_shard_started_threads": float64(3),
+ "fetch_shard_started_queue": float64(1),
+ "fetch_shard_started_active": float64(5),
+ "fetch_shard_started_rejected": float64(6),
+ "fetch_shard_started_largest": float64(4),
+ "fetch_shard_started_completed": float64(54),
+ "refresh_rejected": float64(4),
+ "refresh_largest": float64(8),
+ "refresh_completed": float64(3),
+ "refresh_threads": float64(23),
+ "refresh_queue": float64(7),
+ "refresh_active": float64(3),
+ "optimize_threads": float64(3),
+ "optimize_queue": float64(4),
+ "optimize_active": float64(1),
+ "optimize_rejected": float64(2),
+ "optimize_largest": float64(7),
+ "optimize_completed": float64(3),
+ "snapshot_largest": float64(1),
+ "snapshot_completed": float64(0),
+ "snapshot_threads": float64(8),
+ "snapshot_queue": float64(5),
+ "snapshot_active": float64(6),
+ "snapshot_rejected": float64(2),
+ "generic_threads": float64(1),
+ "generic_queue": float64(4),
+ "generic_active": float64(6),
+ "generic_rejected": float64(3),
+ "generic_largest": float64(2),
+ "generic_completed": float64(27),
+ "flush_threads": float64(3),
+ "flush_queue": float64(8),
+ "flush_active": float64(0),
+ "flush_rejected": float64(1),
+ "flush_largest": float64(5),
+ "flush_completed": float64(3),
+}
+
+var nodestatsFsExpected = map[string]interface{}{
+ "data_0_total_in_bytes": float64(19507089408),
+ "data_0_free_in_bytes": float64(16909316096),
+ "data_0_available_in_bytes": float64(15894814720),
+ "timestamp": float64(1436460392946),
+ "total_free_in_bytes": float64(16909316096),
+ "total_available_in_bytes": float64(15894814720),
+ "total_total_in_bytes": float64(19507089408),
+}
+
+var nodestatsTransportExpected = map[string]interface{}{
+ "server_open": float64(13),
+ "rx_count": float64(6),
+ "rx_size_in_bytes": float64(1380),
+ "tx_count": float64(6),
+ "tx_size_in_bytes": float64(1380),
+}
+
+var nodestatsHTTPExpected = map[string]interface{}{
"current_open": float64(3),
"total_opened": float64(3),
}
@@ -906,251 +1332,2516 @@ const clusterStatsResponse = `
"max":4,
"avg":4.0
},
- "primaries":{
- "min":4,
- "max":4,
- "avg":4.0
+ "primaries":{
+ "min":4,
+ "max":4,
+ "avg":4.0
+ },
+ "replication":{
+ "min":0.0,
+ "max":0.0,
+ "avg":0.0
+ }
+ }
+ },
+ "docs":{
+ "count":4,
+ "deleted":0
+ },
+ "store":{
+ "size_in_bytes":17084,
+ "throttle_time_in_millis":0
+ },
+ "fielddata":{
+ "memory_size_in_bytes":0,
+ "evictions":0
+ },
+ "query_cache":{
+ "memory_size_in_bytes":0,
+ "total_count":0,
+ "hit_count":0,
+ "miss_count":0,
+ "cache_size":0,
+ "cache_count":0,
+ "evictions":0
+ },
+ "completion":{
+ "size_in_bytes":0
+ },
+ "segments":{
+ "count":4,
+ "memory_in_bytes":11828,
+ "terms_memory_in_bytes":8932,
+ "stored_fields_memory_in_bytes":1248,
+ "term_vectors_memory_in_bytes":0,
+ "norms_memory_in_bytes":1280,
+ "doc_values_memory_in_bytes":368,
+ "index_writer_memory_in_bytes":0,
+ "index_writer_max_memory_in_bytes":2048000,
+ "version_map_memory_in_bytes":0,
+ "fixed_bit_set_memory_in_bytes":0
+ },
+ "percolate":{
+ "total":0,
+ "time_in_millis":0,
+ "current":0,
+ "memory_size_in_bytes":-1,
+ "memory_size":"-1b",
+ "queries":0
+ }
+ },
+ "nodes":{
+ "count":{
+ "total":1,
+ "master_only":0,
+ "data_only":0,
+ "master_data":1,
+ "client":0
+ },
+ "versions":[
+ {
+ "version": "2.3.3"
+ }
+ ],
+ "os":{
+ "available_processors":1,
+ "allocated_processors":1,
+ "mem":{
+ "total_in_bytes":593301504
+ },
+ "names":[
+ {
+ "name":"Linux",
+ "count":1
+ }
+ ]
+ },
+ "process":{
+ "cpu":{
+ "percent":0
+ },
+ "open_file_descriptors":{
+ "min":145,
+ "max":145,
+ "avg":145
+ }
+ },
+ "jvm":{
+ "max_uptime_in_millis":11580527,
+ "versions":[
+ {
+ "version":"1.8.0_101",
+ "vm_name":"OpenJDK 64-Bit Server VM",
+ "vm_version":"25.101-b13",
+ "vm_vendor":"Oracle Corporation",
+ "count":1
+ }
+ ],
+ "mem":{
+ "heap_used_in_bytes":70550288,
+ "heap_max_in_bytes":1065025536
+ },
+ "threads":30
+ },
+ "fs":{
+ "total_in_bytes":8318783488,
+ "free_in_bytes":6447439872,
+ "available_in_bytes":6344785920
+ },
+ "plugins":[
+ {
+ "name":"cloud-aws",
+ "version":"2.3.3",
+ "description":"The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
+ "jvm":true,
+ "classname":"org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
+ "isolated":true,
+ "site":false
+ },
+ {
+ "name":"kopf",
+ "version":"2.0.1",
+ "description":"kopf - simple web administration tool for Elasticsearch",
+ "url":"/_plugin/kopf/",
+ "jvm":false,
+ "site":true
+ },
+ {
+ "name":"tr-metrics",
+ "version":"7bd5b4b",
+ "description":"Logs cluster and node stats for performance monitoring.",
+ "jvm":true,
+ "classname":"com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
+ "isolated":true,
+ "site":false
+ }
+ ]
+ }
+}
+`
+
+var clusterstatsIndicesExpected = map[string]interface{}{
+ "completion_size_in_bytes": float64(0),
+ "count": float64(1),
+ "docs_count": float64(4),
+ "docs_deleted": float64(0),
+ "fielddata_evictions": float64(0),
+ "fielddata_memory_size_in_bytes": float64(0),
+ "percolate_current": float64(0),
+ "percolate_memory_size_in_bytes": float64(-1),
+ "percolate_queries": float64(0),
+ "percolate_time_in_millis": float64(0),
+ "percolate_total": float64(0),
+ "percolate_memory_size": "-1b",
+ "query_cache_cache_count": float64(0),
+ "query_cache_cache_size": float64(0),
+ "query_cache_evictions": float64(0),
+ "query_cache_hit_count": float64(0),
+ "query_cache_memory_size_in_bytes": float64(0),
+ "query_cache_miss_count": float64(0),
+ "query_cache_total_count": float64(0),
+ "segments_count": float64(4),
+ "segments_doc_values_memory_in_bytes": float64(368),
+ "segments_fixed_bit_set_memory_in_bytes": float64(0),
+ "segments_index_writer_max_memory_in_bytes": float64(2.048e+06),
+ "segments_index_writer_memory_in_bytes": float64(0),
+ "segments_memory_in_bytes": float64(11828),
+ "segments_norms_memory_in_bytes": float64(1280),
+ "segments_stored_fields_memory_in_bytes": float64(1248),
+ "segments_term_vectors_memory_in_bytes": float64(0),
+ "segments_terms_memory_in_bytes": float64(8932),
+ "segments_version_map_memory_in_bytes": float64(0),
+ "shards_index_primaries_avg": float64(4),
+ "shards_index_primaries_max": float64(4),
+ "shards_index_primaries_min": float64(4),
+ "shards_index_replication_avg": float64(0),
+ "shards_index_replication_max": float64(0),
+ "shards_index_replication_min": float64(0),
+ "shards_index_shards_avg": float64(4),
+ "shards_index_shards_max": float64(4),
+ "shards_index_shards_min": float64(4),
+ "shards_primaries": float64(4),
+ "shards_replication": float64(0),
+ "shards_total": float64(4),
+ "store_size_in_bytes": float64(17084),
+ "store_throttle_time_in_millis": float64(0),
+}
+
+var clusterstatsNodesExpected = map[string]interface{}{
+ "count_client": float64(0),
+ "count_data_only": float64(0),
+ "count_master_data": float64(1),
+ "count_master_only": float64(0),
+ "count_total": float64(1),
+ "fs_available_in_bytes": float64(6.34478592e+09),
+ "fs_free_in_bytes": float64(6.447439872e+09),
+ "fs_total_in_bytes": float64(8.318783488e+09),
+ "jvm_max_uptime_in_millis": float64(1.1580527e+07),
+ "jvm_mem_heap_max_in_bytes": float64(1.065025536e+09),
+ "jvm_mem_heap_used_in_bytes": float64(7.0550288e+07),
+ "jvm_threads": float64(30),
+ "jvm_versions_0_count": float64(1),
+ "jvm_versions_0_version": "1.8.0_101",
+ "jvm_versions_0_vm_name": "OpenJDK 64-Bit Server VM",
+ "jvm_versions_0_vm_vendor": "Oracle Corporation",
+ "jvm_versions_0_vm_version": "25.101-b13",
+ "os_allocated_processors": float64(1),
+ "os_available_processors": float64(1),
+ "os_mem_total_in_bytes": float64(5.93301504e+08),
+ "os_names_0_count": float64(1),
+ "os_names_0_name": "Linux",
+ "process_cpu_percent": float64(0),
+ "process_open_file_descriptors_avg": float64(145),
+ "process_open_file_descriptors_max": float64(145),
+ "process_open_file_descriptors_min": float64(145),
+ "versions_0_version": "2.3.3",
+ "plugins_0_classname": "org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
+ "plugins_0_description": "The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
+ "plugins_0_isolated": true,
+ "plugins_0_jvm": true,
+ "plugins_0_name": "cloud-aws",
+ "plugins_0_site": false,
+ "plugins_0_version": "2.3.3",
+ "plugins_1_description": "kopf - simple web administration tool for Elasticsearch",
+ "plugins_1_jvm": false,
+ "plugins_1_name": "kopf",
+ "plugins_1_site": true,
+ "plugins_1_url": "/_plugin/kopf/",
+ "plugins_1_version": "2.0.1",
+ "plugins_2_classname": "com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
+ "plugins_2_description": "Logs cluster and node stats for performance monitoring.",
+ "plugins_2_isolated": true,
+ "plugins_2_jvm": true,
+ "plugins_2_name": "tr-metrics",
+ "plugins_2_site": false,
+ "plugins_2_version": "7bd5b4b",
+}
+
+const IsMasterResult = "SDFsfSDFsdfFSDSDfSFDSDF 10.206.124.66 10.206.124.66 test.host.com "
+
+const IsNotMasterResult = "junk 10.206.124.66 10.206.124.66 test.junk.com "
+
+const clusterIndicesResponse = `
+{
+ "_shards": {
+ "total": 9,
+ "successful": 6,
+ "failed": 0
+ },
+ "_all": {
+ "primaries": {
+ "docs": {
+ "count": 999,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 267500
+ },
+ "indexing": {
+ "index_total": 999,
+ "index_time_in_millis": 548,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 62914560
+ },
+ "refresh": {
+ "total": 9,
+ "total_time_in_millis": 256,
+ "external_total": 9,
+ "external_total_time_in_millis": 258,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 0,
+ "periodic": 0,
+ "total_time_in_millis": 0
+ },
+ "warmer": {
+ "current": 0,
+ "total": 6,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 3,
+ "memory_in_bytes": 12849,
+ "terms_memory_in_bytes": 10580,
+ "stored_fields_memory_in_bytes": 904,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 1152,
+ "points_memory_in_bytes": 9,
+ "doc_values_memory_in_bytes": 204,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 999,
+ "size_in_bytes": 226444,
+ "uncommitted_operations": 999,
+ "uncommitted_size_in_bytes": 226444,
+ "earliest_last_modified_age": 0
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ }
+ },
+ "total": {
+ "docs": {
+ "count": 1998,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 535000
+ },
+ "indexing": {
+ "index_total": 1998,
+ "index_time_in_millis": 793,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 125829120
+ },
+ "refresh": {
+ "total": 18,
+ "total_time_in_millis": 518,
+ "external_total": 18,
+ "external_total_time_in_millis": 522,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 0,
+ "periodic": 0,
+ "total_time_in_millis": 0
+ },
+ "warmer": {
+ "current": 0,
+ "total": 12,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 6,
+ "memory_in_bytes": 25698,
+ "terms_memory_in_bytes": 21160,
+ "stored_fields_memory_in_bytes": 1808,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 2304,
+ "points_memory_in_bytes": 18,
+ "doc_values_memory_in_bytes": 408,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 1998,
+ "size_in_bytes": 452888,
+ "uncommitted_operations": 1998,
+ "uncommitted_size_in_bytes": 452888,
+ "earliest_last_modified_age": 0
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ }
+ }
+ },
+ "indices": {
+ "twitter": {
+ "uuid": "AtNrbbl_QhirW0p7Fnq26A",
+ "primaries": {
+ "docs": {
+ "count": 999,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 267500
+ },
+ "indexing": {
+ "index_total": 999,
+ "index_time_in_millis": 548,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 62914560
+ },
+ "refresh": {
+ "total": 9,
+ "total_time_in_millis": 256,
+ "external_total": 9,
+ "external_total_time_in_millis": 258,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 0,
+ "periodic": 0,
+ "total_time_in_millis": 0
+ },
+ "warmer": {
+ "current": 0,
+ "total": 6,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 3,
+ "memory_in_bytes": 12849,
+ "terms_memory_in_bytes": 10580,
+ "stored_fields_memory_in_bytes": 904,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 1152,
+ "points_memory_in_bytes": 9,
+ "doc_values_memory_in_bytes": 204,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 999,
+ "size_in_bytes": 226444,
+ "uncommitted_operations": 999,
+ "uncommitted_size_in_bytes": 226444,
+ "earliest_last_modified_age": 0
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ }
+ },
+ "total": {
+ "docs": {
+ "count": 1998,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 535000
+ },
+ "indexing": {
+ "index_total": 1998,
+ "index_time_in_millis": 793,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 125829120
+ },
+ "refresh": {
+ "total": 18,
+ "total_time_in_millis": 518,
+ "external_total": 18,
+ "external_total_time_in_millis": 522,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 0,
+ "periodic": 0,
+ "total_time_in_millis": 0
+ },
+ "warmer": {
+ "current": 0,
+ "total": 12,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 6,
+ "memory_in_bytes": 25698,
+ "terms_memory_in_bytes": 21160,
+ "stored_fields_memory_in_bytes": 1808,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 2304,
+ "points_memory_in_bytes": 18,
+ "doc_values_memory_in_bytes": 408,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 1998,
+ "size_in_bytes": 452888,
+ "uncommitted_operations": 1998,
+ "uncommitted_size_in_bytes": 452888,
+ "earliest_last_modified_age": 0
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ }
+ }
+ }
+ }
+}`
+
+var clusterIndicesExpected = map[string]interface{}{
+ "completion_size_in_bytes": float64(0),
+ "docs_count": float64(999),
+ "docs_deleted": float64(0),
+ "fielddata_evictions": float64(0),
+ "fielddata_memory_size_in_bytes": float64(0),
+ "flush_periodic": float64(0),
+ "flush_total": float64(0),
+ "flush_total_time_in_millis": float64(0),
+ "get_current": float64(0),
+ "get_exists_time_in_millis": float64(0),
+ "get_exists_total": float64(0),
+ "get_missing_time_in_millis": float64(0),
+ "get_missing_total": float64(0),
+ "get_time_in_millis": float64(0),
+ "get_total": float64(0),
+ "indexing_delete_current": float64(0),
+ "indexing_delete_time_in_millis": float64(0),
+ "indexing_delete_total": float64(0),
+ "indexing_index_current": float64(0),
+ "indexing_index_failed": float64(0),
+ "indexing_index_time_in_millis": float64(548),
+ "indexing_index_total": float64(999),
+ "indexing_is_throttled": false,
+ "indexing_noop_update_total": float64(0),
+ "indexing_throttle_time_in_millis": float64(0),
+ "merges_current": float64(0),
+ "merges_current_docs": float64(0),
+ "merges_current_size_in_bytes": float64(0),
+ "merges_total": float64(0),
+ "merges_total_auto_throttle_in_bytes": float64(62914560),
+ "merges_total_docs": float64(0),
+ "merges_total_size_in_bytes": float64(0),
+ "merges_total_stopped_time_in_millis": float64(0),
+ "merges_total_throttled_time_in_millis": float64(0),
+ "merges_total_time_in_millis": float64(0),
+ "query_cache_cache_count": float64(0),
+ "query_cache_cache_size": float64(0),
+ "query_cache_evictions": float64(0),
+ "query_cache_hit_count": float64(0),
+ "query_cache_memory_size_in_bytes": float64(0),
+ "query_cache_miss_count": float64(0),
+ "query_cache_total_count": float64(0),
+ "recovery_current_as_source": float64(0),
+ "recovery_current_as_target": float64(0),
+ "recovery_throttle_time_in_millis": float64(0),
+ "refresh_external_total": float64(9),
+ "refresh_external_total_time_in_millis": float64(258),
+ "refresh_listeners": float64(0),
+ "refresh_total": float64(9),
+ "refresh_total_time_in_millis": float64(256),
+ "request_cache_evictions": float64(0),
+ "request_cache_hit_count": float64(0),
+ "request_cache_memory_size_in_bytes": float64(0),
+ "request_cache_miss_count": float64(0),
+ "search_fetch_current": float64(0),
+ "search_fetch_time_in_millis": float64(0),
+ "search_fetch_total": float64(0),
+ "search_open_contexts": float64(0),
+ "search_query_current": float64(0),
+ "search_query_time_in_millis": float64(0),
+ "search_query_total": float64(0),
+ "search_scroll_current": float64(0),
+ "search_scroll_time_in_millis": float64(0),
+ "search_scroll_total": float64(0),
+ "search_suggest_current": float64(0),
+ "search_suggest_time_in_millis": float64(0),
+ "search_suggest_total": float64(0),
+ "segments_count": float64(3),
+ "segments_doc_values_memory_in_bytes": float64(204),
+ "segments_fixed_bit_set_memory_in_bytes": float64(0),
+ "segments_index_writer_memory_in_bytes": float64(0),
+ "segments_max_unsafe_auto_id_timestamp": float64(-1),
+ "segments_memory_in_bytes": float64(12849),
+ "segments_norms_memory_in_bytes": float64(1152),
+ "segments_points_memory_in_bytes": float64(9),
+ "segments_stored_fields_memory_in_bytes": float64(904),
+ "segments_term_vectors_memory_in_bytes": float64(0),
+ "segments_terms_memory_in_bytes": float64(10580),
+ "segments_version_map_memory_in_bytes": float64(0),
+ "store_size_in_bytes": float64(267500),
+ "translog_earliest_last_modified_age": float64(0),
+ "translog_operations": float64(999),
+ "translog_size_in_bytes": float64(226444),
+ "translog_uncommitted_operations": float64(999),
+ "translog_uncommitted_size_in_bytes": float64(226444),
+ "warmer_current": float64(0),
+ "warmer_total": float64(6),
+ "warmer_total_time_in_millis": float64(0),
+}
+
+const clusterIndicesShardsResponse = `
+{
+ "_shards": {
+ "total": 9,
+ "successful": 6,
+ "failed": 0
+ },
+ "_all": {
+ "primaries": {
+ "docs": {
+ "count": 999,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 267500
+ },
+ "indexing": {
+ "index_total": 999,
+ "index_time_in_millis": 548,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 62914560
+ },
+ "refresh": {
+ "total": 9,
+ "total_time_in_millis": 256,
+ "external_total": 9,
+ "external_total_time_in_millis": 258,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 0,
+ "periodic": 0,
+ "total_time_in_millis": 0
+ },
+ "warmer": {
+ "current": 0,
+ "total": 6,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 3,
+ "memory_in_bytes": 12849,
+ "terms_memory_in_bytes": 10580,
+ "stored_fields_memory_in_bytes": 904,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 1152,
+ "points_memory_in_bytes": 9,
+ "doc_values_memory_in_bytes": 204,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 999,
+ "size_in_bytes": 226444,
+ "uncommitted_operations": 999,
+ "uncommitted_size_in_bytes": 226444,
+ "earliest_last_modified_age": 0
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ }
+ },
+ "total": {
+ "docs": {
+ "count": 1998,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 535000
+ },
+ "indexing": {
+ "index_total": 1998,
+ "index_time_in_millis": 793,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 125829120
+ },
+ "refresh": {
+ "total": 18,
+ "total_time_in_millis": 518,
+ "external_total": 18,
+ "external_total_time_in_millis": 522,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 0,
+ "periodic": 0,
+ "total_time_in_millis": 0
+ },
+ "warmer": {
+ "current": 0,
+ "total": 12,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 6,
+ "memory_in_bytes": 25698,
+ "terms_memory_in_bytes": 21160,
+ "stored_fields_memory_in_bytes": 1808,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 2304,
+ "points_memory_in_bytes": 18,
+ "doc_values_memory_in_bytes": 408,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 1998,
+ "size_in_bytes": 452888,
+ "uncommitted_operations": 1998,
+ "uncommitted_size_in_bytes": 452888,
+ "earliest_last_modified_age": 0
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ }
+ }
+ },
+ "indices": {
+ "twitter": {
+ "uuid": "AtNrbbl_QhirW0p7Fnq26A",
+ "primaries": {
+ "docs": {
+ "count": 999,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 267500
+ },
+ "indexing": {
+ "index_total": 999,
+ "index_time_in_millis": 548,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 62914560
+ },
+ "refresh": {
+ "total": 9,
+ "total_time_in_millis": 256,
+ "external_total": 9,
+ "external_total_time_in_millis": 258,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 0,
+ "periodic": 0,
+ "total_time_in_millis": 0
+ },
+ "warmer": {
+ "current": 0,
+ "total": 6,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 3,
+ "memory_in_bytes": 12849,
+ "terms_memory_in_bytes": 10580,
+ "stored_fields_memory_in_bytes": 904,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 1152,
+ "points_memory_in_bytes": 9,
+ "doc_values_memory_in_bytes": 204,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 999,
+ "size_in_bytes": 226444,
+ "uncommitted_operations": 999,
+ "uncommitted_size_in_bytes": 226444,
+ "earliest_last_modified_age": 0
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ }
+ },
+ "total": {
+ "docs": {
+ "count": 1998,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 535000
+ },
+ "indexing": {
+ "index_total": 1998,
+ "index_time_in_millis": 793,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 125829120
+ },
+ "refresh": {
+ "total": 18,
+ "total_time_in_millis": 518,
+ "external_total": 18,
+ "external_total_time_in_millis": 522,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 0,
+ "periodic": 0,
+ "total_time_in_millis": 0
+ },
+ "warmer": {
+ "current": 0,
+ "total": 12,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 6,
+ "memory_in_bytes": 25698,
+ "terms_memory_in_bytes": 21160,
+ "stored_fields_memory_in_bytes": 1808,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 2304,
+ "points_memory_in_bytes": 18,
+ "doc_values_memory_in_bytes": 408,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 1998,
+ "size_in_bytes": 452888,
+ "uncommitted_operations": 1998,
+ "uncommitted_size_in_bytes": 452888,
+ "earliest_last_modified_age": 0
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ }
+ },
+ "shards": {
+ "0": [
+ {
+ "routing": {
+ "state": "STARTED",
+ "primary": true,
+ "node": "oqvR8I1dTpONvwRM30etww",
+ "relocating_node": null
+ },
+ "docs": {
+ "count": 340,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 90564
+ },
+ "indexing": {
+ "index_total": 340,
+ "index_time_in_millis": 176,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 20971520
+ },
+ "refresh": {
+ "total": 6,
+ "total_time_in_millis": 103,
+ "external_total": 4,
+ "external_total_time_in_millis": 105,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 1,
+ "periodic": 0,
+ "total_time_in_millis": 32
+ },
+ "warmer": {
+ "current": 0,
+ "total": 3,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 1,
+ "memory_in_bytes": 4301,
+ "terms_memory_in_bytes": 3534,
+ "stored_fields_memory_in_bytes": 312,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 384,
+ "points_memory_in_bytes": 3,
+ "doc_values_memory_in_bytes": 68,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 340,
+ "size_in_bytes": 77158,
+ "uncommitted_operations": 0,
+ "uncommitted_size_in_bytes": 55,
+ "earliest_last_modified_age": 936870
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ },
+ "commit": {
+ "id": "13gxQDHZ96BnNkzSgEdElQ==",
+ "generation": 4,
+ "user_data": {
+ "local_checkpoint": "339",
+ "max_unsafe_auto_id_timestamp": "-1",
+ "min_retained_seq_no": "340",
+ "translog_uuid": "4rp02VCQRTSJXgochWk3Hg",
+ "history_uuid": "-od5QvNmQlero8jatbG-5w",
+ "sync_id": "KKglZYafSaWN_MFUbpNviA",
+ "translog_generation": "3",
+ "max_seq_no": "339"
+ },
+ "num_docs": 340
+ },
+ "seq_no": {
+ "max_seq_no": 339,
+ "local_checkpoint": 339,
+ "global_checkpoint": 339
+ },
+ "retention_leases": {
+ "primary_term": 1,
+ "version": 0,
+ "leases": []
+ },
+ "shard_path": {
+ "state_path": "/usr/share/elasticsearch/data/nodes/0",
+ "data_path": "/usr/share/elasticsearch/data/nodes/0",
+ "is_custom_data_path": false
+ }
+ },
+ {
+ "routing": {
+ "state": "STARTED",
+ "primary": false,
+ "node": "0jfDeZxuTsGblcDGa39DzQ",
+ "relocating_node": null
+ },
+ "docs": {
+ "count": 340,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 90564
+ },
+ "indexing": {
+ "index_total": 340,
+ "index_time_in_millis": 99,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 20971520
+ },
+ "refresh": {
+ "total": 6,
+ "total_time_in_millis": 139,
+ "external_total": 4,
+ "external_total_time_in_millis": 140,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 1,
+ "periodic": 0,
+ "total_time_in_millis": 34
+ },
+ "warmer": {
+ "current": 0,
+ "total": 3,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 1,
+ "memory_in_bytes": 4301,
+ "terms_memory_in_bytes": 3534,
+ "stored_fields_memory_in_bytes": 312,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 384,
+ "points_memory_in_bytes": 3,
+ "doc_values_memory_in_bytes": 68,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 340,
+ "size_in_bytes": 77158,
+ "uncommitted_operations": 0,
+ "uncommitted_size_in_bytes": 55,
+ "earliest_last_modified_age": 936653
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ },
+ "commit": {
+ "id": "A8QO9SiMWYX000riUOApBg==",
+ "generation": 5,
+ "user_data": {
+ "local_checkpoint": "339",
+ "max_unsafe_auto_id_timestamp": "-1",
+ "min_retained_seq_no": "340",
+ "translog_uuid": "9kWpEKQyQ3yIUwwEp4fP8A",
+ "history_uuid": "-od5QvNmQlero8jatbG-5w",
+ "sync_id": "KKglZYafSaWN_MFUbpNviA",
+ "translog_generation": "3",
+ "max_seq_no": "339"
+ },
+ "num_docs": 340
+ },
+ "seq_no": {
+ "max_seq_no": 339,
+ "local_checkpoint": 339,
+ "global_checkpoint": 339
+ },
+ "retention_leases": {
+ "primary_term": 1,
+ "version": 0,
+ "leases": []
+ },
+ "shard_path": {
+ "state_path": "/usr/share/elasticsearch/data/nodes/0",
+ "data_path": "/usr/share/elasticsearch/data/nodes/0",
+ "is_custom_data_path": false
+ }
+ }
+ ],
+ "1": [
+ {
+ "routing": {
+ "state": "STARTED",
+ "primary": false,
+ "node": "oqvR8I1dTpONvwRM30etww",
+ "relocating_node": null
+ },
+ "docs": {
+ "count": 352,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 94584
+ },
+ "indexing": {
+ "index_total": 352,
+ "index_time_in_millis": 66,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 20971520
+ },
+ "refresh": {
+ "total": 6,
+ "total_time_in_millis": 104,
+ "external_total": 4,
+ "external_total_time_in_millis": 106,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 1,
+ "periodic": 0,
+ "total_time_in_millis": 26
+ },
+ "warmer": {
+ "current": 0,
+ "total": 3,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 1,
+ "memory_in_bytes": 4280,
+ "terms_memory_in_bytes": 3529,
+ "stored_fields_memory_in_bytes": 296,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 384,
+ "points_memory_in_bytes": 3,
+ "doc_values_memory_in_bytes": 68,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 352,
+ "size_in_bytes": 79980,
+ "uncommitted_operations": 0,
+ "uncommitted_size_in_bytes": 55,
+ "earliest_last_modified_age": 936144
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ },
+ "commit": {
+ "id": "13gxQDHZ96BnNkzSgEdEkg==",
+ "generation": 5,
+ "user_data": {
+ "local_checkpoint": "351",
+ "max_unsafe_auto_id_timestamp": "-1",
+ "min_retained_seq_no": "352",
+ "translog_uuid": "SjKxb5TIRqCinxWbqVBo-g",
+ "history_uuid": "3SAavs9KTPm-jhaioYg4UA",
+ "sync_id": "swZVzk6tShS0tcbBQt9AjA",
+ "translog_generation": "3",
+ "max_seq_no": "351"
+ },
+ "num_docs": 352
+ },
+ "seq_no": {
+ "max_seq_no": 351,
+ "local_checkpoint": 351,
+ "global_checkpoint": 351
+ },
+ "retention_leases": {
+ "primary_term": 1,
+ "version": 0,
+ "leases": []
+ },
+ "shard_path": {
+ "state_path": "/usr/share/elasticsearch/data/nodes/0",
+ "data_path": "/usr/share/elasticsearch/data/nodes/0",
+ "is_custom_data_path": false
+ }
+ },
+ {
+ "routing": {
+ "state": "STARTED",
+ "primary": true,
+ "node": "0jfDeZxuTsGblcDGa39DzQ",
+ "relocating_node": null
+ },
+ "docs": {
+ "count": 352,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 94584
+ },
+ "indexing": {
+ "index_total": 352,
+ "index_time_in_millis": 154,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 20971520
+ },
+ "refresh": {
+ "total": 6,
+ "total_time_in_millis": 74,
+ "external_total": 4,
+ "external_total_time_in_millis": 74,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 1,
+ "periodic": 0,
+ "total_time_in_millis": 29
+ },
+ "warmer": {
+ "current": 0,
+ "total": 3,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 1,
+ "memory_in_bytes": 4280,
+ "terms_memory_in_bytes": 3529,
+ "stored_fields_memory_in_bytes": 296,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 384,
+ "points_memory_in_bytes": 3,
+ "doc_values_memory_in_bytes": 68,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 352,
+ "size_in_bytes": 79980,
+ "uncommitted_operations": 0,
+ "uncommitted_size_in_bytes": 55,
+ "earliest_last_modified_age": 936839
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ },
+ "commit": {
+ "id": "A8QO9SiMWYX000riUOApAw==",
+ "generation": 4,
+ "user_data": {
+ "local_checkpoint": "351",
+ "max_unsafe_auto_id_timestamp": "-1",
+ "min_retained_seq_no": "352",
+ "translog_uuid": "GpauXMbxQpWKUYGYqQUIdQ",
+ "history_uuid": "3SAavs9KTPm-jhaioYg4UA",
+ "sync_id": "swZVzk6tShS0tcbBQt9AjA",
+ "translog_generation": "3",
+ "max_seq_no": "351"
+ },
+ "num_docs": 352
+ },
+ "seq_no": {
+ "max_seq_no": 351,
+ "local_checkpoint": 351,
+ "global_checkpoint": 351
+ },
+ "retention_leases": {
+ "primary_term": 1,
+ "version": 0,
+ "leases": []
+ },
+ "shard_path": {
+ "state_path": "/usr/share/elasticsearch/data/nodes/0",
+ "data_path": "/usr/share/elasticsearch/data/nodes/0",
+ "is_custom_data_path": false
+ }
+ }
+ ],
+ "2": [
+ {
+ "routing": {
+ "state": "STARTED",
+ "primary": true,
+ "node": "oqvR8I1dTpONvwRM30etww",
+ "relocating_node": null
+ },
+ "docs": {
+ "count": 307,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 82727
+ },
+ "indexing": {
+ "index_total": 307,
+ "index_time_in_millis": 218,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 20971520
+ },
+ "refresh": {
+ "total": 6,
+ "total_time_in_millis": 86,
+ "external_total": 4,
+ "external_total_time_in_millis": 87,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 1,
+ "periodic": 0,
+ "total_time_in_millis": 33
+ },
+ "warmer": {
+ "current": 0,
+ "total": 3,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 1,
+ "memory_in_bytes": 4268,
+ "terms_memory_in_bytes": 3517,
+ "stored_fields_memory_in_bytes": 296,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 384,
+ "points_memory_in_bytes": 3,
+ "doc_values_memory_in_bytes": 68,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 307,
+ "size_in_bytes": 69471,
+ "uncommitted_operations": 0,
+ "uncommitted_size_in_bytes": 55,
+ "earliest_last_modified_age": 936881
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ },
+ "commit": {
+ "id": "13gxQDHZ96BnNkzSgEdElg==",
+ "generation": 4,
+ "user_data": {
+ "local_checkpoint": "306",
+ "max_unsafe_auto_id_timestamp": "-1",
+ "min_retained_seq_no": "307",
+ "translog_uuid": "Y0a3bdIQTD2Ir6Ex9J3gSQ",
+ "history_uuid": "WmsCMyRyRaGz9mnR50wYFA",
+ "sync_id": "nvNppgfgTp63llS8r-Pwiw",
+ "translog_generation": "3",
+ "max_seq_no": "306"
+ },
+ "num_docs": 307
+ },
+ "seq_no": {
+ "max_seq_no": 306,
+ "local_checkpoint": 306,
+ "global_checkpoint": 306
+ },
+ "retention_leases": {
+ "primary_term": 1,
+ "version": 0,
+ "leases": []
+ },
+ "shard_path": {
+ "state_path": "/usr/share/elasticsearch/data/nodes/0",
+ "data_path": "/usr/share/elasticsearch/data/nodes/0",
+ "is_custom_data_path": false
+ }
+ },
+ {
+ "routing": {
+ "state": "STARTED",
+ "primary": false,
+ "node": "0jfDeZxuTsGblcDGa39DzQ",
+ "relocating_node": null
+ },
+ "docs": {
+ "count": 307,
+ "deleted": 0
+ },
+ "store": {
+ "size_in_bytes": 82727
+ },
+ "indexing": {
+ "index_total": 307,
+ "index_time_in_millis": 80,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 20971520
+ },
+ "refresh": {
+ "total": 6,
+ "total_time_in_millis": 33,
+ "external_total": 4,
+ "external_total_time_in_millis": 30,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 1,
+ "periodic": 0,
+ "total_time_in_millis": 37
},
- "replication":{
- "min":0.0,
- "max":0.0,
- "avg":0.0
+ "warmer": {
+ "current": 0,
+ "total": 3,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 1,
+ "memory_in_bytes": 4268,
+ "terms_memory_in_bytes": 3517,
+ "stored_fields_memory_in_bytes": 296,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 384,
+ "points_memory_in_bytes": 3,
+ "doc_values_memory_in_bytes": 68,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -1,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 307,
+ "size_in_bytes": 69471,
+ "uncommitted_operations": 0,
+ "uncommitted_size_in_bytes": 55,
+ "earliest_last_modified_age": 936696
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ },
+ "commit": {
+ "id": "A8QO9SiMWYX000riUOApBw==",
+ "generation": 5,
+ "user_data": {
+ "local_checkpoint": "306",
+ "max_unsafe_auto_id_timestamp": "-1",
+ "min_retained_seq_no": "307",
+ "translog_uuid": "s62inR7FRA2p86axtAIvgA",
+ "history_uuid": "WmsCMyRyRaGz9mnR50wYFA",
+ "sync_id": "nvNppgfgTp63llS8r-Pwiw",
+ "translog_generation": "3",
+ "max_seq_no": "306"
+ },
+ "num_docs": 307
+ },
+ "seq_no": {
+ "max_seq_no": 306,
+ "local_checkpoint": 306,
+ "global_checkpoint": 306
+ },
+ "retention_leases": {
+ "primary_term": 1,
+ "version": 0,
+ "leases": []
+ },
+ "shard_path": {
+ "state_path": "/usr/share/elasticsearch/data/nodes/0",
+ "data_path": "/usr/share/elasticsearch/data/nodes/0",
+ "is_custom_data_path": false
}
- }
- },
- "docs":{
- "count":4,
- "deleted":0
- },
- "store":{
- "size_in_bytes":17084,
- "throttle_time_in_millis":0
- },
- "fielddata":{
- "memory_size_in_bytes":0,
- "evictions":0
- },
- "query_cache":{
- "memory_size_in_bytes":0,
- "total_count":0,
- "hit_count":0,
- "miss_count":0,
- "cache_size":0,
- "cache_count":0,
- "evictions":0
- },
- "completion":{
- "size_in_bytes":0
- },
- "segments":{
- "count":4,
- "memory_in_bytes":11828,
- "terms_memory_in_bytes":8932,
- "stored_fields_memory_in_bytes":1248,
- "term_vectors_memory_in_bytes":0,
- "norms_memory_in_bytes":1280,
- "doc_values_memory_in_bytes":368,
- "index_writer_memory_in_bytes":0,
- "index_writer_max_memory_in_bytes":2048000,
- "version_map_memory_in_bytes":0,
- "fixed_bit_set_memory_in_bytes":0
- },
- "percolate":{
- "total":0,
- "time_in_millis":0,
- "current":0,
- "memory_size_in_bytes":-1,
- "memory_size":"-1b",
- "queries":0
+ }
+ ]
}
- },
- "nodes":{
- "count":{
- "total":1,
- "master_only":0,
- "data_only":0,
- "master_data":1,
- "client":0
- },
- "versions":[
- {
- "version": "2.3.3"
- }
- ],
- "os":{
- "available_processors":1,
- "allocated_processors":1,
- "mem":{
- "total_in_bytes":593301504
- },
- "names":[
- {
- "name":"Linux",
- "count":1
- }
- ]
- },
- "process":{
- "cpu":{
- "percent":0
- },
- "open_file_descriptors":{
- "min":145,
- "max":145,
- "avg":145
- }
- },
- "jvm":{
- "max_uptime_in_millis":11580527,
- "versions":[
- {
- "version":"1.8.0_101",
- "vm_name":"OpenJDK 64-Bit Server VM",
- "vm_version":"25.101-b13",
- "vm_vendor":"Oracle Corporation",
- "count":1
- }
- ],
- "mem":{
- "heap_used_in_bytes":70550288,
- "heap_max_in_bytes":1065025536
- },
- "threads":30
- },
- "fs":{
- "total_in_bytes":8318783488,
- "free_in_bytes":6447439872,
- "available_in_bytes":6344785920
- },
- "plugins":[
- {
- "name":"cloud-aws",
- "version":"2.3.3",
- "description":"The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
- "jvm":true,
- "classname":"org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
- "isolated":true,
- "site":false
- },
- {
- "name":"kopf",
- "version":"2.0.1",
- "description":"kopf - simple web administration tool for Elasticsearch",
- "url":"/_plugin/kopf/",
- "jvm":false,
- "site":true
- },
- {
- "name":"tr-metrics",
- "version":"7bd5b4b",
- "description":"Logs cluster and node stats for performance monitoring.",
- "jvm":true,
- "classname":"com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
- "isolated":true,
- "site":false
- }
- ]
- }
-}
-`
+ }
+ }
+}`
-var clusterstatsIndicesExpected = map[string]interface{}{
- "completion_size_in_bytes": float64(0),
- "count": float64(1),
- "docs_count": float64(4),
- "docs_deleted": float64(0),
- "fielddata_evictions": float64(0),
- "fielddata_memory_size_in_bytes": float64(0),
- "percolate_current": float64(0),
- "percolate_memory_size_in_bytes": float64(-1),
- "percolate_queries": float64(0),
- "percolate_time_in_millis": float64(0),
- "percolate_total": float64(0),
- "percolate_memory_size": "-1b",
- "query_cache_cache_count": float64(0),
- "query_cache_cache_size": float64(0),
- "query_cache_evictions": float64(0),
- "query_cache_hit_count": float64(0),
- "query_cache_memory_size_in_bytes": float64(0),
- "query_cache_miss_count": float64(0),
- "query_cache_total_count": float64(0),
- "segments_count": float64(4),
- "segments_doc_values_memory_in_bytes": float64(368),
- "segments_fixed_bit_set_memory_in_bytes": float64(0),
- "segments_index_writer_max_memory_in_bytes": float64(2.048e+06),
- "segments_index_writer_memory_in_bytes": float64(0),
- "segments_memory_in_bytes": float64(11828),
- "segments_norms_memory_in_bytes": float64(1280),
- "segments_stored_fields_memory_in_bytes": float64(1248),
- "segments_term_vectors_memory_in_bytes": float64(0),
- "segments_terms_memory_in_bytes": float64(8932),
- "segments_version_map_memory_in_bytes": float64(0),
- "shards_index_primaries_avg": float64(4),
- "shards_index_primaries_max": float64(4),
- "shards_index_primaries_min": float64(4),
- "shards_index_replication_avg": float64(0),
- "shards_index_replication_max": float64(0),
- "shards_index_replication_min": float64(0),
- "shards_index_shards_avg": float64(4),
- "shards_index_shards_max": float64(4),
- "shards_index_shards_min": float64(4),
- "shards_primaries": float64(4),
- "shards_replication": float64(0),
- "shards_total": float64(4),
- "store_size_in_bytes": float64(17084),
- "store_throttle_time_in_millis": float64(0),
+var clusterIndicesPrimaryShardsExpected = map[string]interface{}{
+ "commit_generation": float64(4),
+ "commit_num_docs": float64(340),
+ "completion_size_in_bytes": float64(0),
+ "docs_count": float64(340),
+ "docs_deleted": float64(0),
+ "fielddata_evictions": float64(0),
+ "fielddata_memory_size_in_bytes": float64(0),
+ "flush_periodic": float64(0),
+ "flush_total": float64(1),
+ "flush_total_time_in_millis": float64(32),
+ "get_current": float64(0),
+ "get_exists_time_in_millis": float64(0),
+ "get_exists_total": float64(0),
+ "get_missing_time_in_millis": float64(0),
+ "get_missing_total": float64(0),
+ "get_time_in_millis": float64(0),
+ "get_total": float64(0),
+ "indexing_delete_current": float64(0),
+ "indexing_delete_time_in_millis": float64(0),
+ "indexing_delete_total": float64(0),
+ "indexing_index_current": float64(0),
+ "indexing_index_failed": float64(0),
+ "indexing_index_time_in_millis": float64(176),
+ "indexing_index_total": float64(340),
+ "indexing_noop_update_total": float64(0),
+ "indexing_throttle_time_in_millis": float64(0),
+ "merges_current": float64(0),
+ "merges_current_docs": float64(0),
+ "merges_current_size_in_bytes": float64(0),
+ "merges_total": float64(0),
+ "merges_total_auto_throttle_in_bytes": float64(2.097152e+07),
+ "merges_total_docs": float64(0),
+ "merges_total_size_in_bytes": float64(0),
+ "merges_total_stopped_time_in_millis": float64(0),
+ "merges_total_throttled_time_in_millis": float64(0),
+ "merges_total_time_in_millis": float64(0),
+ "query_cache_cache_count": float64(0),
+ "query_cache_cache_size": float64(0),
+ "query_cache_evictions": float64(0),
+ "query_cache_hit_count": float64(0),
+ "query_cache_memory_size_in_bytes": float64(0),
+ "query_cache_miss_count": float64(0),
+ "query_cache_total_count": float64(0),
+ "recovery_current_as_source": float64(0),
+ "recovery_current_as_target": float64(0),
+ "recovery_throttle_time_in_millis": float64(0),
+ "refresh_external_total": float64(4),
+ "refresh_external_total_time_in_millis": float64(105),
+ "refresh_listeners": float64(0),
+ "refresh_total": float64(6),
+ "refresh_total_time_in_millis": float64(103),
+ "request_cache_evictions": float64(0),
+ "request_cache_hit_count": float64(0),
+ "request_cache_memory_size_in_bytes": float64(0),
+ "request_cache_miss_count": float64(0),
+ "retention_leases_primary_term": float64(1),
+ "retention_leases_version": float64(0),
+ "routing_state": int(3),
+ "search_fetch_current": float64(0),
+ "search_fetch_time_in_millis": float64(0),
+ "search_fetch_total": float64(0),
+ "search_open_contexts": float64(0),
+ "search_query_current": float64(0),
+ "search_query_time_in_millis": float64(0),
+ "search_query_total": float64(0),
+ "search_scroll_current": float64(0),
+ "search_scroll_time_in_millis": float64(0),
+ "search_scroll_total": float64(0),
+ "search_suggest_current": float64(0),
+ "search_suggest_time_in_millis": float64(0),
+ "search_suggest_total": float64(0),
+ "segments_count": float64(1),
+ "segments_doc_values_memory_in_bytes": float64(68),
+ "segments_fixed_bit_set_memory_in_bytes": float64(0),
+ "segments_index_writer_memory_in_bytes": float64(0),
+ "segments_max_unsafe_auto_id_timestamp": float64(-1),
+ "segments_memory_in_bytes": float64(4301),
+ "segments_norms_memory_in_bytes": float64(384),
+ "segments_points_memory_in_bytes": float64(3),
+ "segments_stored_fields_memory_in_bytes": float64(312),
+ "segments_term_vectors_memory_in_bytes": float64(0),
+ "segments_terms_memory_in_bytes": float64(3534),
+ "segments_version_map_memory_in_bytes": float64(0),
+ "seq_no_global_checkpoint": float64(339),
+ "seq_no_local_checkpoint": float64(339),
+ "seq_no_max_seq_no": float64(339),
+ "store_size_in_bytes": float64(90564),
+ "translog_earliest_last_modified_age": float64(936870),
+ "translog_operations": float64(340),
+ "translog_size_in_bytes": float64(77158),
+ "translog_uncommitted_operations": float64(0),
+ "translog_uncommitted_size_in_bytes": float64(55),
+ "warmer_current": float64(0),
+ "warmer_total": float64(3),
+ "warmer_total_time_in_millis": float64(0),
}
-var clusterstatsNodesExpected = map[string]interface{}{
- "count_client": float64(0),
- "count_data_only": float64(0),
- "count_master_data": float64(1),
- "count_master_only": float64(0),
- "count_total": float64(1),
- "fs_available_in_bytes": float64(6.34478592e+09),
- "fs_free_in_bytes": float64(6.447439872e+09),
- "fs_total_in_bytes": float64(8.318783488e+09),
- "jvm_max_uptime_in_millis": float64(1.1580527e+07),
- "jvm_mem_heap_max_in_bytes": float64(1.065025536e+09),
- "jvm_mem_heap_used_in_bytes": float64(7.0550288e+07),
- "jvm_threads": float64(30),
- "jvm_versions_0_count": float64(1),
- "jvm_versions_0_version": "1.8.0_101",
- "jvm_versions_0_vm_name": "OpenJDK 64-Bit Server VM",
- "jvm_versions_0_vm_vendor": "Oracle Corporation",
- "jvm_versions_0_vm_version": "25.101-b13",
- "os_allocated_processors": float64(1),
- "os_available_processors": float64(1),
- "os_mem_total_in_bytes": float64(5.93301504e+08),
- "os_names_0_count": float64(1),
- "os_names_0_name": "Linux",
- "process_cpu_percent": float64(0),
- "process_open_file_descriptors_avg": float64(145),
- "process_open_file_descriptors_max": float64(145),
- "process_open_file_descriptors_min": float64(145),
- "versions_0_version": "2.3.3",
- "plugins_0_classname": "org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
- "plugins_0_description": "The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
- "plugins_0_isolated": true,
- "plugins_0_jvm": true,
- "plugins_0_name": "cloud-aws",
- "plugins_0_site": false,
- "plugins_0_version": "2.3.3",
- "plugins_1_description": "kopf - simple web administration tool for Elasticsearch",
- "plugins_1_jvm": false,
- "plugins_1_name": "kopf",
- "plugins_1_site": true,
- "plugins_1_url": "/_plugin/kopf/",
- "plugins_1_version": "2.0.1",
- "plugins_2_classname": "com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
- "plugins_2_description": "Logs cluster and node stats for performance monitoring.",
- "plugins_2_isolated": true,
- "plugins_2_jvm": true,
- "plugins_2_name": "tr-metrics",
- "plugins_2_site": false,
- "plugins_2_version": "7bd5b4b",
+var clusterIndicesReplicaShardsExpected = map[string]interface{}{
+ "commit_generation": float64(5),
+ "commit_num_docs": float64(352),
+ "completion_size_in_bytes": float64(0),
+ "docs_count": float64(352),
+ "docs_deleted": float64(0),
+ "fielddata_evictions": float64(0),
+ "fielddata_memory_size_in_bytes": float64(0),
+ "flush_periodic": float64(0),
+ "flush_total": float64(1),
+ "flush_total_time_in_millis": float64(26),
+ "get_current": float64(0),
+ "get_exists_time_in_millis": float64(0),
+ "get_exists_total": float64(0),
+ "get_missing_time_in_millis": float64(0),
+ "get_missing_total": float64(0),
+ "get_time_in_millis": float64(0),
+ "get_total": float64(0),
+ "indexing_delete_current": float64(0),
+ "indexing_delete_time_in_millis": float64(0),
+ "indexing_delete_total": float64(0),
+ "indexing_index_current": float64(0),
+ "indexing_index_failed": float64(0),
+ "indexing_index_time_in_millis": float64(66),
+ "indexing_index_total": float64(352),
+ "indexing_noop_update_total": float64(0),
+ "indexing_throttle_time_in_millis": float64(0),
+ "merges_current": float64(0),
+ "merges_current_docs": float64(0),
+ "merges_current_size_in_bytes": float64(0),
+ "merges_total": float64(0),
+ "merges_total_auto_throttle_in_bytes": float64(20971520),
+ "merges_total_docs": float64(0),
+ "merges_total_size_in_bytes": float64(0),
+ "merges_total_stopped_time_in_millis": float64(0),
+ "merges_total_throttled_time_in_millis": float64(0),
+ "merges_total_time_in_millis": float64(0),
+ "query_cache_cache_count": float64(0),
+ "query_cache_cache_size": float64(0),
+ "query_cache_evictions": float64(0),
+ "query_cache_hit_count": float64(0),
+ "query_cache_memory_size_in_bytes": float64(0),
+ "query_cache_miss_count": float64(0),
+ "query_cache_total_count": float64(0),
+ "recovery_current_as_source": float64(0),
+ "recovery_current_as_target": float64(0),
+ "recovery_throttle_time_in_millis": float64(0),
+ "refresh_external_total": float64(4),
+ "refresh_external_total_time_in_millis": float64(106),
+ "refresh_listeners": float64(0),
+ "refresh_total": float64(6),
+ "refresh_total_time_in_millis": float64(104),
+ "request_cache_evictions": float64(0),
+ "request_cache_hit_count": float64(0),
+ "request_cache_memory_size_in_bytes": float64(0),
+ "request_cache_miss_count": float64(0),
+ "retention_leases_primary_term": float64(1),
+ "retention_leases_version": float64(0),
+ "routing_state": int(3),
+ "search_fetch_current": float64(0),
+ "search_fetch_time_in_millis": float64(0),
+ "search_fetch_total": float64(0),
+ "search_open_contexts": float64(0),
+ "search_query_current": float64(0),
+ "search_query_time_in_millis": float64(0),
+ "search_query_total": float64(0),
+ "search_scroll_current": float64(0),
+ "search_scroll_time_in_millis": float64(0),
+ "search_scroll_total": float64(0),
+ "search_suggest_current": float64(0),
+ "search_suggest_time_in_millis": float64(0),
+ "search_suggest_total": float64(0),
+ "segments_count": float64(1),
+ "segments_doc_values_memory_in_bytes": float64(68),
+ "segments_fixed_bit_set_memory_in_bytes": float64(0),
+ "segments_index_writer_memory_in_bytes": float64(0),
+ "segments_max_unsafe_auto_id_timestamp": float64(-1),
+ "segments_memory_in_bytes": float64(4280),
+ "segments_norms_memory_in_bytes": float64(384),
+ "segments_points_memory_in_bytes": float64(3),
+ "segments_stored_fields_memory_in_bytes": float64(296),
+ "segments_term_vectors_memory_in_bytes": float64(0),
+ "segments_terms_memory_in_bytes": float64(3529),
+ "segments_version_map_memory_in_bytes": float64(0),
+ "seq_no_global_checkpoint": float64(351),
+ "seq_no_local_checkpoint": float64(351),
+ "seq_no_max_seq_no": float64(351),
+ "store_size_in_bytes": float64(94584),
+ "translog_earliest_last_modified_age": float64(936144),
+ "translog_operations": float64(352),
+ "translog_size_in_bytes": float64(79980),
+ "translog_uncommitted_operations": float64(0),
+ "translog_uncommitted_size_in_bytes": float64(55),
+ "warmer_current": float64(0),
+ "warmer_total": float64(3),
+ "warmer_total_time_in_millis": float64(0),
}
-
-const IsMasterResult = "SDFsfSDFsdfFSDSDfSFDSDF 10.206.124.66 10.206.124.66 test.host.com "
-
-const IsNotMasterResult = "junk 10.206.124.66 10.206.124.66 test.junk.com "
diff --git a/plugins/inputs/ethtool/README.md b/plugins/inputs/ethtool/README.md
new file mode 100644
index 0000000000000..3f397cdfbe36f
--- /dev/null
+++ b/plugins/inputs/ethtool/README.md
@@ -0,0 +1,33 @@
+# Ethtool Input Plugin
+
+The ethtool input plugin pulls ethernet device stats. Fields pulled will depend on the network device and driver
+
+### Configuration:
+
+```toml
+# Returns ethtool statistics for given interfaces
+[[inputs.ethtool]]
+ ## List of interfaces to pull metrics for
+ # interface_include = ["eth0"]
+
+ ## List of interfaces to ignore when pulling metrics.
+ # interface_exclude = ["eth1"]
+```
+
+Interfaces can be included or ignored using
+
+- `interface_include`
+- `interface_exclude`
+
+Note that loopback interfaces will be automatically ignored
+
+### Metrics:
+
+Metrics are dependant on the network device and driver
+
+### Example Output:
+
+```
+ethtool,driver=igb,host=test01,interface=mgmt0 tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000
+ethtool,driver=igb,host=test02,interface=mgmt0 rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000
+```
diff --git a/plugins/inputs/ethtool/ethtool.go b/plugins/inputs/ethtool/ethtool.go
new file mode 100644
index 0000000000000..3f8f8e15618a2
--- /dev/null
+++ b/plugins/inputs/ethtool/ethtool.go
@@ -0,0 +1,50 @@
+package ethtool
+
+import (
+ "net"
+
+ "github.com/influxdata/telegraf"
+)
+
+type Command interface {
+ Init() error
+ DriverName(intf string) (string, error)
+ Interfaces() ([]net.Interface, error)
+ Stats(intf string) (map[string]uint64, error)
+}
+
+type Ethtool struct {
+ // This is the list of interface names to include
+ InterfaceInclude []string `toml:"interface_include"`
+
+ // This is the list of interface names to ignore
+ InterfaceExclude []string `toml:"interface_exclude"`
+
+ Log telegraf.Logger `toml:"-"`
+
+ // the ethtool command
+ command Command
+}
+
+const (
+ pluginName = "ethtool"
+ tagInterface = "interface"
+ tagDriverName = "driver"
+
+ sampleConfig = `
+ ## List of interfaces to pull metrics for
+ # interface_include = ["eth0"]
+
+ ## List of interfaces to ignore when pulling metrics.
+ # interface_exclude = ["eth1"]
+`
+)
+
+func (e *Ethtool) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description returns a one-sentence description on the Input
+func (e *Ethtool) Description() string {
+ return "Returns ethtool statistics for given interfaces"
+}
diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go
new file mode 100644
index 0000000000000..b8c9312cbe309
--- /dev/null
+++ b/plugins/inputs/ethtool/ethtool_linux.go
@@ -0,0 +1,136 @@
+// +build linux
+
+package ethtool
+
+import (
+ "net"
+ "sync"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/filter"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/pkg/errors"
+ "github.com/safchain/ethtool"
+)
+
+type CommandEthtool struct {
+ ethtool *ethtool.Ethtool
+}
+
+func (e *Ethtool) Gather(acc telegraf.Accumulator) error {
+
+ // Get the list of interfaces
+ interfaces, err := e.command.Interfaces()
+ if err != nil {
+ acc.AddError(err)
+ return nil
+ }
+
+ interfaceFilter, err := filter.NewIncludeExcludeFilter(e.InterfaceInclude, e.InterfaceExclude)
+ if err != nil {
+ return err
+ }
+
+ // parallelize the ethtool call in event of many interfaces
+ var wg sync.WaitGroup
+
+ for _, iface := range interfaces {
+
+ // Check this isn't a loop back and that its matched by the filter
+ if (iface.Flags&net.FlagLoopback == 0) && interfaceFilter.Match(iface.Name) {
+ wg.Add(1)
+
+ go func(i net.Interface) {
+ e.gatherEthtoolStats(i, acc)
+ wg.Done()
+ }(iface)
+ }
+ }
+
+ // Waiting for all the interfaces
+ wg.Wait()
+ return nil
+}
+
+// Initialise the Command Tool
+func (e *Ethtool) Init() error {
+ return e.command.Init()
+}
+
+// Gather the stats for the interface.
+func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulator) {
+
+ tags := make(map[string]string)
+ tags[tagInterface] = iface.Name
+
+ driverName, err := e.command.DriverName(iface.Name)
+ if err != nil {
+ driverErr := errors.Wrapf(err, "%s driver", iface.Name)
+ acc.AddError(driverErr)
+ return
+ }
+
+ tags[tagDriverName] = driverName
+
+ fields := make(map[string]interface{})
+ stats, err := e.command.Stats(iface.Name)
+ if err != nil {
+ statsErr := errors.Wrapf(err, "%s stats", iface.Name)
+ acc.AddError(statsErr)
+ return
+ }
+
+ for k, v := range stats {
+ fields[k] = v
+ }
+
+ acc.AddFields(pluginName, fields, tags)
+}
+
+func NewCommandEthtool() *CommandEthtool {
+ return &CommandEthtool{}
+}
+
+func (c *CommandEthtool) Init() error {
+
+ if c.ethtool != nil {
+ return nil
+ }
+
+ e, err := ethtool.NewEthtool()
+ if err == nil {
+ c.ethtool = e
+ }
+
+ return err
+}
+
+func (c *CommandEthtool) DriverName(intf string) (string, error) {
+ return c.ethtool.DriverName(intf)
+}
+
+func (c *CommandEthtool) Stats(intf string) (map[string]uint64, error) {
+ return c.ethtool.Stats(intf)
+}
+
+func (c *CommandEthtool) Interfaces() ([]net.Interface, error) {
+
+ // Get the list of interfaces
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ return interfaces, nil
+}
+
+func init() {
+
+ inputs.Add(pluginName, func() telegraf.Input {
+ return &Ethtool{
+ InterfaceInclude: []string{},
+ InterfaceExclude: []string{},
+ command: NewCommandEthtool(),
+ }
+ })
+}
diff --git a/plugins/inputs/ethtool/ethtool_notlinux.go b/plugins/inputs/ethtool/ethtool_notlinux.go
new file mode 100644
index 0000000000000..b022e0a46bb72
--- /dev/null
+++ b/plugins/inputs/ethtool/ethtool_notlinux.go
@@ -0,0 +1,23 @@
+// +build !linux
+
+package ethtool
+
+import (
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+func (e *Ethtool) Init() error {
+ e.Log.Warn("Current platform is not supported")
+ return nil
+}
+
+func (e *Ethtool) Gather(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func init() {
+ inputs.Add(pluginName, func() telegraf.Input {
+ return &Ethtool{}
+ })
+}
diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go
new file mode 100644
index 0000000000000..d281644a51ed0
--- /dev/null
+++ b/plugins/inputs/ethtool/ethtool_test.go
@@ -0,0 +1,381 @@
+// +build linux
+
+package ethtool
+
+import (
+ "net"
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+)
+
+var command *Ethtool
+var interfaceMap map[string]*InterfaceMock
+
+type InterfaceMock struct {
+ Name string
+ DriverName string
+ Stat map[string]uint64
+ LoopBack bool
+}
+
+type CommandEthtoolMock struct {
+ InterfaceMap map[string]*InterfaceMock
+}
+
+func (c *CommandEthtoolMock) Init() error {
+ // Not required for test mock
+ return nil
+}
+
+func (c *CommandEthtoolMock) DriverName(intf string) (driverName string, err error) {
+ i := c.InterfaceMap[intf]
+ if i != nil {
+ driverName = i.DriverName
+ return
+ }
+ return driverName, errors.New("interface not found")
+}
+
+func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) {
+ interfaceNames := make([]net.Interface, 0)
+ for k, v := range c.InterfaceMap {
+
+ // Whether to set the flag to loopback
+ flag := net.FlagUp
+ if v.LoopBack {
+ flag = net.FlagLoopback
+ }
+
+ // Create a dummy interface
+ iface := net.Interface{
+ Index: 0,
+ MTU: 1500,
+ Name: k,
+ HardwareAddr: nil,
+ Flags: flag,
+ }
+ interfaceNames = append(interfaceNames, iface)
+ }
+ return interfaceNames, nil
+}
+
+func (c *CommandEthtoolMock) Stats(intf string) (stat map[string]uint64, err error) {
+ i := c.InterfaceMap[intf]
+ if i != nil {
+ stat = i.Stat
+ return
+ }
+ return stat, errors.New("interface not found")
+}
+
+func setup() {
+
+ interfaceMap = make(map[string]*InterfaceMock)
+
+ eth1Stat := map[string]uint64{
+ "port_rx_1024_to_15xx": 25167245,
+ "port_rx_128_to_255": 1573526387,
+ "port_rx_15xx_to_jumbo": 137819058,
+ "port_rx_256_to_511": 772038107,
+ "port_rx_512_to_1023": 78294457,
+ "port_rx_64": 8798065,
+ "port_rx_65_to_127": 450348015,
+ "port_rx_bad": 0,
+ "port_rx_bad_bytes": 0,
+ "port_rx_bad_gtjumbo": 0,
+ "port_rx_broadcast": 6428250,
+ "port_rx_bytes": 893460472634,
+ "port_rx_control": 0,
+ "port_rx_dp_di_dropped_packets": 2772680304,
+ "port_rx_dp_hlb_fetch": 0,
+ "port_rx_dp_hlb_wait": 0,
+ "port_rx_dp_q_disabled_packets": 0,
+ "port_rx_dp_streaming_packets": 0,
+ "port_rx_good": 3045991334,
+ "port_rx_good_bytes": 893460472927,
+ "port_rx_gtjumbo": 0,
+ "port_rx_lt64": 0,
+ "port_rx_multicast": 1639566045,
+ "port_rx_nodesc_drops": 0,
+ "port_rx_overflow": 0,
+ "port_rx_packets": 3045991334,
+ "port_rx_pause": 0,
+ "port_rx_pm_discard_bb_overflow": 0,
+ "port_rx_pm_discard_mapping": 0,
+ "port_rx_pm_discard_qbb": 0,
+ "port_rx_pm_discard_vfifo_full": 0,
+ "port_rx_pm_trunc_bb_overflow": 0,
+ "port_rx_pm_trunc_qbb": 0,
+ "port_rx_pm_trunc_vfifo_full": 0,
+ "port_rx_unicast": 1399997040,
+ "port_tx_1024_to_15xx": 236,
+ "port_tx_128_to_255": 275090219,
+ "port_tx_15xx_to_jumbo": 926,
+ "port_tx_256_to_511": 48567221,
+ "port_tx_512_to_1023": 5142016,
+ "port_tx_64": 113903973,
+ "port_tx_65_to_127": 161935699,
+ "port_tx_broadcast": 8,
+ "port_tx_bytes": 94357131016,
+ "port_tx_control": 0,
+ "port_tx_lt64": 0,
+ "port_tx_multicast": 325891647,
+ "port_tx_packets": 604640290,
+ "port_tx_pause": 0,
+ "port_tx_unicast": 278748635,
+ "ptp_bad_syncs": 1,
+ "ptp_fast_syncs": 1,
+ "ptp_filter_matches": 0,
+ "ptp_good_syncs": 136151,
+ "ptp_invalid_sync_windows": 0,
+ "ptp_no_time_syncs": 1,
+ "ptp_non_filter_matches": 0,
+ "ptp_oversize_sync_windows": 53,
+ "ptp_rx_no_timestamp": 0,
+ "ptp_rx_timestamp_packets": 0,
+ "ptp_sync_timeouts": 1,
+ "ptp_timestamp_packets": 0,
+ "ptp_tx_timestamp_packets": 0,
+ "ptp_undersize_sync_windows": 3,
+ "rx-0.rx_packets": 55659234,
+ "rx-1.rx_packets": 87880538,
+ "rx-2.rx_packets": 26746234,
+ "rx-3.rx_packets": 103026471,
+ "rx-4.rx_packets": 0,
+ "rx_eth_crc_err": 0,
+ "rx_frm_trunc": 0,
+ "rx_inner_ip_hdr_chksum_err": 0,
+ "rx_inner_tcp_udp_chksum_err": 0,
+ "rx_ip_hdr_chksum_err": 0,
+ "rx_mcast_mismatch": 0,
+ "rx_merge_events": 0,
+ "rx_merge_packets": 0,
+ "rx_nodesc_trunc": 0,
+ "rx_noskb_drops": 0,
+ "rx_outer_ip_hdr_chksum_err": 0,
+ "rx_outer_tcp_udp_chksum_err": 0,
+ "rx_reset": 0,
+ "rx_tcp_udp_chksum_err": 0,
+ "rx_tobe_disc": 0,
+ "tx-0.tx_packets": 85843565,
+ "tx-1.tx_packets": 108642725,
+ "tx-2.tx_packets": 202596078,
+ "tx-3.tx_packets": 207561010,
+ "tx-4.tx_packets": 0,
+ "tx_cb_packets": 4,
+ "tx_merge_events": 11025,
+ "tx_pio_packets": 531928114,
+ "tx_pushes": 604643378,
+ "tx_tso_bursts": 0,
+ "tx_tso_fallbacks": 0,
+ "tx_tso_long_headers": 0,
+ }
+ eth1 := &InterfaceMock{"eth1", "driver1", eth1Stat, false}
+ interfaceMap[eth1.Name] = eth1
+
+ eth2Stat := map[string]uint64{
+ "port_rx_1024_to_15xx": 11529312,
+ "port_rx_128_to_255": 1868952037,
+ "port_rx_15xx_to_jumbo": 130339387,
+ "port_rx_256_to_511": 843846270,
+ "port_rx_512_to_1023": 173194372,
+ "port_rx_64": 9190374,
+ "port_rx_65_to_127": 507806115,
+ "port_rx_bad": 0,
+ "port_rx_bad_bytes": 0,
+ "port_rx_bad_gtjumbo": 0,
+ "port_rx_broadcast": 6648019,
+ "port_rx_bytes": 1007358162202,
+ "port_rx_control": 0,
+ "port_rx_dp_di_dropped_packets": 3164124639,
+ "port_rx_dp_hlb_fetch": 0,
+ "port_rx_dp_hlb_wait": 0,
+ "port_rx_dp_q_disabled_packets": 0,
+ "port_rx_dp_streaming_packets": 0,
+ "port_rx_good": 3544857867,
+ "port_rx_good_bytes": 1007358162202,
+ "port_rx_gtjumbo": 0,
+ "port_rx_lt64": 0,
+ "port_rx_multicast": 2231999743,
+ "port_rx_nodesc_drops": 0,
+ "port_rx_overflow": 0,
+ "port_rx_packets": 3544857867,
+ "port_rx_pause": 0,
+ "port_rx_pm_discard_bb_overflow": 0,
+ "port_rx_pm_discard_mapping": 0,
+ "port_rx_pm_discard_qbb": 0,
+ "port_rx_pm_discard_vfifo_full": 0,
+ "port_rx_pm_trunc_bb_overflow": 0,
+ "port_rx_pm_trunc_qbb": 0,
+ "port_rx_pm_trunc_vfifo_full": 0,
+ "port_rx_unicast": 1306210105,
+ "port_tx_1024_to_15xx": 379,
+ "port_tx_128_to_255": 202767251,
+ "port_tx_15xx_to_jumbo": 558,
+ "port_tx_256_to_511": 31454719,
+ "port_tx_512_to_1023": 6865731,
+ "port_tx_64": 17268276,
+ "port_tx_65_to_127": 272816313,
+ "port_tx_broadcast": 6,
+ "port_tx_bytes": 78071946593,
+ "port_tx_control": 0,
+ "port_tx_lt64": 0,
+ "port_tx_multicast": 239510586,
+ "port_tx_packets": 531173227,
+ "port_tx_pause": 0,
+ "port_tx_unicast": 291662635,
+ "ptp_bad_syncs": 0,
+ "ptp_fast_syncs": 0,
+ "ptp_filter_matches": 0,
+ "ptp_good_syncs": 0,
+ "ptp_invalid_sync_windows": 0,
+ "ptp_no_time_syncs": 0,
+ "ptp_non_filter_matches": 0,
+ "ptp_oversize_sync_windows": 0,
+ "ptp_rx_no_timestamp": 0,
+ "ptp_rx_timestamp_packets": 0,
+ "ptp_sync_timeouts": 0,
+ "ptp_timestamp_packets": 0,
+ "ptp_tx_timestamp_packets": 0,
+ "ptp_undersize_sync_windows": 0,
+ "rx-0.rx_packets": 84587075,
+ "rx-1.rx_packets": 74029305,
+ "rx-2.rx_packets": 134586471,
+ "rx-3.rx_packets": 87531322,
+ "rx-4.rx_packets": 0,
+ "rx_eth_crc_err": 0,
+ "rx_frm_trunc": 0,
+ "rx_inner_ip_hdr_chksum_err": 0,
+ "rx_inner_tcp_udp_chksum_err": 0,
+ "rx_ip_hdr_chksum_err": 0,
+ "rx_mcast_mismatch": 0,
+ "rx_merge_events": 0,
+ "rx_merge_packets": 0,
+ "rx_nodesc_trunc": 0,
+ "rx_noskb_drops": 0,
+ "rx_outer_ip_hdr_chksum_err": 0,
+ "rx_outer_tcp_udp_chksum_err": 0,
+ "rx_reset": 0,
+ "rx_tcp_udp_chksum_err": 0,
+ "rx_tobe_disc": 0,
+ "tx-0.tx_packets": 232521451,
+ "tx-1.tx_packets": 97876137,
+ "tx-2.tx_packets": 106822111,
+ "tx-3.tx_packets": 93955050,
+ "tx-4.tx_packets": 0,
+ "tx_cb_packets": 1,
+ "tx_merge_events": 8402,
+ "tx_pio_packets": 481040054,
+ "tx_pushes": 531174491,
+ "tx_tso_bursts": 128,
+ "tx_tso_fallbacks": 0,
+ "tx_tso_long_headers": 0,
+ }
+ eth2 := &InterfaceMock{"eth2", "driver1", eth2Stat, false}
+ interfaceMap[eth2.Name] = eth2
+
+ // dummy loopback including dummy stat to ensure that the ignore feature is working
+ lo0Stat := map[string]uint64{
+ "dummy": 0,
+ }
+ lo0 := &InterfaceMock{"lo0", "", lo0Stat, true}
+ interfaceMap[lo0.Name] = lo0
+
+ c := &CommandEthtoolMock{interfaceMap}
+ command = &Ethtool{
+ InterfaceInclude: []string{},
+ InterfaceExclude: []string{},
+ command: c,
+ }
+}
+
+func toStringMapInterface(in map[string]uint64) map[string]interface{} {
+ var m = map[string]interface{}{}
+ for k, v := range in {
+ m[k] = v
+ }
+ return m
+}
+
+func TestGather(t *testing.T) {
+
+ setup()
+ var acc testutil.Accumulator
+
+ err := command.Gather(&acc)
+ assert.NoError(t, err)
+ assert.Len(t, acc.Metrics, 2)
+
+ expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat)
+ expectedTagsEth1 := map[string]string{
+ "interface": "eth1",
+ "driver": "driver1",
+ }
+ acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth1, expectedTagsEth1)
+ expectedFieldsEth2 := toStringMapInterface(interfaceMap["eth2"].Stat)
+ expectedTagsEth2 := map[string]string{
+ "interface": "eth2",
+ "driver": "driver1",
+ }
+ acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2)
+}
+
+func TestGatherIncludeInterfaces(t *testing.T) {
+
+ setup()
+ var acc testutil.Accumulator
+
+ command.InterfaceInclude = append(command.InterfaceInclude, "eth1")
+
+ err := command.Gather(&acc)
+ assert.NoError(t, err)
+ assert.Len(t, acc.Metrics, 1)
+
+ // Should contain eth1
+ expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat)
+ expectedTagsEth1 := map[string]string{
+ "interface": "eth1",
+ "driver": "driver1",
+ }
+ acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth1, expectedTagsEth1)
+
+ // Should not contain eth2
+ expectedFieldsEth2 := toStringMapInterface(interfaceMap["eth2"].Stat)
+ expectedTagsEth2 := map[string]string{
+ "interface": "eth2",
+ "driver": "driver1",
+ }
+ acc.AssertDoesNotContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2)
+}
+
+func TestGatherIgnoreInterfaces(t *testing.T) {
+
+ setup()
+ var acc testutil.Accumulator
+
+ command.InterfaceExclude = append(command.InterfaceExclude, "eth1")
+
+ err := command.Gather(&acc)
+ assert.NoError(t, err)
+ assert.Len(t, acc.Metrics, 1)
+
+ // Should not contain eth1
+ expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat)
+ expectedTagsEth1 := map[string]string{
+ "interface": "eth1",
+ "driver": "driver1",
+ }
+ acc.AssertDoesNotContainsTaggedFields(t, pluginName, expectedFieldsEth1, expectedTagsEth1)
+
+ // Should contain eth2
+ expectedFieldsEth2 := toStringMapInterface(interfaceMap["eth2"].Stat)
+ expectedTagsEth2 := map[string]string{
+ "interface": "eth2",
+ "driver": "driver1",
+ }
+ acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2)
+
+}
diff --git a/plugins/inputs/eventhub_consumer/README.md b/plugins/inputs/eventhub_consumer/README.md
new file mode 100644
index 0000000000000..06c43cf318d39
--- /dev/null
+++ b/plugins/inputs/eventhub_consumer/README.md
@@ -0,0 +1,98 @@
+# Event Hub Consumer Input Plugin
+
+This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub.
+
+### IoT Hub Setup
+
+The main focus for development of this plugin is Azure IoT hub:
+
+1. Create an Azure IoT Hub by following any of the guides provided here: https://docs.microsoft.com/en-us/azure/iot-hub/
+2. Create a device, for example a [simulated Raspberry Pi](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-raspberry-pi-web-simulator-get-started)
+3. The connection string needed for the plugin is located under *Shared access policies*, both the *iothubowner* and *service* policies should work
+
+### Configuration
+
+```toml
+[[inputs.eventhub_consumer]]
+ ## The default behavior is to create a new Event Hub client from environment variables.
+ ## This requires one of the following sets of environment variables to be set:
+ ##
+ ## 1) Expected Environment Variables:
+ ## - "EVENTHUB_NAMESPACE"
+ ## - "EVENTHUB_NAME"
+ ## - "EVENTHUB_CONNECTION_STRING"
+ ##
+ ## 2) Expected Environment Variables:
+ ## - "EVENTHUB_NAMESPACE"
+ ## - "EVENTHUB_NAME"
+ ## - "EVENTHUB_KEY_NAME"
+ ## - "EVENTHUB_KEY_VALUE"
+
+ ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
+ ## This can either be the associated environment variable or hard coded directly.
+ # connection_string = ""
+
+ ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+ # persistence_dir = ""
+
+ ## Change the default consumer group
+ # consumer_group = ""
+
+ ## By default the event hub receives all messages present on the broker, alternative modes can be set below.
+ ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
+ ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run).
+ # from_timestamp =
+ # latest = true
+
+ ## Set a custom prefetch count for the receiver(s)
+ # prefetch_count = 1000
+
+ ## Add an epoch to the receiver(s)
+ # epoch = 0
+
+ ## Change to set a custom user agent, "telegraf" is used by default
+ # user_agent = "telegraf"
+
+ ## To consume from a specific partition, set the partition_ids option.
+ ## An empty array will result in receiving from all partitions.
+ # partition_ids = ["0","1"]
+
+ ## Max undelivered messages
+ # max_undelivered_messages = 1000
+
+ ## Set either option below to true to use a system property as timestamp.
+ ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime.
+ ## It is recommended to use this setting when the data itself has no timestamp.
+ # enqueued_time_as_ts = true
+ # iot_hub_enqueued_time_as_ts = true
+
+ ## Tags or fields to create from keys present in the application property bag.
+ ## These could for example be set by message enrichments in Azure IoT Hub.
+ # application_property_tags = []
+ # application_property_fields = []
+
+ ## Tag or field name to use for metadata
+ ## By default all metadata is disabled
+ # sequence_number_field = "SequenceNumber"
+ # enqueued_time_field = "EnqueuedTime"
+ # offset_field = "Offset"
+ # partition_id_tag = "PartitionID"
+ # partition_key_tag = "PartitionKey"
+ # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID"
+ # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID"
+ # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod"
+ # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID"
+ # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime"
+
+ ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "influx"
+```
+
+#### Environment Variables
+
+[Full documentation of the available environment variables][envvar].
+
+[envvar]: https://github.com/Azure/azure-event-hubs-go#environment-variables
diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go
new file mode 100644
index 0000000000000..17092de3217eb
--- /dev/null
+++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go
@@ -0,0 +1,422 @@
+package eventhub
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ eventhub "github.com/Azure/azure-event-hubs-go/v3"
+ "github.com/Azure/azure-event-hubs-go/v3/persist"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/parsers"
+)
+
+const (
+ defaultMaxUndeliveredMessages = 1000
+)
+
+type empty struct{}
+type semaphore chan empty
+
+// EventHub is the top level struct for this plugin
+type EventHub struct {
+ // Configuration
+ ConnectionString string `toml:"connection_string"`
+ PersistenceDir string `toml:"persistence_dir"`
+ ConsumerGroup string `toml:"consumer_group"`
+ FromTimestamp time.Time `toml:"from_timestamp"`
+ Latest bool `toml:"latest"`
+ PrefetchCount uint32 `toml:"prefetch_count"`
+ Epoch int64 `toml:"epoch"`
+ UserAgent string `toml:"user_agent"`
+ PartitionIDs []string `toml:"partition_ids"`
+ MaxUndeliveredMessages int `toml:"max_undelivered_messages"`
+ EnqueuedTimeAsTs bool `toml:"enqueued_time_as_ts"`
+ IotHubEnqueuedTimeAsTs bool `toml:"iot_hub_enqueued_time_as_ts"`
+
+ // Metadata
+ ApplicationPropertyFields []string `toml:"application_property_fields"`
+ ApplicationPropertyTags []string `toml:"application_property_tags"`
+ SequenceNumberField string `toml:"sequence_number_field"`
+ EnqueuedTimeField string `toml:"enqueued_time_field"`
+ OffsetField string `toml:"offset_field"`
+ PartitionIDTag string `toml:"partition_id_tag"`
+ PartitionKeyTag string `toml:"partition_key_tag"`
+ IoTHubDeviceConnectionIDTag string `toml:"iot_hub_device_connection_id_tag"`
+ IoTHubAuthGenerationIDTag string `toml:"iot_hub_auth_generation_id_tag"`
+ IoTHubConnectionAuthMethodTag string `toml:"iot_hub_connection_auth_method_tag"`
+ IoTHubConnectionModuleIDTag string `toml:"iot_hub_connection_module_id_tag"`
+ IoTHubEnqueuedTimeField string `toml:"iot_hub_enqueued_time_field"`
+
+ Log telegraf.Logger `toml:"-"`
+
+ // Azure
+ hub *eventhub.Hub
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+
+ parser parsers.Parser
+ in chan []telegraf.Metric
+}
+
+// SampleConfig is provided here
+func (*EventHub) SampleConfig() string {
+ return `
+ ## The default behavior is to create a new Event Hub client from environment variables.
+ ## This requires one of the following sets of environment variables to be set:
+ ##
+ ## 1) Expected Environment Variables:
+ ## - "EVENTHUB_NAMESPACE"
+ ## - "EVENTHUB_NAME"
+ ## - "EVENTHUB_CONNECTION_STRING"
+ ##
+ ## 2) Expected Environment Variables:
+ ## - "EVENTHUB_NAMESPACE"
+ ## - "EVENTHUB_NAME"
+ ## - "EVENTHUB_KEY_NAME"
+ ## - "EVENTHUB_KEY_VALUE"
+
+ ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
+ ## This can either be the associated environment variable or hard coded directly.
+ # connection_string = ""
+
+ ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+ # persistence_dir = ""
+
+ ## Change the default consumer group
+ # consumer_group = ""
+
+ ## By default the event hub receives all messages present on the broker, alternative modes can be set below.
+ ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
+ ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run).
+ # from_timestamp =
+ # latest = true
+
+ ## Set a custom prefetch count for the receiver(s)
+ # prefetch_count = 1000
+
+ ## Add an epoch to the receiver(s)
+ # epoch = 0
+
+ ## Change to set a custom user agent, "telegraf" is used by default
+ # user_agent = "telegraf"
+
+ ## To consume from a specific partition, set the partition_ids option.
+ ## An empty array will result in receiving from all partitions.
+ # partition_ids = ["0","1"]
+
+ ## Max undelivered messages
+ # max_undelivered_messages = 1000
+
+ ## Set either option below to true to use a system property as timestamp.
+ ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime.
+ ## It is recommended to use this setting when the data itself has no timestamp.
+ # enqueued_time_as_ts = true
+ # iot_hub_enqueued_time_as_ts = true
+
+ ## Tags or fields to create from keys present in the application property bag.
+ ## These could for example be set by message enrichments in Azure IoT Hub.
+ # application_property_tags = []
+ # application_property_fields = []
+
+ ## Tag or field name to use for metadata
+ ## By default all metadata is disabled
+ # sequence_number_field = "SequenceNumber"
+ # enqueued_time_field = "EnqueuedTime"
+ # offset_field = "Offset"
+ # partition_id_tag = "PartitionID"
+ # partition_key_tag = "PartitionKey"
+ # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID"
+ # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID"
+ # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod"
+ # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID"
+ # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime"
+
+ ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "influx"
+ `
+}
+
+// Description of the plugin
+func (*EventHub) Description() string {
+ return "Azure Event Hubs service input plugin"
+}
+
+// SetParser sets the parser
+func (e *EventHub) SetParser(parser parsers.Parser) {
+ e.parser = parser
+}
+
+// Gather function is unused
+func (*EventHub) Gather(telegraf.Accumulator) error {
+ return nil
+}
+
+// Init the EventHub ServiceInput
+func (e *EventHub) Init() (err error) {
+ if e.MaxUndeliveredMessages == 0 {
+ e.MaxUndeliveredMessages = defaultMaxUndeliveredMessages
+ }
+
+ // Set hub options
+ hubOpts := []eventhub.HubOption{}
+
+ if e.PersistenceDir != "" {
+ persister, err := persist.NewFilePersister(e.PersistenceDir)
+ if err != nil {
+ return err
+ }
+
+ hubOpts = append(hubOpts, eventhub.HubWithOffsetPersistence(persister))
+ }
+
+ if e.UserAgent != "" {
+ hubOpts = append(hubOpts, eventhub.HubWithUserAgent(e.UserAgent))
+ } else {
+ hubOpts = append(hubOpts, eventhub.HubWithUserAgent(internal.ProductToken()))
+ }
+
+ // Create event hub connection
+ if e.ConnectionString != "" {
+ e.hub, err = eventhub.NewHubFromConnectionString(e.ConnectionString, hubOpts...)
+ } else {
+ e.hub, err = eventhub.NewHubFromEnvironment(hubOpts...)
+ }
+
+ return err
+}
+
+// Start the EventHub ServiceInput
+func (e *EventHub) Start(acc telegraf.Accumulator) error {
+ e.in = make(chan []telegraf.Metric)
+
+ var ctx context.Context
+ ctx, e.cancel = context.WithCancel(context.Background())
+
+ // Start tracking
+ e.wg.Add(1)
+ go func() {
+ defer e.wg.Done()
+ e.startTracking(ctx, acc)
+ }()
+
+ // Configure receiver options
+ receiveOpts, err := e.configureReceiver()
+ if err != nil {
+ return err
+ }
+
+ partitions := e.PartitionIDs
+
+ if len(e.PartitionIDs) == 0 {
+ runtimeinfo, err := e.hub.GetRuntimeInformation(ctx)
+ if err != nil {
+ return err
+ }
+
+ partitions = runtimeinfo.PartitionIDs
+ }
+
+ for _, partitionID := range partitions {
+ _, err = e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...)
+ if err != nil {
+ return fmt.Errorf("creating receiver for partition %q: %v", partitionID, err)
+ }
+ }
+
+ return nil
+}
+
+func (e *EventHub) configureReceiver() ([]eventhub.ReceiveOption, error) {
+ receiveOpts := []eventhub.ReceiveOption{}
+
+ if e.ConsumerGroup != "" {
+ receiveOpts = append(receiveOpts, eventhub.ReceiveWithConsumerGroup(e.ConsumerGroup))
+ }
+
+ if !e.FromTimestamp.IsZero() {
+ receiveOpts = append(receiveOpts, eventhub.ReceiveFromTimestamp(e.FromTimestamp))
+ } else if e.Latest {
+ receiveOpts = append(receiveOpts, eventhub.ReceiveWithLatestOffset())
+ }
+
+ if e.PrefetchCount != 0 {
+ receiveOpts = append(receiveOpts, eventhub.ReceiveWithPrefetchCount(e.PrefetchCount))
+ }
+
+ if e.Epoch != 0 {
+ receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch))
+ }
+
+ return receiveOpts, nil
+}
+
+// OnMessage handles an Event. When this function returns without error the
+// Event is immediately accepted and the offset is updated. If an error is
+// returned the Event is marked for redelivery.
+func (e *EventHub) onMessage(ctx context.Context, event *eventhub.Event) error {
+ metrics, err := e.createMetrics(event)
+ if err != nil {
+ return err
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case e.in <- metrics:
+ return nil
+ }
+}
+
+// OnDelivery returns true if a new slot has opened up in the TrackingAccumulator.
+func (e *EventHub) onDelivery(
+ acc telegraf.TrackingAccumulator,
+ groups map[telegraf.TrackingID][]telegraf.Metric,
+ track telegraf.DeliveryInfo,
+) bool {
+ if track.Delivered() {
+ delete(groups, track.ID())
+ return true
+ }
+
+ // The metric was already accepted when onMessage completed, so we can't
+ // fallback on redelivery from Event Hub. Add a new copy of the metric for
+ // reprocessing.
+ metrics, ok := groups[track.ID()]
+ delete(groups, track.ID())
+ if !ok {
+ // The metrics should always be found, this message indicates a programming error.
+ e.Log.Errorf("Could not find delivery: %d", track.ID())
+ return true
+ }
+
+ backup := deepCopyMetrics(metrics)
+ id := acc.AddTrackingMetricGroup(metrics)
+ groups[id] = backup
+ return false
+}
+
+func (e *EventHub) startTracking(ctx context.Context, ac telegraf.Accumulator) {
+ acc := ac.WithTracking(e.MaxUndeliveredMessages)
+ sem := make(semaphore, e.MaxUndeliveredMessages)
+ groups := make(map[telegraf.TrackingID][]telegraf.Metric, e.MaxUndeliveredMessages)
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case track := <-acc.Delivered():
+ if e.onDelivery(acc, groups, track) {
+ <-sem
+ }
+ case sem <- empty{}:
+ select {
+ case <-ctx.Done():
+ return
+ case track := <-acc.Delivered():
+ if e.onDelivery(acc, groups, track) {
+ <-sem
+ <-sem
+ }
+ case metrics := <-e.in:
+ backup := deepCopyMetrics(metrics)
+ id := acc.AddTrackingMetricGroup(metrics)
+ groups[id] = backup
+ }
+ }
+ }
+}
+
+func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric {
+ metrics := make([]telegraf.Metric, 0, len(in))
+ for _, m := range in {
+ metrics = append(metrics, m.Copy())
+ }
+ return metrics
+}
+
+// CreateMetrics returns the Metrics from the Event.
+func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) {
+ metrics, err := e.parser.Parse(event.Data)
+ if err != nil {
+ return nil, err
+ }
+
+ for i := range metrics {
+ for _, field := range e.ApplicationPropertyFields {
+ if val, ok := event.Get(field); ok {
+ metrics[i].AddField(field, val)
+ }
+ }
+
+ for _, tag := range e.ApplicationPropertyTags {
+ if val, ok := event.Get(tag); ok {
+ metrics[i].AddTag(tag, fmt.Sprintf("%v", val))
+ }
+ }
+
+ if e.SequenceNumberField != "" {
+ metrics[i].AddField(e.SequenceNumberField, *event.SystemProperties.SequenceNumber)
+ }
+
+ if e.EnqueuedTimeAsTs {
+ metrics[i].SetTime(*event.SystemProperties.EnqueuedTime)
+ } else if e.EnqueuedTimeField != "" {
+ metrics[i].AddField(e.EnqueuedTimeField, (*event.SystemProperties.EnqueuedTime).UnixNano()/int64(time.Millisecond))
+ }
+
+ if e.OffsetField != "" {
+ metrics[i].AddField(e.OffsetField, *event.SystemProperties.Offset)
+ }
+
+ if event.SystemProperties.PartitionID != nil && e.PartitionIDTag != "" {
+ metrics[i].AddTag(e.PartitionIDTag, fmt.Sprintf("%d", *event.SystemProperties.PartitionID))
+ }
+ if event.SystemProperties.PartitionKey != nil && e.PartitionKeyTag != "" {
+ metrics[i].AddTag(e.PartitionKeyTag, *event.SystemProperties.PartitionKey)
+ }
+ if event.SystemProperties.IoTHubDeviceConnectionID != nil && e.IoTHubDeviceConnectionIDTag != "" {
+ metrics[i].AddTag(e.IoTHubDeviceConnectionIDTag, *event.SystemProperties.IoTHubDeviceConnectionID)
+ }
+ if event.SystemProperties.IoTHubAuthGenerationID != nil && e.IoTHubAuthGenerationIDTag != "" {
+ metrics[i].AddTag(e.IoTHubAuthGenerationIDTag, *event.SystemProperties.IoTHubAuthGenerationID)
+ }
+ if event.SystemProperties.IoTHubConnectionAuthMethod != nil && e.IoTHubConnectionAuthMethodTag != "" {
+ metrics[i].AddTag(e.IoTHubConnectionAuthMethodTag, *event.SystemProperties.IoTHubConnectionAuthMethod)
+ }
+ if event.SystemProperties.IoTHubConnectionModuleID != nil && e.IoTHubConnectionModuleIDTag != "" {
+ metrics[i].AddTag(e.IoTHubConnectionModuleIDTag, *event.SystemProperties.IoTHubConnectionModuleID)
+ }
+ if event.SystemProperties.IoTHubEnqueuedTime != nil {
+ if e.IotHubEnqueuedTimeAsTs {
+ metrics[i].SetTime(*event.SystemProperties.IoTHubEnqueuedTime)
+ } else if e.IoTHubEnqueuedTimeField != "" {
+ metrics[i].AddField(e.IoTHubEnqueuedTimeField, (*event.SystemProperties.IoTHubEnqueuedTime).UnixNano()/int64(time.Millisecond))
+ }
+ }
+ }
+
+ return metrics, nil
+}
+
+// Stop the EventHub ServiceInput
+func (e *EventHub) Stop() {
+ err := e.hub.Close(context.Background())
+ if err != nil {
+ e.Log.Errorf("Error closing Event Hub connection: %v", err)
+ }
+ e.cancel()
+ e.wg.Wait()
+}
+
+func init() {
+ inputs.Add("eventhub_consumer", func() telegraf.Input {
+ return &EventHub{}
+ })
+}
diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md
index f4e9172424739..8ed0b51110d1d 100644
--- a/plugins/inputs/exec/README.md
+++ b/plugins/inputs/exec/README.md
@@ -50,8 +50,16 @@ It can be paired with the following configuration and will be run at the `interv
### Common Issues:
-#### Q: My script works when I run it by hand, but not when Telegraf is running as a service.
+#### My script works when I run it by hand, but not when Telegraf is running as a service.
This may be related to the Telegraf service running as a different user. The
official packages run Telegraf as the `telegraf` user and group on Linux
systems.
+
+#### With a PowerShell on Windows, the output of the script appears to be truncated.
+
+You may need to set a variable in your script to increase the number of columns
+available for output:
+```
+$host.UI.RawUI.BufferSize = new-object System.Management.Automation.Host.Size(1024,50)
+```
diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go
index 615736b3c1c84..cb4420b0f246f 100644
--- a/plugins/inputs/exec/exec.go
+++ b/plugins/inputs/exec/exec.go
@@ -3,7 +3,6 @@ package exec
import (
"bytes"
"fmt"
- "log"
"os/exec"
"path/filepath"
"runtime"
@@ -11,13 +10,12 @@ import (
"sync"
"time"
- "github.com/kballard/go-shellquote"
-
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/parsers/nagios"
+ "github.com/kballard/go-shellquote"
)
const sampleConfig = `
@@ -51,6 +49,7 @@ type Exec struct {
parser parsers.Parser
runner Runner
+ Log telegraf.Logger `toml:"-"`
}
func NewExec() *Exec {
@@ -161,7 +160,7 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync
if isNagios {
metrics, err = nagios.TryAddState(runErr, metrics)
if err != nil {
- log.Printf("E! [inputs.exec] failed to add nagios state: %s", err)
+ e.Log.Errorf("Failed to add nagios state: %s", err)
}
}
@@ -229,6 +228,10 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
return nil
}
+func (e *Exec) Init() error {
+ return nil
+}
+
func init() {
inputs.Add("exec", func() telegraf.Input {
return NewExec()
diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go
index 5aaef8961ee69..d0fcc71f668e5 100644
--- a/plugins/inputs/exec/exec_test.go
+++ b/plugins/inputs/exec/exec_test.go
@@ -8,7 +8,6 @@ import (
"time"
"github.com/influxdata/telegraf/plugins/parsers"
-
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -97,6 +96,7 @@ func TestExec(t *testing.T) {
MetricName: "exec",
})
e := &Exec{
+ Log: testutil.Logger{},
runner: newRunnerMock([]byte(validJson), nil, nil),
Commands: []string{"testcommand arg1"},
parser: parser,
@@ -126,6 +126,7 @@ func TestExecMalformed(t *testing.T) {
MetricName: "exec",
})
e := &Exec{
+ Log: testutil.Logger{},
runner: newRunnerMock([]byte(malformedJson), nil, nil),
Commands: []string{"badcommand arg1"},
parser: parser,
@@ -142,6 +143,7 @@ func TestCommandError(t *testing.T) {
MetricName: "exec",
})
e := &Exec{
+ Log: testutil.Logger{},
runner: newRunnerMock(nil, nil, fmt.Errorf("exit status code 1")),
Commands: []string{"badcommand"},
parser: parser,
diff --git a/plugins/inputs/execd/README.md b/plugins/inputs/execd/README.md
new file mode 100644
index 0000000000000..aa37e7cd7696a
--- /dev/null
+++ b/plugins/inputs/execd/README.md
@@ -0,0 +1,122 @@
+# Execd Input Plugin
+
+The `execd` plugin runs an external program as a long-running daemon.
+The programs must output metrics in any one of the accepted
+[Input Data Formats][] on the process's STDOUT, and is expected to
+stay running. If you'd instead like the process to collect metrics and then exit,
+check out the [inputs.exec][] plugin.
+
+The `signal` can be configured to send a signal the running daemon on each
+collection interval. This is used for when you want to have Telegraf notify the
+plugin when it's time to run collection. STDIN is recommended, which writes a
+new line to the process's STDIN.
+
+STDERR from the process will be relayed to Telegraf as errors in the logs.
+
+### Configuration:
+
+```toml
+[[inputs.execd]]
+ ## One program to run as daemon.
+ ## NOTE: process and each argument should each be their own string
+ command = ["telegraf-smartctl", "-d", "/dev/sda"]
+
+ ## Define how the process is signaled on each collection interval.
+ ## Valid values are:
+ ## "none" : Do not signal anything. (Recommended for service inputs)
+ ## The process must output metrics by itself.
+ ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs)
+ ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended)
+ ## "SIGUSR1" : Send a USR1 signal. Not available on Windows.
+ ## "SIGUSR2" : Send a USR2 signal. Not available on Windows.
+ signal = "none"
+
+ ## Delay before the process is restarted after an unexpected termination
+ restart_delay = "10s"
+
+ ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "influx"
+```
+
+### Example
+
+##### Daemon written in bash using STDIN signaling
+
+```bash
+#!/bin/bash
+
+counter=0
+
+while IFS= read -r LINE; do
+ echo "counter_bash count=${counter}"
+ let counter=counter+1
+done
+```
+
+```toml
+[[inputs.execd]]
+ command = ["plugins/inputs/execd/examples/count.sh"]
+ signal = "STDIN"
+```
+
+##### Go daemon using SIGHUP
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func main() {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, syscall.SIGHUP)
+
+ counter := 0
+
+ for {
+ <-c
+
+ fmt.Printf("counter_go count=%d\n", counter)
+ counter++
+ }
+}
+
+```
+
+```toml
+[[inputs.execd]]
+ command = ["plugins/inputs/execd/examples/count.go.exe"]
+ signal = "SIGHUP"
+```
+
+##### Ruby daemon running standalone
+
+```ruby
+#!/usr/bin/env ruby
+
+counter = 0
+
+loop do
+ puts "counter_ruby count=#{counter}"
+ STDOUT.flush
+
+ counter += 1
+ sleep 1
+end
+```
+
+```toml
+[[inputs.execd]]
+ command = ["plugins/inputs/execd/examples/count.rb"]
+ signal = "none"
+```
+
+[Input Data Formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+[inputs.exec]: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/exec/README.md
diff --git a/plugins/inputs/execd/examples/count.go b/plugins/inputs/execd/examples/count.go
new file mode 100644
index 0000000000000..d5e4a12e13359
--- /dev/null
+++ b/plugins/inputs/execd/examples/count.go
@@ -0,0 +1,24 @@
+package main
+
+// Example using HUP signaling
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func main() {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, syscall.SIGHUP)
+
+ counter := 0
+
+ for {
+ <-c
+
+ fmt.Printf("counter_go count=%d\n", counter)
+ counter++
+ }
+}
diff --git a/plugins/inputs/execd/examples/count.rb b/plugins/inputs/execd/examples/count.rb
new file mode 100755
index 0000000000000..6b60fbc17ebfb
--- /dev/null
+++ b/plugins/inputs/execd/examples/count.rb
@@ -0,0 +1,21 @@
+#!/usr/bin/env ruby
+
+## Example in Ruby not using any signaling
+
+counter = 0
+
+def time_ns_str(t)
+ ns = t.nsec.to_s
+ (9 - ns.size).times do
+ ns = "0" + ns # left pad
+ end
+ t.to_i.to_s + ns
+end
+
+loop do
+ puts "counter_ruby count=#{counter} #{time_ns_str(Time.now)}"
+ STDOUT.flush
+ counter += 1
+
+ sleep 1
+end
diff --git a/plugins/inputs/execd/examples/count.sh b/plugins/inputs/execd/examples/count.sh
new file mode 100755
index 0000000000000..bbbe8619c94e8
--- /dev/null
+++ b/plugins/inputs/execd/examples/count.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+## Example in bash using STDIN signaling
+
+counter=0
+
+while read LINE; do
+ echo "counter_bash count=${counter}"
+ counter=$((counter+1))
+done
+
+trap "echo terminate 1>&2" EXIT
\ No newline at end of file
diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go
new file mode 100644
index 0000000000000..228c38db50f76
--- /dev/null
+++ b/plugins/inputs/execd/execd.go
@@ -0,0 +1,170 @@
+package execd
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/internal/process"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/plugins/parsers/influx"
+)
+
+const sampleConfig = `
+ ## Program to run as daemon
+ command = ["telegraf-smartctl", "-d", "/dev/sda"]
+
+ ## Define how the process is signaled on each collection interval.
+ ## Valid values are:
+ ## "none" : Do not signal anything.
+ ## The process must output metrics by itself.
+ ## "STDIN" : Send a newline on STDIN.
+ ## "SIGHUP" : Send a HUP signal. Not available on Windows.
+ ## "SIGUSR1" : Send a USR1 signal. Not available on Windows.
+ ## "SIGUSR2" : Send a USR2 signal. Not available on Windows.
+ signal = "none"
+
+ ## Delay before the process is restarted after an unexpected termination
+ restart_delay = "10s"
+
+ ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "influx"
+`
+
+type Execd struct {
+ Command []string `toml:"command"`
+ Signal string `toml:"signal"`
+ RestartDelay config.Duration `toml:"restart_delay"`
+ Log telegraf.Logger `toml:"-"`
+
+ process *process.Process
+ acc telegraf.Accumulator
+ parser parsers.Parser
+}
+
+func (e *Execd) SampleConfig() string {
+ return sampleConfig
+}
+
+func (e *Execd) Description() string {
+ return "Run executable as long-running input plugin"
+}
+
+func (e *Execd) SetParser(parser parsers.Parser) {
+ e.parser = parser
+}
+
+func (e *Execd) Start(acc telegraf.Accumulator) error {
+ e.acc = acc
+ var err error
+ e.process, err = process.New(e.Command)
+ if err != nil {
+ return fmt.Errorf("error creating new process: %w", err)
+ }
+ e.process.Log = e.Log
+ e.process.RestartDelay = time.Duration(e.RestartDelay)
+ e.process.ReadStdoutFn = e.cmdReadOut
+ e.process.ReadStderrFn = e.cmdReadErr
+
+ if err = e.process.Start(); err != nil {
+ // if there was only one argument, and it contained spaces, warn the user
+ // that they may have configured it wrong.
+ if len(e.Command) == 1 && strings.Contains(e.Command[0], " ") {
+ e.Log.Warn("The inputs.execd Command contained spaces but no arguments. " +
+ "This setting expects the program and arguments as an array of strings, " +
+ "not as a space-delimited string. See the plugin readme for an example.")
+ }
+ return fmt.Errorf("failed to start process %s: %w", e.Command, err)
+ }
+
+ return nil
+}
+
+func (e *Execd) Stop() {
+ e.process.Stop()
+}
+
+func (e *Execd) cmdReadOut(out io.Reader) {
+ if _, isInfluxParser := e.parser.(*influx.Parser); isInfluxParser {
+ // work around the lack of built-in streaming parser. :(
+ e.cmdReadOutStream(out)
+ return
+ }
+
+ scanner := bufio.NewScanner(out)
+
+ for scanner.Scan() {
+ metrics, err := e.parser.Parse(scanner.Bytes())
+ if err != nil {
+ e.acc.AddError(fmt.Errorf("parse error: %w", err))
+ }
+
+ for _, metric := range metrics {
+ e.acc.AddMetric(metric)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ e.acc.AddError(fmt.Errorf("error reading stdout: %w", err))
+ }
+}
+
+func (e *Execd) cmdReadOutStream(out io.Reader) {
+ parser := influx.NewStreamParser(out)
+
+ for {
+ metric, err := parser.Next()
+ if err != nil {
+ if err == influx.EOF {
+ break // stream ended
+ }
+ if parseErr, isParseError := err.(*influx.ParseError); isParseError {
+ // parse error.
+ e.acc.AddError(parseErr)
+ continue
+ }
+ // some non-recoverable error?
+ e.acc.AddError(err)
+ return
+ }
+
+ e.acc.AddMetric(metric)
+ }
+}
+
+func (e *Execd) cmdReadErr(out io.Reader) {
+ scanner := bufio.NewScanner(out)
+
+ for scanner.Scan() {
+ e.Log.Errorf("stderr: %q", scanner.Text())
+ }
+
+ if err := scanner.Err(); err != nil {
+ e.acc.AddError(fmt.Errorf("error reading stderr: %w", err))
+ }
+}
+
+func (e *Execd) Init() error {
+ if len(e.Command) == 0 {
+ return errors.New("no command specified")
+ }
+ return nil
+}
+
+func init() {
+ inputs.Add("execd", func() telegraf.Input {
+ return &Execd{
+ Signal: "none",
+ RestartDelay: config.Duration(10 * time.Second),
+ }
+ })
+}
diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go
new file mode 100644
index 0000000000000..4d8789a8d3215
--- /dev/null
+++ b/plugins/inputs/execd/execd_posix.go
@@ -0,0 +1,44 @@
+// +build !windows
+
+package execd
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "syscall"
+ "time"
+
+ "github.com/influxdata/telegraf"
+)
+
+func (e *Execd) Gather(acc telegraf.Accumulator) error {
+ if e.process == nil || e.process.Cmd == nil {
+ return nil
+ }
+
+ osProcess := e.process.Cmd.Process
+ if osProcess == nil {
+ return nil
+ }
+ switch e.Signal {
+ case "SIGHUP":
+ osProcess.Signal(syscall.SIGHUP)
+ case "SIGUSR1":
+ osProcess.Signal(syscall.SIGUSR1)
+ case "SIGUSR2":
+ osProcess.Signal(syscall.SIGUSR2)
+ case "STDIN":
+ if osStdin, ok := e.process.Stdin.(*os.File); ok {
+ osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second))
+ }
+ if _, err := io.WriteString(e.process.Stdin, "\n"); err != nil {
+ return fmt.Errorf("Error writing to stdin: %s", err)
+ }
+ case "none":
+ default:
+ return fmt.Errorf("invalid signal: %s", e.Signal)
+ }
+
+ return nil
+}
diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go
new file mode 100644
index 0000000000000..a7be617da3a48
--- /dev/null
+++ b/plugins/inputs/execd/execd_test.go
@@ -0,0 +1,193 @@
+// +build !windows
+
+package execd
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/agent"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/models"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+
+ "github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/plugins/serializers"
+
+ "github.com/influxdata/telegraf"
+)
+
+func TestSettingConfigWorks(t *testing.T) {
+ cfg := `
+ [[inputs.execd]]
+ command = ["a", "b", "c"]
+ restart_delay = "1m"
+ signal = "SIGHUP"
+ `
+ conf := config.NewConfig()
+ require.NoError(t, conf.LoadConfigData([]byte(cfg)))
+
+ require.Len(t, conf.Inputs, 1)
+ inp, ok := conf.Inputs[0].Input.(*Execd)
+ require.True(t, ok)
+ require.EqualValues(t, []string{"a", "b", "c"}, inp.Command)
+ require.EqualValues(t, 1*time.Minute, inp.RestartDelay)
+ require.EqualValues(t, "SIGHUP", inp.Signal)
+}
+
+func TestExternalInputWorks(t *testing.T) {
+ influxParser, err := parsers.NewInfluxParser()
+ require.NoError(t, err)
+
+ exe, err := os.Executable()
+ require.NoError(t, err)
+
+ e := &Execd{
+ Command: []string{exe, "-counter"},
+ RestartDelay: config.Duration(5 * time.Second),
+ parser: influxParser,
+ Signal: "STDIN",
+ Log: testutil.Logger{},
+ }
+
+ metrics := make(chan telegraf.Metric, 10)
+ defer close(metrics)
+ acc := agent.NewAccumulator(&TestMetricMaker{}, metrics)
+
+ require.NoError(t, e.Start(acc))
+ require.NoError(t, e.Gather(acc))
+
+ // grab a metric and make sure it's a thing
+ m := readChanWithTimeout(t, metrics, 10*time.Second)
+
+ e.Stop()
+
+ require.Equal(t, "counter", m.Name())
+ val, ok := m.GetField("count")
+ require.True(t, ok)
+ require.EqualValues(t, 0, val)
+}
+
+func TestParsesLinesContainingNewline(t *testing.T) {
+ parser, err := parsers.NewInfluxParser()
+ require.NoError(t, err)
+
+ metrics := make(chan telegraf.Metric, 10)
+ defer close(metrics)
+ acc := agent.NewAccumulator(&TestMetricMaker{}, metrics)
+
+ e := &Execd{
+ RestartDelay: config.Duration(5 * time.Second),
+ parser: parser,
+ Signal: "STDIN",
+ acc: acc,
+ Log: testutil.Logger{},
+ }
+
+ cases := []struct {
+ Name string
+ Value string
+ }{
+ {
+ Name: "no-newline",
+ Value: "my message",
+ }, {
+ Name: "newline",
+ Value: "my\nmessage",
+ },
+ }
+
+ for _, test := range cases {
+ t.Run(test.Name, func(t *testing.T) {
+ line := fmt.Sprintf("event message=\"%v\" 1587128639239000000", test.Value)
+
+ e.cmdReadOut(strings.NewReader(line))
+
+ m := readChanWithTimeout(t, metrics, 1*time.Second)
+
+ require.Equal(t, "event", m.Name())
+ val, ok := m.GetField("message")
+ require.True(t, ok)
+ require.Equal(t, test.Value, val)
+ })
+ }
+}
+
+func readChanWithTimeout(t *testing.T, metrics chan telegraf.Metric, timeout time.Duration) telegraf.Metric {
+ to := time.NewTimer(timeout)
+ defer to.Stop()
+ select {
+ case m := <-metrics:
+ return m
+ case <-to.C:
+ require.FailNow(t, "timeout waiting for metric")
+ }
+ return nil
+}
+
+type TestMetricMaker struct{}
+
+func (tm *TestMetricMaker) Name() string {
+ return "TestPlugin"
+}
+
+func (tm *TestMetricMaker) LogName() string {
+ return tm.Name()
+}
+
+func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric {
+ return metric
+}
+
+func (tm *TestMetricMaker) Log() telegraf.Logger {
+ return models.NewLogger("TestPlugin", "test", "")
+}
+
+var counter = flag.Bool("counter", false,
+ "if true, act like line input program instead of test")
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if *counter {
+ runCounterProgram()
+ os.Exit(0)
+ }
+ code := m.Run()
+ os.Exit(code)
+}
+
+func runCounterProgram() {
+ i := 0
+ serializer, err := serializers.NewInfluxSerializer()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "ERR InfluxSerializer failed to load")
+ os.Exit(1)
+ }
+
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ metric, _ := metric.New("counter",
+ map[string]string{},
+ map[string]interface{}{
+ "count": i,
+ },
+ time.Now(),
+ )
+ i++
+
+ b, err := serializer.Serialize(metric)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "ERR %v\n", err)
+ os.Exit(1)
+ }
+ fmt.Fprint(os.Stdout, string(b))
+ }
+
+}
diff --git a/plugins/inputs/execd/execd_windows.go b/plugins/inputs/execd/execd_windows.go
new file mode 100644
index 0000000000000..15e6798f2389b
--- /dev/null
+++ b/plugins/inputs/execd/execd_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package execd
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/influxdata/telegraf"
+)
+
+func (e *Execd) Gather(acc telegraf.Accumulator) error {
+ if e.process == nil {
+ return nil
+ }
+
+ switch e.Signal {
+ case "STDIN":
+ if osStdin, ok := e.process.Stdin.(*os.File); ok {
+ osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second))
+ }
+ if _, err := io.WriteString(e.process.Stdin, "\n"); err != nil {
+ return fmt.Errorf("Error writing to stdin: %s", err)
+ }
+ case "none":
+ default:
+ return fmt.Errorf("invalid signal: %s", e.Signal)
+ }
+
+ return nil
+}
diff --git a/plugins/inputs/execd/shim/README.md b/plugins/inputs/execd/shim/README.md
new file mode 100644
index 0000000000000..761e8b931addb
--- /dev/null
+++ b/plugins/inputs/execd/shim/README.md
@@ -0,0 +1,3 @@
+# Telegraf Execd Go Shim
+
+This is deprecated. Please see [/plugins/common/shim/README.md](https://github.com/influxdata/telegraf/tree/master/plugins/common/shim/README.md)
diff --git a/plugins/inputs/execd/shim/example/cmd/main.go b/plugins/inputs/execd/shim/example/cmd/main.go
new file mode 100644
index 0000000000000..601353483e001
--- /dev/null
+++ b/plugins/inputs/execd/shim/example/cmd/main.go
@@ -0,0 +1,3 @@
+package main
+
+// see /plugins/common/shim/example/cmd/main.go instead.
diff --git a/plugins/inputs/execd/shim/example/cmd/plugin.conf b/plugins/inputs/execd/shim/example/cmd/plugin.conf
new file mode 100644
index 0000000000000..53f89a55946ca
--- /dev/null
+++ b/plugins/inputs/execd/shim/example/cmd/plugin.conf
@@ -0,0 +1,2 @@
+[[inputs.my_plugin_name]]
+ value_name = "value"
diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go
new file mode 100644
index 0000000000000..2ea0b839b3e2f
--- /dev/null
+++ b/plugins/inputs/execd/shim/goshim.go
@@ -0,0 +1,325 @@
+package shim
+
+// this package is deprecated. use plugins/common/shim instead
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/signal"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/BurntSushi/toml"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/agent"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/serializers/influx"
+)
+
+type empty struct{}
+
+var (
+ forever = 100 * 365 * 24 * time.Hour
+ envVarEscaper = strings.NewReplacer(
+ `"`, `\"`,
+ `\`, `\\`,
+ )
+)
+
+const (
+ // PollIntervalDisabled is used to indicate that you want to disable polling,
+ // as opposed to duration 0 meaning poll constantly.
+ PollIntervalDisabled = time.Duration(0)
+)
+
+// Shim allows you to wrap your inputs and run them as if they were part of Telegraf,
+// except built externally.
+type Shim struct {
+ Inputs []telegraf.Input
+ gatherPromptChans []chan empty
+ metricCh chan telegraf.Metric
+
+ stdin io.Reader
+ stdout io.Writer
+ stderr io.Writer
+}
+
+var (
+ oldpkg = "github.com/influxdata/telegraf/plugins/inputs/execd/shim"
+ newpkg = "github.com/influxdata/telegraf/plugins/common/shim"
+)
+
+// New creates a new shim interface
+func New() *Shim {
+ fmt.Fprintf(os.Stderr, "%s is deprecated; please change your import to %s\n",
+ oldpkg, newpkg)
+ return &Shim{
+ stdin: os.Stdin,
+ stdout: os.Stdout,
+ stderr: os.Stderr,
+ }
+}
+
+// AddInput adds the input to the shim. Later calls to Run() will run this input.
+func (s *Shim) AddInput(input telegraf.Input) error {
+ if p, ok := input.(telegraf.Initializer); ok {
+ err := p.Init()
+ if err != nil {
+ return fmt.Errorf("failed to init input: %s", err)
+ }
+ }
+
+ s.Inputs = append(s.Inputs, input)
+ return nil
+}
+
+// AddInputs adds multiple inputs to the shim. Later calls to Run() will run these.
+func (s *Shim) AddInputs(newInputs []telegraf.Input) error {
+ for _, inp := range newInputs {
+ if err := s.AddInput(inp); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Run the input plugins..
+func (s *Shim) Run(pollInterval time.Duration) error {
+ // context is used only to close the stdin reader. everything else cascades
+ // from that point and closes cleanly when it's done.
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ s.metricCh = make(chan telegraf.Metric, 1)
+
+ wg := sync.WaitGroup{}
+ quit := make(chan os.Signal, 1)
+ signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+
+ collectMetricsPrompt := make(chan os.Signal, 1)
+ listenForCollectMetricsSignals(ctx, collectMetricsPrompt)
+
+ serializer := influx.NewSerializer()
+
+ for _, input := range s.Inputs {
+ wrappedInput := inputShim{Input: input}
+
+ acc := agent.NewAccumulator(wrappedInput, s.metricCh)
+ acc.SetPrecision(time.Nanosecond)
+
+ if serviceInput, ok := input.(telegraf.ServiceInput); ok {
+ if err := serviceInput.Start(acc); err != nil {
+ return fmt.Errorf("failed to start input: %s", err)
+ }
+ }
+ gatherPromptCh := make(chan empty, 1)
+ s.gatherPromptChans = append(s.gatherPromptChans, gatherPromptCh)
+ wg.Add(1) // one per input
+ go func(input telegraf.Input) {
+ s.startGathering(ctx, input, acc, gatherPromptCh, pollInterval)
+ if serviceInput, ok := input.(telegraf.ServiceInput); ok {
+ serviceInput.Stop()
+ }
+ close(gatherPromptCh)
+ wg.Done()
+ }(input)
+ }
+
+ go s.stdinCollectMetricsPrompt(ctx, cancel, collectMetricsPrompt)
+ go s.closeMetricChannelWhenInputsFinish(&wg)
+
+loop:
+ for {
+ select {
+ case <-quit: // user-triggered quit
+ // cancel, but keep looping until the metric channel closes.
+ cancel()
+ case _, open := <-collectMetricsPrompt:
+ if !open { // stdin-close-triggered quit
+ cancel()
+ continue
+ }
+ s.collectMetrics(ctx)
+ case m, open := <-s.metricCh:
+ if !open {
+ break loop
+ }
+ b, err := serializer.Serialize(m)
+ if err != nil {
+ return fmt.Errorf("failed to serialize metric: %s", err)
+ }
+ // Write this to stdout
+ fmt.Fprint(s.stdout, string(b))
+ }
+ }
+
+ return nil
+}
+
+func hasQuit(ctx context.Context) bool {
+ select {
+ case <-ctx.Done():
+ return true
+ default:
+ return false
+ }
+}
+
+func (s *Shim) stdinCollectMetricsPrompt(ctx context.Context, cancel context.CancelFunc, collectMetricsPrompt chan<- os.Signal) {
+ defer func() {
+ cancel()
+ close(collectMetricsPrompt)
+ }()
+
+ scanner := bufio.NewScanner(s.stdin)
+ // for every line read from stdin, make sure we're not supposed to quit,
+ // then push a message on to the collectMetricsPrompt
+ for scanner.Scan() {
+ // first check if we should quit
+ if hasQuit(ctx) {
+ return
+ }
+
+ // now push a non-blocking message to trigger metric collection.
+ pushCollectMetricsRequest(collectMetricsPrompt)
+ }
+}
+
+// pushCollectMetricsRequest pushes a non-blocking (nil) message to the
+// collectMetricsPrompt channel to trigger metric collection.
+// The channel is defined with a buffer of 1, so while it's full, subsequent
+// requests are discarded.
+func pushCollectMetricsRequest(collectMetricsPrompt chan<- os.Signal) {
+ select {
+ case collectMetricsPrompt <- nil:
+ default:
+ }
+}
+
+func (s *Shim) collectMetrics(ctx context.Context) {
+ if hasQuit(ctx) {
+ return
+ }
+ for i := 0; i < len(s.gatherPromptChans); i++ {
+ // push a message out to each channel to collect metrics. don't block.
+ select {
+ case s.gatherPromptChans[i] <- empty{}:
+ default:
+ }
+ }
+}
+
+func (s *Shim) startGathering(ctx context.Context, input telegraf.Input, acc telegraf.Accumulator, gatherPromptCh <-chan empty, pollInterval time.Duration) {
+ if pollInterval == PollIntervalDisabled {
+ return // don't poll
+ }
+ t := time.NewTicker(pollInterval)
+ defer t.Stop()
+ for {
+ // give priority to stopping.
+ if hasQuit(ctx) {
+ return
+ }
+ // see what's up
+ select {
+ case <-ctx.Done():
+ return
+ case <-gatherPromptCh:
+ if err := input.Gather(acc); err != nil {
+ fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err)
+ }
+ case <-t.C:
+ if err := input.Gather(acc); err != nil {
+ fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err)
+ }
+ }
+ }
+}
+
+// LoadConfig loads and adds the inputs to the shim
+func (s *Shim) LoadConfig(filePath *string) error {
+ loadedInputs, err := LoadConfig(filePath)
+ if err != nil {
+ return err
+ }
+ return s.AddInputs(loadedInputs)
+}
+
+// DefaultImportedPlugins defaults to whatever plugins happen to be loaded and
+// have registered themselves with the registry. This makes loading plugins
+// without having to define a config dead easy.
+func DefaultImportedPlugins() (i []telegraf.Input, e error) {
+ for _, inputCreatorFunc := range inputs.Inputs {
+ i = append(i, inputCreatorFunc())
+ }
+ return i, nil
+}
+
+// LoadConfig loads the config and returns inputs that later need to be loaded.
+func LoadConfig(filePath *string) ([]telegraf.Input, error) {
+ if filePath == nil || *filePath == "" {
+ return DefaultImportedPlugins()
+ }
+
+ b, err := ioutil.ReadFile(*filePath)
+ if err != nil {
+ return nil, err
+ }
+
+ s := expandEnvVars(b)
+
+ conf := struct {
+ Inputs map[string][]toml.Primitive
+ }{}
+
+ md, err := toml.Decode(s, &conf)
+ if err != nil {
+ return nil, err
+ }
+
+ return loadConfigIntoInputs(md, conf.Inputs)
+}
+
+func expandEnvVars(contents []byte) string {
+ return os.Expand(string(contents), getEnv)
+}
+
+func getEnv(key string) string {
+ v := os.Getenv(key)
+
+ return envVarEscaper.Replace(v)
+}
+
+func loadConfigIntoInputs(md toml.MetaData, inputConfigs map[string][]toml.Primitive) ([]telegraf.Input, error) {
+ renderedInputs := []telegraf.Input{}
+
+ for name, primitives := range inputConfigs {
+ inputCreator, ok := inputs.Inputs[name]
+ if !ok {
+ return nil, errors.New("unknown input " + name)
+ }
+
+ for _, primitive := range primitives {
+ inp := inputCreator()
+ // Parse specific configuration
+ if err := md.PrimitiveDecode(primitive, inp); err != nil {
+ return nil, err
+ }
+
+ renderedInputs = append(renderedInputs, inp)
+ }
+ }
+ return renderedInputs, nil
+}
+
+func (s *Shim) closeMetricChannelWhenInputsFinish(wg *sync.WaitGroup) {
+ wg.Wait()
+ close(s.metricCh)
+}
diff --git a/plugins/inputs/execd/shim/goshim_posix.go b/plugins/inputs/execd/shim/goshim_posix.go
new file mode 100644
index 0000000000000..4e4a04f141b65
--- /dev/null
+++ b/plugins/inputs/execd/shim/goshim_posix.go
@@ -0,0 +1,23 @@
+// +build !windows
+
+package shim
+
+import (
+ "context"
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func listenForCollectMetricsSignals(ctx context.Context, collectMetricsPrompt chan os.Signal) {
+ // just listen to all the signals.
+ signal.Notify(collectMetricsPrompt, syscall.SIGHUP, syscall.SIGUSR1, syscall.SIGUSR2)
+
+ go func() {
+ select {
+ case <-ctx.Done():
+ // context done. stop to signals to avoid pushing messages to a closed channel
+ signal.Stop(collectMetricsPrompt)
+ }
+ }()
+}
diff --git a/plugins/inputs/execd/shim/goshim_windows.go b/plugins/inputs/execd/shim/goshim_windows.go
new file mode 100644
index 0000000000000..317f8a2f3d4cb
--- /dev/null
+++ b/plugins/inputs/execd/shim/goshim_windows.go
@@ -0,0 +1,22 @@
+// +build windows
+
+package shim
+
+import (
+ "context"
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func listenForCollectMetricsSignals(ctx context.Context, collectMetricsPrompt chan os.Signal) {
+ signal.Notify(collectMetricsPrompt, syscall.SIGHUP)
+
+ go func() {
+ select {
+ case <-ctx.Done():
+ // context done. stop to signals to avoid pushing messages to a closed channel
+ signal.Stop(collectMetricsPrompt)
+ }
+ }()
+}
diff --git a/plugins/inputs/execd/shim/input.go b/plugins/inputs/execd/shim/input.go
new file mode 100644
index 0000000000000..6dff9cd7f1002
--- /dev/null
+++ b/plugins/inputs/execd/shim/input.go
@@ -0,0 +1,20 @@
+package shim
+
+import "github.com/influxdata/telegraf"
+
+// inputShim implements the MetricMaker interface.
+type inputShim struct {
+ Input telegraf.Input
+}
+
+func (i inputShim) LogName() string {
+ return ""
+}
+
+func (i inputShim) MakeMetric(m telegraf.Metric) telegraf.Metric {
+ return m // don't need to do anything to it.
+}
+
+func (i inputShim) Log() telegraf.Logger {
+ return nil
+}
diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go
new file mode 100644
index 0000000000000..873ef89bf655f
--- /dev/null
+++ b/plugins/inputs/execd/shim/shim_posix_test.go
@@ -0,0 +1,63 @@
+// +build !windows
+
+package shim
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "os"
+ "runtime"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestShimUSR1SignalingWorks(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip()
+ return
+ }
+ stdinReader, stdinWriter := io.Pipe()
+ stdoutReader, stdoutWriter := io.Pipe()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ metricProcessed, exited := runInputPlugin(t, 20*time.Minute, stdinReader, stdoutWriter, nil)
+
+ // signal USR1 to yourself.
+ pid := os.Getpid()
+ process, err := os.FindProcess(pid)
+ require.NoError(t, err)
+
+ go func() {
+ // On slow machines this signal can fire before the service comes up.
+ // rather than depend on accurate sleep times, we'll just retry sending
+ // the signal every so often until it goes through.
+ for {
+ select {
+ case <-ctx.Done():
+ return // test is done
+ default:
+ // test isn't done, keep going.
+ process.Signal(syscall.SIGUSR1)
+ time.Sleep(200 * time.Millisecond)
+ }
+ }
+ }()
+
+ <-metricProcessed
+ cancel()
+
+ r := bufio.NewReader(stdoutReader)
+ out, err := r.ReadString('\n')
+ require.NoError(t, err)
+ require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out)
+
+ stdinWriter.Close()
+ readUntilEmpty(r)
+
+ <-exited
+}
diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go
new file mode 100644
index 0000000000000..dbc3462211222
--- /dev/null
+++ b/plugins/inputs/execd/shim/shim_test.go
@@ -0,0 +1,176 @@
+package shim
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+func TestShimWorks(t *testing.T) {
+ stdoutReader, stdoutWriter := io.Pipe()
+
+ stdin, _ := io.Pipe() // hold the stdin pipe open
+
+ metricProcessed, _ := runInputPlugin(t, 10*time.Millisecond, stdin, stdoutWriter, nil)
+
+ <-metricProcessed
+ r := bufio.NewReader(stdoutReader)
+ out, err := r.ReadString('\n')
+ require.NoError(t, err)
+ require.Contains(t, out, "\n")
+ metricLine := strings.Split(out, "\n")[0]
+ require.Equal(t, "measurement,tag=tag field=1i 1234000005678", metricLine)
+}
+
+func TestShimStdinSignalingWorks(t *testing.T) {
+ stdinReader, stdinWriter := io.Pipe()
+ stdoutReader, stdoutWriter := io.Pipe()
+
+ metricProcessed, exited := runInputPlugin(t, 40*time.Second, stdinReader, stdoutWriter, nil)
+
+ stdinWriter.Write([]byte("\n"))
+
+ <-metricProcessed
+
+ r := bufio.NewReader(stdoutReader)
+ out, err := r.ReadString('\n')
+ require.NoError(t, err)
+ require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out)
+
+ stdinWriter.Close()
+
+ readUntilEmpty(r)
+
+ // check that it exits cleanly
+ <-exited
+}
+
+func runInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdout, stderr io.Writer) (metricProcessed chan bool, exited chan bool) {
+ metricProcessed = make(chan bool)
+ exited = make(chan bool)
+ inp := &testInput{
+ metricProcessed: metricProcessed,
+ }
+
+ shim := New()
+ if stdin != nil {
+ shim.stdin = stdin
+ }
+ if stdout != nil {
+ shim.stdout = stdout
+ }
+ if stderr != nil {
+ shim.stderr = stderr
+ }
+
+ shim.AddInput(inp)
+ go func() {
+ err := shim.Run(interval)
+ require.NoError(t, err)
+ exited <- true
+ }()
+ return metricProcessed, exited
+}
+
+type testInput struct {
+ metricProcessed chan bool
+}
+
+func (i *testInput) SampleConfig() string {
+ return ""
+}
+
+func (i *testInput) Description() string {
+ return ""
+}
+
+func (i *testInput) Gather(acc telegraf.Accumulator) error {
+ acc.AddFields("measurement",
+ map[string]interface{}{
+ "field": 1,
+ },
+ map[string]string{
+ "tag": "tag",
+ }, time.Unix(1234, 5678))
+ i.metricProcessed <- true
+ return nil
+}
+
+func (i *testInput) Start(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func (i *testInput) Stop() {
+}
+
+func TestLoadConfig(t *testing.T) {
+ os.Setenv("SECRET_TOKEN", "xxxxxxxxxx")
+ os.Setenv("SECRET_VALUE", `test"\test`)
+
+ inputs.Add("test", func() telegraf.Input {
+ return &serviceInput{}
+ })
+
+ c := "./testdata/plugin.conf"
+ inputs, err := LoadConfig(&c)
+ require.NoError(t, err)
+
+ inp := inputs[0].(*serviceInput)
+
+ require.Equal(t, "awesome name", inp.ServiceName)
+ require.Equal(t, "xxxxxxxxxx", inp.SecretToken)
+ require.Equal(t, `test"\test`, inp.SecretValue)
+}
+
+type serviceInput struct {
+ ServiceName string `toml:"service_name"`
+ SecretToken string `toml:"secret_token"`
+ SecretValue string `toml:"secret_value"`
+}
+
+func (i *serviceInput) SampleConfig() string {
+ return ""
+}
+
+func (i *serviceInput) Description() string {
+ return ""
+}
+
+func (i *serviceInput) Gather(acc telegraf.Accumulator) error {
+ acc.AddFields("measurement",
+ map[string]interface{}{
+ "field": 1,
+ },
+ map[string]string{
+ "tag": "tag",
+ }, time.Unix(1234, 5678))
+
+ return nil
+}
+
+func (i *serviceInput) Start(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func (i *serviceInput) Stop() {
+}
+
+// we can get stuck if stdout gets clogged up and nobody's reading from it.
+// make sure we keep it going
+func readUntilEmpty(r *bufio.Reader) {
+ go func() {
+ var err error
+ for err != io.EOF {
+ _, err = r.ReadString('\n')
+ time.Sleep(10 * time.Millisecond)
+ }
+ }()
+}
diff --git a/plugins/inputs/execd/shim/testdata/plugin.conf b/plugins/inputs/execd/shim/testdata/plugin.conf
new file mode 100644
index 0000000000000..78dbb33a90683
--- /dev/null
+++ b/plugins/inputs/execd/shim/testdata/plugin.conf
@@ -0,0 +1,4 @@
+[[inputs.test]]
+ service_name = "awesome name"
+ secret_token = "${SECRET_TOKEN}"
+ secret_value = "$SECRET_VALUE"
diff --git a/plugins/inputs/fail2ban/README.md b/plugins/inputs/fail2ban/README.md
index 0b0e654148142..1762bbaf209cb 100644
--- a/plugins/inputs/fail2ban/README.md
+++ b/plugins/inputs/fail2ban/README.md
@@ -1,51 +1,54 @@
# Fail2ban Input Plugin
-The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org).
+The fail2ban plugin gathers the count of failed and banned ip addresses using
+[fail2ban](https://www.fail2ban.org).
This plugin runs the `fail2ban-client` command which generally requires root access.
Acquiring the required permissions can be done using several methods:
-- Use sudo run fail2ban-client.
+- [Use sudo](#using-sudo) run fail2ban-client.
- Run telegraf as root. (not recommended)
-### Using sudo
+### Configuration
-You will need the following in your telegraf config:
```toml
+# Read metrics from fail2ban.
[[inputs.fail2ban]]
- use_sudo = true
+ ## Use sudo to run fail2ban-client
+ use_sudo = false
```
-You will also need to update your sudoers file:
+### Using sudo
+
+Make sure to set `use_sudo = true` in your configuration file.
+
+You will also need to update your sudoers file. It is recommended to modify a
+file in the `/etc/sudoers.d` directory using `visudo`:
+
```bash
-$ visudo
-# Add the following line:
+$ sudo visudo -f /etc/sudoers.d/telegraf
+```
+
+Add the following lines to the file, these commands allow the `telegraf` user
+to call `fail2ban-client` without needing to provide a password and disables
+logging of the call in the auth.log. Consult `man 8 visudo` and `man 5
+sudoers` for details.
+```
Cmnd_Alias FAIL2BAN = /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
telegraf ALL=(root) NOEXEC: NOPASSWD: FAIL2BAN
Defaults!FAIL2BAN !logfile, !syslog, !pam_session
```
-### Configuration:
-
-```toml
-# Read metrics from fail2ban.
-[[inputs.fail2ban]]
- ## Use sudo to run fail2ban-client
- use_sudo = false
-```
-
-### Measurements & Fields:
+### Metrics
- fail2ban
- - failed (integer, count)
- - banned (integer, count)
-
-### Tags:
-
-- All measurements have the following tags:
- - jail
+ - tags:
+ - jail
+ - fields:
+ - failed (integer, count)
+ - banned (integer, count)
-### Example Output:
+### Example Output
```
# fail2ban-client status sshd
diff --git a/plugins/inputs/fibaro/README.md b/plugins/inputs/fibaro/README.md
index 68fda0586506a..54c20310224b3 100644
--- a/plugins/inputs/fibaro/README.md
+++ b/plugins/inputs/fibaro/README.md
@@ -30,6 +30,7 @@ Those values could be true (1) or false (0) for switches, percentage for dimmers
- name (device name)
- type (device type)
- fields:
+ - batteryLevel (float, when available from device)
- energy (float, when available from device)
- power (float, when available from device)
- value (float)
@@ -52,4 +53,5 @@ fibaro,deviceId=220,host=vm1,name=CO2\ (ppm),room=Salon,section=Pièces\ commune
fibaro,deviceId=221,host=vm1,name=Humidité\ (%),room=Salon,section=Pièces\ communes,type=com.fibaro.humiditySensor value=61 1529996807000000000
fibaro,deviceId=222,host=vm1,name=Pression\ (mb),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=1013.7 1529996807000000000
fibaro,deviceId=223,host=vm1,name=Bruit\ (db),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=44 1529996807000000000
+fibaro,deviceId=248,host=vm1,name=Température,room=Garage,section=Extérieur,type=com.fibaro.temperatureSensor batteryLevel=85,value=10.8 1529996807000000000
```
diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go
index 6eacb3ee62ea3..62889cc8dd6f7 100644
--- a/plugins/inputs/fibaro/fibaro.go
+++ b/plugins/inputs/fibaro/fibaro.go
@@ -5,12 +5,15 @@ import (
"fmt"
"net/http"
"strconv"
+ "time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
+const defaultTimeout = 5 * time.Second
+
const sampleConfig = `
## Required Fibaro controller address/hostname.
## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
@@ -28,13 +31,13 @@ const description = "Read devices value(s) from a Fibaro controller"
// Fibaro contains connection information
type Fibaro struct {
- URL string
+ URL string `toml:"url"`
// HTTP Basic Auth Credentials
- Username string
- Password string
+ Username string `toml:"username"`
+ Password string `toml:"password"`
- Timeout internal.Duration
+ Timeout internal.Duration `toml:"timeout"`
client *http.Client
}
@@ -66,11 +69,12 @@ type Devices struct {
Type string `json:"type"`
Enabled bool `json:"enabled"`
Properties struct {
- Dead interface{} `json:"dead"`
- Energy interface{} `json:"energy"`
- Power interface{} `json:"power"`
- Value interface{} `json:"value"`
- Value2 interface{} `json:"value2"`
+ BatteryLevel *string `json:"batteryLevel"`
+ Dead string `json:"dead"`
+ Energy *string `json:"energy"`
+ Power *string `json:"power"`
+ Value interface{} `json:"value"`
+ Value2 *string `json:"value2"`
} `json:"properties"`
}
@@ -94,6 +98,7 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error {
if err != nil {
return err
}
+ defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
@@ -105,8 +110,6 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error {
return err
}
- defer resp.Body.Close()
-
dec := json.NewDecoder(resp.Body)
err = dec.Decode(&dataStruct)
if err != nil {
@@ -172,14 +175,20 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
}
fields := make(map[string]interface{})
+ if device.Properties.BatteryLevel != nil {
+ if fValue, err := strconv.ParseFloat(*device.Properties.BatteryLevel, 64); err == nil {
+ fields["batteryLevel"] = fValue
+ }
+ }
+
if device.Properties.Energy != nil {
- if fValue, err := strconv.ParseFloat(device.Properties.Energy.(string), 64); err == nil {
+ if fValue, err := strconv.ParseFloat(*device.Properties.Energy, 64); err == nil {
fields["energy"] = fValue
}
}
if device.Properties.Power != nil {
- if fValue, err := strconv.ParseFloat(device.Properties.Power.(string), 64); err == nil {
+ if fValue, err := strconv.ParseFloat(*device.Properties.Power, 64); err == nil {
fields["power"] = fValue
}
}
@@ -199,7 +208,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
}
if device.Properties.Value2 != nil {
- if fValue, err := strconv.ParseFloat(device.Properties.Value2.(string), 64); err == nil {
+ if fValue, err := strconv.ParseFloat(*device.Properties.Value2, 64); err == nil {
fields["value2"] = fValue
}
}
@@ -212,6 +221,8 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
func init() {
inputs.Add("fibaro", func() telegraf.Input {
- return &Fibaro{}
+ return &Fibaro{
+ Timeout: internal.Duration{Duration: defaultTimeout},
+ }
})
}
diff --git a/plugins/inputs/fibaro/fibaro_test.go b/plugins/inputs/fibaro/fibaro_test.go
index a58ad7c31d5dc..32a1447e3ef4d 100644
--- a/plugins/inputs/fibaro/fibaro_test.go
+++ b/plugins/inputs/fibaro/fibaro_test.go
@@ -107,6 +107,7 @@ const devicesJSON = `
"type": "com.fibaro.temperatureSensor",
"enabled": true,
"properties": {
+ "batteryLevel": "100",
"dead": "false",
"value": "22.80"
},
@@ -196,7 +197,7 @@ func TestJSONSuccess(t *testing.T) {
// Ensure fields / values are correct - Device 4
tags = map[string]string{"deviceId": "4", "section": "Section 3", "room": "Room 4", "name": "Device 4", "type": "com.fibaro.temperatureSensor"}
- fields = map[string]interface{}{"value": float64(22.8)}
+ fields = map[string]interface{}{"batteryLevel": float64(100), "value": float64(22.8)}
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
// Ensure fields / values are correct - Device 5
diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md
index 4358b67ad2668..ef0fb90b0796c 100644
--- a/plugins/inputs/file/README.md
+++ b/plugins/inputs/file/README.md
@@ -1,25 +1,29 @@
# File Input Plugin
-The file plugin updates a list of files every interval and parses the contents
-using the selected [input data format](/docs/DATA_FORMATS_INPUT.md).
+The file plugin parses the **complete** contents of a file **every interval** using
+the selected [input data format][].
-Files will always be read in their entirety, if you wish to tail/follow a file
-use the [tail input plugin](/plugins/inputs/tail) instead.
+**Note:** If you wish to parse only newly appended lines use the [tail][] input
+plugin instead.
### Configuration:
+
```toml
[[inputs.file]]
- ## Files to parse each interval.
- ## These accept standard unix glob matching rules, but with the addition of
- ## ** as a "super asterisk". ie:
- ## /var/log/**.log -> recursively find all .log files in /var/log
- ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
- ## /var/log/apache.log -> only read the apache log file
- files = ["/var/log/apache/access.log"]
+ ## Files to parse each interval. Accept standard unix glob matching rules,
+ ## as well as ** to match recursive files and directories.
+ files = ["/tmp/metrics.out"]
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
+
+ ## Name a tag containing the name of the file the data was parsed from. Leave empty
+ ## to disable.
+ # file_tag = ""
```
+
+[input data format]: /docs/DATA_FORMATS_INPUT.md
+[tail]: /plugins/inputs/tail
diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go
index b93a7ba9925d0..e431bc6df9f15 100644
--- a/plugins/inputs/file/file.go
+++ b/plugins/inputs/file/file.go
@@ -3,28 +3,44 @@ package file
import (
"fmt"
"io/ioutil"
+ "os"
+ "path/filepath"
+ "github.com/dimchansky/utfbom"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/globpath"
+ "github.com/influxdata/telegraf/plugins/common/encoding"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
type File struct {
- Files []string `toml:"files"`
- parser parsers.Parser
+ Files []string `toml:"files"`
+ FileTag string `toml:"file_tag"`
+ CharacterEncoding string `toml:"character_encoding"`
+ parser parsers.Parser
filenames []string
+ decoder *encoding.Decoder
}
const sampleConfig = `
- ## Files to parse each interval.
- ## These accept standard unix glob matching rules, but with the addition of
- ## ** as a "super asterisk". ie:
- ## /var/log/**.log -> recursively find all .log files in /var/log
- ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
- ## /var/log/apache.log -> only read the apache log file
- files = ["/var/log/apache/access.log"]
+ ## Files to parse each interval. Accept standard unix glob matching rules,
+ ## as well as ** to match recursive files and directories.
+ files = ["/tmp/metrics.out"]
+
+ ## Name a tag containing the name of the file the data was parsed from. Leave empty
+ ## to disable.
+ # file_tag = ""
+
+ ## Character encoding to use when interpreting the file contents. Invalid
+ ## characters are replaced using the unicode replacement character. When set
+ ## to the empty string the data is not decoded to text.
+ ## ex: character_encoding = "utf-8"
+ ## character_encoding = "utf-16le"
+ ## character_encoding = "utf-16be"
+ ## character_encoding = ""
+ # character_encoding = ""
## The dataformat to be read from files
## Each data format has its own unique set of configuration options, read
@@ -39,7 +55,13 @@ func (f *File) SampleConfig() string {
}
func (f *File) Description() string {
- return "Reload and gather from file[s] on telegraf's interval."
+ return "Parse a complete file each interval"
+}
+
+func (f *File) Init() error {
+ var err error
+ f.decoder, err = encoding.NewDecoder(f.CharacterEncoding)
+ return err
}
func (f *File) Gather(acc telegraf.Accumulator) error {
@@ -54,7 +76,10 @@ func (f *File) Gather(acc telegraf.Accumulator) error {
}
for _, m := range metrics {
- acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
+ if f.FileTag != "" {
+ m.AddTag(f.FileTag, filepath.Base(k))
+ }
+ acc.AddMetric(m)
}
}
return nil
@@ -83,12 +108,18 @@ func (f *File) refreshFilePaths() error {
}
func (f *File) readMetric(filename string) ([]telegraf.Metric, error) {
- fileContents, err := ioutil.ReadFile(filename)
+ file, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ r, _ := utfbom.Skip(f.decoder.Reader(file))
+ fileContents, err := ioutil.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("E! Error file: %v could not be read, %s", filename, err)
}
return f.parser.Parse(fileContents)
-
}
func init() {
diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go
index 43322c2e84cf9..427ff25d8c789 100644
--- a/plugins/inputs/file/file_test.go
+++ b/plugins/inputs/file/file_test.go
@@ -4,8 +4,11 @@ import (
"os"
"path/filepath"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/plugins/parsers/csv"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -16,17 +19,51 @@ func TestRefreshFilePaths(t *testing.T) {
r := File{
Files: []string{filepath.Join(wd, "dev/testfiles/**.log")},
}
+ err = r.Init()
+ require.NoError(t, err)
err = r.refreshFilePaths()
require.NoError(t, err)
assert.Equal(t, 2, len(r.filenames))
}
+
+func TestFileTag(t *testing.T) {
+ acc := testutil.Accumulator{}
+ wd, err := os.Getwd()
+ require.NoError(t, err)
+ r := File{
+ Files: []string{filepath.Join(wd, "dev/testfiles/json_a.log")},
+ FileTag: "filename",
+ }
+ err = r.Init()
+ require.NoError(t, err)
+
+ parserConfig := parsers.Config{
+ DataFormat: "json",
+ }
+ nParser, err := parsers.NewParser(&parserConfig)
+ assert.NoError(t, err)
+ r.parser = nParser
+
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+
+ for _, m := range acc.Metrics {
+ for key, value := range m.Tags {
+ assert.Equal(t, r.FileTag, key)
+ assert.Equal(t, filepath.Base(r.Files[0]), value)
+ }
+ }
+}
+
func TestJSONParserCompile(t *testing.T) {
var acc testutil.Accumulator
wd, _ := os.Getwd()
r := File{
Files: []string{filepath.Join(wd, "dev/testfiles/json_a.log")},
}
+ err := r.Init()
+ require.NoError(t, err)
parserConfig := parsers.Config{
DataFormat: "json",
TagKeys: []string{"parent_ignored_child"},
@@ -46,6 +83,8 @@ func TestGrokParser(t *testing.T) {
r := File{
Files: []string{filepath.Join(wd, "dev/testfiles/grok_a.log")},
}
+ err := r.Init()
+ require.NoError(t, err)
parserConfig := parsers.Config{
DataFormat: "grok",
@@ -59,3 +98,152 @@ func TestGrokParser(t *testing.T) {
err = r.Gather(&acc)
assert.Equal(t, len(acc.Metrics), 2)
}
+
+func TestCharacterEncoding(t *testing.T) {
+ expected := []telegraf.Metric{
+ testutil.MustMetric("file",
+ map[string]string{
+ "dest": "example.org",
+ "hop": "1",
+ "ip": "12.122.114.5",
+ },
+ map[string]interface{}{
+ "avg": 21.55,
+ "best": 19.34,
+ "loss": 0.0,
+ "snt": 10,
+ "status": "OK",
+ "stdev": 2.05,
+ "worst": 26.83,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric("file",
+ map[string]string{
+ "dest": "example.org",
+ "hop": "2",
+ "ip": "192.205.32.238",
+ },
+ map[string]interface{}{
+ "avg": 25.11,
+ "best": 20.8,
+ "loss": 0.0,
+ "snt": 10,
+ "status": "OK",
+ "stdev": 6.03,
+ "worst": 38.85,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric("file",
+ map[string]string{
+ "dest": "example.org",
+ "hop": "3",
+ "ip": "152.195.85.133",
+ },
+ map[string]interface{}{
+ "avg": 20.18,
+ "best": 19.75,
+ "loss": 0.0,
+ "snt": 10,
+ "status": "OK",
+ "stdev": 0.0,
+ "worst": 20.78,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric("file",
+ map[string]string{
+ "dest": "example.org",
+ "hop": "4",
+ "ip": "93.184.216.34",
+ },
+ map[string]interface{}{
+ "avg": 24.02,
+ "best": 19.75,
+ "loss": 0.0,
+ "snt": 10,
+ "status": "OK",
+ "stdev": 4.67,
+ "worst": 32.41,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ tests := []struct {
+ name string
+ plugin *File
+ csv *csv.Config
+ file string
+ }{
+ {
+ name: "empty character_encoding with utf-8",
+ plugin: &File{
+ Files: []string{"testdata/mtr-utf-8.csv"},
+ CharacterEncoding: "",
+ },
+ csv: &csv.Config{
+ MetricName: "file",
+ SkipRows: 1,
+ ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"},
+ TagColumns: []string{"dest", "hop", "ip"},
+ },
+ },
+ {
+ name: "utf-8 character_encoding with utf-8",
+ plugin: &File{
+ Files: []string{"testdata/mtr-utf-8.csv"},
+ CharacterEncoding: "utf-8",
+ },
+ csv: &csv.Config{
+ MetricName: "file",
+ SkipRows: 1,
+ ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"},
+ TagColumns: []string{"dest", "hop", "ip"},
+ },
+ },
+ {
+ name: "utf-16le character_encoding with utf-16le",
+ plugin: &File{
+ Files: []string{"testdata/mtr-utf-16le.csv"},
+ CharacterEncoding: "utf-16le",
+ },
+ csv: &csv.Config{
+ MetricName: "file",
+ SkipRows: 1,
+ ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"},
+ TagColumns: []string{"dest", "hop", "ip"},
+ },
+ },
+ {
+ name: "utf-16be character_encoding with utf-16be",
+ plugin: &File{
+ Files: []string{"testdata/mtr-utf-16be.csv"},
+ CharacterEncoding: "utf-16be",
+ },
+ csv: &csv.Config{
+ MetricName: "file",
+ SkipRows: 1,
+ ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"},
+ TagColumns: []string{"dest", "hop", "ip"},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.plugin.Init()
+ require.NoError(t, err)
+
+ parser, err := csv.NewParser(tt.csv)
+ require.NoError(t, err)
+ tt.plugin.SetParser(parser)
+
+ var acc testutil.Accumulator
+ err = tt.plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+ })
+ }
+}
diff --git a/plugins/inputs/file/testdata/mtr-utf-16be.csv b/plugins/inputs/file/testdata/mtr-utf-16be.csv
new file mode 100644
index 0000000000000..c35596aa031fc
Binary files /dev/null and b/plugins/inputs/file/testdata/mtr-utf-16be.csv differ
diff --git a/plugins/inputs/file/testdata/mtr-utf-16le.csv b/plugins/inputs/file/testdata/mtr-utf-16le.csv
new file mode 100644
index 0000000000000..d82ea30719131
Binary files /dev/null and b/plugins/inputs/file/testdata/mtr-utf-16le.csv differ
diff --git a/plugins/inputs/file/testdata/mtr-utf-8.csv b/plugins/inputs/file/testdata/mtr-utf-8.csv
new file mode 100644
index 0000000000000..f5db3cc1b7d51
--- /dev/null
+++ b/plugins/inputs/file/testdata/mtr-utf-8.csv
@@ -0,0 +1,5 @@
+Mtr_Version,Start_Time,Status,Host,Hop,Ip,Loss%,Snt, ,Last,Avg,Best,Wrst,StDev,
+MTR.0.87,1593667013,OK,example.org,1,12.122.114.5,0.00,10,0,21.86,21.55,19.34,26.83,2.05
+MTR.0.87,1593667013,OK,example.org,2,192.205.32.238,0.00,10,0,32.83,25.11,20.80,38.85,6.03
+MTR.0.87,1593667013,OK,example.org,3,152.195.85.133,0.00,10,0,19.75,20.18,19.75,20.78,0.00
+MTR.0.87,1593667013,OK,example.org,4,93.184.216.34,0.00,10,0,19.75,24.02,19.75,32.41,4.67
diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md
index 49e28caa62177..81fc75908e798 100644
--- a/plugins/inputs/filecount/README.md
+++ b/plugins/inputs/filecount/README.md
@@ -27,6 +27,9 @@ Reports the number and total size of files in specified directories.
## Only count regular files. Defaults to true.
regular_only = true
+ ## Follow all symlinks while walking the directory tree. Defaults to false.
+ follow_symlinks = false
+
## Only count files that are at least this size. If size is
## a negative number, only count files that are smaller than the
## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go
index c0072e0d8765f..30815541c8448 100644
--- a/plugins/inputs/filecount/filecount.go
+++ b/plugins/inputs/filecount/filecount.go
@@ -1,7 +1,6 @@
package filecount
import (
- "log"
"os"
"path/filepath"
"time"
@@ -36,6 +35,9 @@ const sampleConfig = `
## Only count regular files. Defaults to true.
regular_only = true
+ ## Follow all symlinks while walking the directory tree. Defaults to false.
+ follow_symlinks = false
+
## Only count files that are at least this size. If size is
## a negative number, only count files that are smaller than the
## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
@@ -49,15 +51,18 @@ const sampleConfig = `
`
type FileCount struct {
- Directory string // deprecated in 1.9
- Directories []string
- Name string
- Recursive bool
- RegularOnly bool
- Size internal.Size
- MTime internal.Duration `toml:"mtime"`
- fileFilters []fileFilterFunc
- globPaths []globpath.GlobPath
+ Directory string // deprecated in 1.9
+ Directories []string
+ Name string
+ Recursive bool
+ RegularOnly bool
+ FollowSymlinks bool
+ Size internal.Size
+ MTime internal.Duration `toml:"mtime"`
+ fileFilters []fileFilterFunc
+ globPaths []globpath.GlobPath
+ Fs fileSystem
+ Log telegraf.Logger
}
func (_ *FileCount) Description() string {
@@ -159,7 +164,7 @@ func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpa
if err == nil && rel == "." {
return nil
}
- file, err := os.Stat(path)
+ file, err := fc.Fs.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return nil
@@ -207,9 +212,10 @@ func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpa
Callback: walkFn,
PostChildrenCallback: postChildrenFn,
Unsorted: true,
+ FollowSymbolicLinks: fc.FollowSymlinks,
ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction {
if os.IsPermission(errors.Cause(err)) {
- log.Println("D! [inputs.filecount]", err)
+ fc.Log.Debug(err)
return godirwalk.SkipNode
}
return godirwalk.Halt
@@ -244,7 +250,7 @@ func (fc *FileCount) Gather(acc telegraf.Accumulator) error {
}
for _, glob := range fc.globPaths {
- for _, dir := range onlyDirectories(glob.GetRoots()) {
+ for _, dir := range fc.onlyDirectories(glob.GetRoots()) {
fc.count(acc, dir, glob)
}
}
@@ -252,10 +258,10 @@ func (fc *FileCount) Gather(acc telegraf.Accumulator) error {
return nil
}
-func onlyDirectories(directories []string) []string {
+func (fc *FileCount) onlyDirectories(directories []string) []string {
out := make([]string, 0)
for _, path := range directories {
- info, err := os.Stat(path)
+ info, err := fc.Fs.Stat(path)
if err == nil && info.IsDir() {
out = append(out, path)
}
@@ -266,11 +272,11 @@ func onlyDirectories(directories []string) []string {
func (fc *FileCount) getDirs() []string {
dirs := make([]string, len(fc.Directories))
for i, dir := range fc.Directories {
- dirs[i] = dir
+ dirs[i] = filepath.Clean(dir)
}
if fc.Directory != "" {
- dirs = append(dirs, fc.Directory)
+ dirs = append(dirs, filepath.Clean(fc.Directory))
}
return dirs
@@ -286,18 +292,21 @@ func (fc *FileCount) initGlobPaths(acc telegraf.Accumulator) {
fc.globPaths = append(fc.globPaths, *glob)
}
}
+
}
func NewFileCount() *FileCount {
return &FileCount{
- Directory: "",
- Directories: []string{},
- Name: "*",
- Recursive: true,
- RegularOnly: true,
- Size: internal.Size{Size: 0},
- MTime: internal.Duration{Duration: 0},
- fileFilters: nil,
+ Directory: "",
+ Directories: []string{},
+ Name: "*",
+ Recursive: true,
+ RegularOnly: true,
+ FollowSymlinks: false,
+ Size: internal.Size{Size: 0},
+ MTime: internal.Duration{Duration: 0},
+ fileFilters: nil,
+ Fs: osFS{},
}
}
diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go
index 2294e8ce68d88..568ee07b5d458 100644
--- a/plugins/inputs/filecount/filecount_test.go
+++ b/plugins/inputs/filecount/filecount_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
@@ -18,7 +19,7 @@ func TestNoFilters(t *testing.T) {
matches := []string{"foo", "bar", "baz", "qux",
"subdir/", "subdir/quux", "subdir/quuz",
"subdir/nested2", "subdir/nested2/qux"}
- fileCountEquals(t, fc, len(matches), 9084)
+ fileCountEquals(t, fc, len(matches), 5096)
}
func TestNoFiltersOnChildDir(t *testing.T) {
@@ -30,9 +31,8 @@ func TestNoFiltersOnChildDir(t *testing.T) {
tags := map[string]string{"directory": getTestdataDir() + "/subdir"}
acc := testutil.Accumulator{}
acc.GatherError(fc.Gather)
-
require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches))))
- require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(4542)))
+ require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(600)))
}
func TestNoRecursiveButSuperMeta(t *testing.T) {
@@ -46,7 +46,7 @@ func TestNoRecursiveButSuperMeta(t *testing.T) {
acc.GatherError(fc.Gather)
require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches))))
- require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(4096)))
+ require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(200)))
}
func TestNameFilter(t *testing.T) {
@@ -60,20 +60,22 @@ func TestNonRecursive(t *testing.T) {
fc := getNoFilterFileCount()
fc.Recursive = false
matches := []string{"foo", "bar", "baz", "qux", "subdir"}
- fileCountEquals(t, fc, len(matches), 4542)
+
+ fileCountEquals(t, fc, len(matches), 4496)
}
func TestDoubleAndSimpleStar(t *testing.T) {
fc := getNoFilterFileCount()
fc.Directories = []string{getTestdataDir() + "/**/*"}
matches := []string{"qux"}
+
tags := map[string]string{"directory": getTestdataDir() + "/subdir/nested2"}
acc := testutil.Accumulator{}
acc.GatherError(fc.Gather)
require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches))))
- require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(446)))
+ require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(400)))
}
func TestRegularOnlyFilter(t *testing.T) {
@@ -82,7 +84,8 @@ func TestRegularOnlyFilter(t *testing.T) {
matches := []string{
"foo", "bar", "baz", "qux", "subdir/quux", "subdir/quuz",
"subdir/nested2/qux"}
- fileCountEquals(t, fc, len(matches), 892)
+
+ fileCountEquals(t, fc, len(matches), 800)
}
func TestSizeFilter(t *testing.T) {
@@ -94,31 +97,75 @@ func TestSizeFilter(t *testing.T) {
fc.Size = internal.Size{Size: 100}
matches = []string{"qux", "subdir/nested2//qux"}
- fileCountEquals(t, fc, len(matches), 892)
+
+ fileCountEquals(t, fc, len(matches), 800)
}
func TestMTimeFilter(t *testing.T) {
- oldFile := filepath.Join(getTestdataDir(), "baz")
- mtime := time.Date(1979, time.December, 14, 18, 25, 5, 0, time.UTC)
- if err := os.Chtimes(oldFile, mtime, mtime); err != nil {
- t.Skip("skipping mtime filter test.")
- }
+ mtime := time.Date(2011, time.December, 14, 18, 25, 5, 0, time.UTC)
fileAge := time.Since(mtime) - (60 * time.Second)
fc := getNoFilterFileCount()
fc.MTime = internal.Duration{Duration: -fileAge}
matches := []string{"foo", "bar", "qux",
"subdir/", "subdir/quux", "subdir/quuz",
- "sbudir/nested2", "subdir/nested2/qux"}
- fileCountEquals(t, fc, len(matches), 9084)
+ "subdir/nested2", "subdir/nested2/qux"}
+
+ fileCountEquals(t, fc, len(matches), 5096)
fc.MTime = internal.Duration{Duration: fileAge}
matches = []string{"baz"}
fileCountEquals(t, fc, len(matches), 0)
}
+// The library dependency karrick/godirwalk completely abstracts out the
+// behavior of the FollowSymlinks plugin input option. However, it should at
+// least behave identically when enabled on a filesystem with no symlinks.
+func TestFollowSymlinks(t *testing.T) {
+ fc := getNoFilterFileCount()
+ fc.FollowSymlinks = true
+ matches := []string{"foo", "bar", "baz", "qux",
+ "subdir/", "subdir/quux", "subdir/quuz",
+ "subdir/nested2", "subdir/nested2/qux"}
+
+ fileCountEquals(t, fc, len(matches), 5096)
+}
+
+// Paths with a trailing slash will not exactly match paths produced during the
+// walk as these paths are cleaned before being returned from godirwalk. #6329
+func TestDirectoryWithTrailingSlash(t *testing.T) {
+ plugin := &FileCount{
+ Directories: []string{getTestdataDir() + string(filepath.Separator)},
+ Name: "*",
+ Recursive: true,
+ Fs: getFakeFileSystem(getTestdataDir()),
+ }
+
+ var acc testutil.Accumulator
+ err := plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "filecount",
+ map[string]string{
+ "directory": getTestdataDir(),
+ },
+ map[string]interface{}{
+ "count": 9,
+ "size_bytes": 5096,
+ },
+ time.Unix(0, 0),
+ telegraf.Gauge,
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+}
+
func getNoFilterFileCount() FileCount {
return FileCount{
+ Log: testutil.Logger{},
Directories: []string{getTestdataDir()},
Name: "*",
Recursive: true,
@@ -126,12 +173,60 @@ func getNoFilterFileCount() FileCount {
Size: internal.Size{Size: 0},
MTime: internal.Duration{Duration: 0},
fileFilters: nil,
+ Fs: getFakeFileSystem(getTestdataDir()),
}
}
func getTestdataDir() string {
- _, filename, _, _ := runtime.Caller(1)
- return strings.Replace(filename, "filecount_test.go", "testdata", 1)
+ dir, err := os.Getwd()
+ if err != nil {
+ // if we cannot even establish the test directory, further progress is meaningless
+ panic(err)
+ }
+
+ var chunks []string
+ var testDirectory string
+
+ if runtime.GOOS == "windows" {
+ chunks = strings.Split(dir, "\\")
+ testDirectory = strings.Join(chunks[:], "\\") + "\\testdata"
+ } else {
+ chunks = strings.Split(dir, "/")
+ testDirectory = strings.Join(chunks[:], "/") + "/testdata"
+ }
+ return testDirectory
+}
+
+func getFakeFileSystem(basePath string) fakeFileSystem {
+ // create our desired "filesystem" object, complete with an internal map allowing our funcs to return meta data as requested
+
+ mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC)
+ olderMtime := time.Date(2010, time.December, 14, 18, 25, 5, 0, time.UTC)
+
+ // set file permissions
+ var fmask uint32 = 0666
+ var dmask uint32 = 0666
+
+ // set directory bit
+ dmask |= (1 << uint(32-1))
+
+ // create a lookup map for getting "files" from the "filesystem"
+ fileList := map[string]fakeFileInfo{
+ basePath: {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true},
+ basePath + "/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime},
+ basePath + "/bar": {name: "bar", filemode: uint32(fmask), modtime: mtime},
+ basePath + "/baz": {name: "baz", filemode: uint32(fmask), modtime: olderMtime},
+ basePath + "/qux": {name: "qux", size: int64(400), filemode: uint32(fmask), modtime: mtime},
+ basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true},
+ basePath + "/subdir/quux": {name: "quux", filemode: uint32(fmask), modtime: mtime},
+ basePath + "/subdir/quuz": {name: "quuz", filemode: uint32(fmask), modtime: mtime},
+ basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: uint32(dmask), modtime: mtime, isdir: true},
+ basePath + "/subdir/nested2/qux": {name: "qux", filemode: uint32(fmask), modtime: mtime, size: int64(400)},
+ }
+
+ fs := fakeFileSystem{files: fileList}
+ return fs
+
}
func fileCountEquals(t *testing.T, fc FileCount, expectedCount int, expectedSize int) {
diff --git a/plugins/inputs/filecount/filesystem_helpers.go b/plugins/inputs/filecount/filesystem_helpers.go
new file mode 100644
index 0000000000000..2bd6c095142cf
--- /dev/null
+++ b/plugins/inputs/filecount/filesystem_helpers.go
@@ -0,0 +1,73 @@
+package filecount
+
+import (
+ "errors"
+ "io"
+ "os"
+ "time"
+)
+
+/*
+ The code below is lifted from numerous articles and originates from Andrew Gerrand's 10 things you (probably) don't know about Go.
+ it allows for mocking a filesystem; this allows for consistent testing of this code across platforms (directory sizes reported
+ differently by different platforms, for example), while preserving the rest of the functionality as-is, without modification.
+*/
+
+type fileSystem interface {
+ Open(name string) (file, error)
+ Stat(name string) (os.FileInfo, error)
+}
+
+type file interface {
+ io.Closer
+ io.Reader
+ io.ReaderAt
+ io.Seeker
+ Stat() (os.FileInfo, error)
+}
+
+// osFS implements fileSystem using the local disk
+type osFS struct{}
+
+func (osFS) Open(name string) (file, error) { return os.Open(name) }
+func (osFS) Stat(name string) (os.FileInfo, error) { return os.Stat(name) }
+
+/*
+ The following are for mocking the filesystem - this allows us to mock Stat() files. This means that we can set file attributes, and know that they
+ will be the same regardless of the platform sitting underneath our tests (directory sizes vary see https://github.com/influxdata/telegraf/issues/6011)
+
+ NOTE: still need the on-disk file structure to mirror this because the 3rd party library ("github.com/karrick/godirwalk") uses its own
+ walk functions, that we cannot mock from here.
+*/
+
+type fakeFileSystem struct {
+ files map[string]fakeFileInfo
+}
+
+type fakeFileInfo struct {
+ name string
+ size int64
+ filemode uint32
+ modtime time.Time
+ isdir bool
+ sys interface{}
+}
+
+func (f fakeFileInfo) Name() string { return f.name }
+func (f fakeFileInfo) Size() int64 { return f.size }
+func (f fakeFileInfo) Mode() os.FileMode { return os.FileMode(f.filemode) }
+func (f fakeFileInfo) ModTime() time.Time { return f.modtime }
+func (f fakeFileInfo) IsDir() bool { return f.isdir }
+func (f fakeFileInfo) Sys() interface{} { return f.sys }
+
+func (f fakeFileSystem) Open(name string) (file, error) {
+ return nil, &os.PathError{Op: "Open", Path: name, Err: errors.New("Not implemented by fake filesystem")}
+}
+
+func (f fakeFileSystem) Stat(name string) (os.FileInfo, error) {
+ if fakeInfo, found := f.files[name]; found {
+ return fakeInfo, nil
+ }
+ return nil, &os.PathError{Op: "Stat", Path: name, Err: errors.New("No such file or directory")}
+
+}
diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go
new file mode 100644
index 0000000000000..08bb15a2e59cf
--- /dev/null
+++ b/plugins/inputs/filecount/filesystem_helpers_test.go
@@ -0,0 +1,90 @@
+package filecount
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestMTime(t *testing.T) {
+ //this is the time our foo file should have
+ mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC)
+
+ fs := getTestFileSystem()
+ fileInfo, err := fs.Stat("/testdata/foo")
+ require.NoError(t, err)
+ require.Equal(t, mtime, fileInfo.ModTime())
+}
+
+func TestSize(t *testing.T) {
+ //this is the time our foo file should have
+ size := int64(4096)
+ fs := getTestFileSystem()
+ fileInfo, err := fs.Stat("/testdata")
+ require.NoError(t, err)
+ require.Equal(t, size, fileInfo.Size())
+}
+
+func TestIsDir(t *testing.T) {
+ //this is the time our foo file should have
+ dir := true
+ fs := getTestFileSystem()
+ fileInfo, err := fs.Stat("/testdata")
+ require.NoError(t, err)
+ require.Equal(t, dir, fileInfo.IsDir())
+}
+
+func TestRealFS(t *testing.T) {
+ //test that the default (non-test) empty FS causes expected behaviour
+ var fs fileSystem = osFS{}
+ //the following file exists on disk - and not in our fake fs
+ fileInfo, err := fs.Stat(getTestdataDir() + "/qux")
+ require.NoError(t, err)
+ require.Equal(t, false, fileInfo.IsDir())
+ require.Equal(t, int64(446), fileInfo.Size())
+
+ // now swap out real, for fake filesystem
+ fs = getTestFileSystem()
+ // now, the same test as above will return an error as the file doesn't exist in our fake fs
+ expectedError := "Stat " + getTestdataDir() + "/qux: No such file or directory"
+ fileInfo, err = fs.Stat(getTestdataDir() + "/qux")
+ require.Equal(t, expectedError, err.Error())
+ // and verify that what we DO expect to find, we do
+ fileInfo, err = fs.Stat("/testdata/foo")
+ require.NoError(t, err)
+}
+
+func getTestFileSystem() fakeFileSystem {
+ /*
+ create our desired "filesystem" object, complete with an internal map allowing our funcs to return meta data as requested
+
+ type FileInfo interface {
+ Name() string // base name of the file
+ Size() int64 // length in bytes of file
+ Mode() FileMode // file mode bits
+ ModTime() time.Time // modification time
+ IsDir() bool // returns bool indicating if a Dir or not
+ Sys() interface{} // underlying data source. always nil (in this case)
+ }
+
+ */
+
+ mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC)
+
+ // set file permissions
+ var fmask uint32 = 0666
+ var dmask uint32 = 0666
+
+ // set directory bit
+ dmask |= (1 << uint(32-1))
+
+ fileList := map[string]fakeFileInfo{
+ "/testdata": {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true},
+ "/testdata/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime},
+ }
+
+ fs := fakeFileSystem{files: fileList}
+ return fs
+
+}
diff --git a/plugins/inputs/filestat/README.md b/plugins/inputs/filestat/README.md
index 3102c13b077ea..840cafb53c06a 100644
--- a/plugins/inputs/filestat/README.md
+++ b/plugins/inputs/filestat/README.md
@@ -11,6 +11,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
## These accept standard unix glob matching rules, but with the addition of
## ** as a "super asterisk". See https://github.com/gobwas/glob.
files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]
+
## If true, read the entire file and calculate an md5 checksum.
md5 = false
```
diff --git a/plugins/inputs/filestat/filestat.go b/plugins/inputs/filestat/filestat.go
index 692e58c53e946..bf8ea6c160361 100644
--- a/plugins/inputs/filestat/filestat.go
+++ b/plugins/inputs/filestat/filestat.go
@@ -4,7 +4,6 @@ import (
"crypto/md5"
"fmt"
"io"
- "log"
"os"
"github.com/influxdata/telegraf"
@@ -23,6 +22,7 @@ const sampleConfig = `
## See https://github.com/gobwas/glob for more examples
##
files = ["/var/log/**.log"]
+
## If true, read the entire file and calculate an md5 checksum.
md5 = false
`
@@ -31,6 +31,8 @@ type FileStat struct {
Md5 bool
Files []string
+ Log telegraf.Logger
+
// maps full file paths to globmatch obj
globs map[string]*globpath.GlobPath
}
@@ -41,11 +43,11 @@ func NewFileStat() *FileStat {
}
}
-func (_ *FileStat) Description() string {
+func (*FileStat) Description() string {
return "Read stats about given file(s)"
}
-func (_ *FileStat) SampleConfig() string { return sampleConfig }
+func (*FileStat) SampleConfig() string { return sampleConfig }
func (f *FileStat) Gather(acc telegraf.Accumulator) error {
var err error
@@ -86,7 +88,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
}
if fileInfo == nil {
- log.Printf("E! Unable to get info for file [%s], possible permissions issue",
+ f.Log.Errorf("Unable to get info for file %q, possible permissions issue",
fileName)
} else {
fields["size_bytes"] = fileInfo.Size()
diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go
index 7fdf6cde841dc..a38d3b0aacdc4 100644
--- a/plugins/inputs/filestat/filestat_test.go
+++ b/plugins/inputs/filestat/filestat_test.go
@@ -14,6 +14,7 @@ import (
func TestGatherNoMd5(t *testing.T) {
dir := getTestdataDir()
fs := NewFileStat()
+ fs.Log = testutil.Logger{}
fs.Files = []string{
dir + "log1.log",
dir + "log2.log",
@@ -44,6 +45,7 @@ func TestGatherNoMd5(t *testing.T) {
func TestGatherExplicitFiles(t *testing.T) {
dir := getTestdataDir()
fs := NewFileStat()
+ fs.Log = testutil.Logger{}
fs.Md5 = true
fs.Files = []string{
dir + "log1.log",
@@ -77,6 +79,7 @@ func TestGatherExplicitFiles(t *testing.T) {
func TestGatherGlob(t *testing.T) {
dir := getTestdataDir()
fs := NewFileStat()
+ fs.Log = testutil.Logger{}
fs.Md5 = true
fs.Files = []string{
dir + "*.log",
@@ -103,6 +106,7 @@ func TestGatherGlob(t *testing.T) {
func TestGatherSuperAsterisk(t *testing.T) {
dir := getTestdataDir()
fs := NewFileStat()
+ fs.Log = testutil.Logger{}
fs.Md5 = true
fs.Files = []string{
dir + "**",
@@ -136,6 +140,7 @@ func TestGatherSuperAsterisk(t *testing.T) {
func TestModificationTime(t *testing.T) {
dir := getTestdataDir()
fs := NewFileStat()
+ fs.Log = testutil.Logger{}
fs.Files = []string{
dir + "log1.log",
}
@@ -153,6 +158,7 @@ func TestModificationTime(t *testing.T) {
func TestNoModificationTime(t *testing.T) {
fs := NewFileStat()
+ fs.Log = testutil.Logger{}
fs.Files = []string{
"/non/existant/file",
}
diff --git a/plugins/inputs/fireboard/README.md b/plugins/inputs/fireboard/README.md
new file mode 100644
index 0000000000000..7e1f351fa0b7f
--- /dev/null
+++ b/plugins/inputs/fireboard/README.md
@@ -0,0 +1,58 @@
+# Fireboard Input Plugin
+
+The fireboard plugin gathers the real time temperature data from fireboard
+thermometers. In order to use this input plugin, you'll need to sign up to use
+the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html).
+
+### Configuration
+
+```toml
+[[inputs.fireboard]]
+ ## Specify auth token for your account
+ auth_token = "invalidAuthToken"
+ ## You can override the fireboard server URL if necessary
+ # url = https://fireboard.io/api/v1/devices.json
+ ## You can set a different http_timeout if you need to
+ # http_timeout = 4
+```
+
+#### auth_token
+
+In lieu of requiring a username and password, this plugin requires an
+authentication token that you can generate using the [Fireboard REST
+API](https://docs.fireboard.io/reference/restapi.html#Authentication).
+
+#### url
+
+While there should be no reason to override the URL, the option is available
+in case Fireboard changes their site, etc.
+
+#### http_timeout
+
+If you need to increase the HTTP timeout, you can do so here. You can set this
+value in seconds. The default value is four (4) seconds.
+
+### Metrics
+
+The Fireboard REST API docs have good examples of the data that is available,
+currently this input only returns the real time temperatures. Temperature
+values are included if they are less than a minute old.
+
+- fireboard
+ - tags:
+ - channel
+ - scale (Celcius; Farenheit)
+ - title (name of the Fireboard)
+ - uuid (UUID of the Fireboard)
+ - fields:
+ - temperature (float, unit)
+
+### Example Output
+
+This section shows example output in Line Protocol format. You can often use
+`telegraf --input-filter --test` or use the `file` output to get
+this information.
+
+```
+fireboard,channel=2,host=patas-mbp,scale=Farenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000
+```
diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go
new file mode 100644
index 0000000000000..a92930aae9598
--- /dev/null
+++ b/plugins/inputs/fireboard/fireboard.go
@@ -0,0 +1,157 @@
+package fireboard
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Fireboard gathers statistics from the fireboard.io servers
+type Fireboard struct {
+ AuthToken string `toml:"auth_token"`
+ URL string `toml:"url"`
+ HTTPTimeout internal.Duration `toml:"http_timeout"`
+
+ client *http.Client
+}
+
+// NewFireboard return a new instance of Fireboard with a default http client
+func NewFireboard() *Fireboard {
+ tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
+ client := &http.Client{
+ Transport: tr,
+ Timeout: time.Duration(4 * time.Second),
+ }
+ return &Fireboard{client: client}
+}
+
+// RTT fireboardStats represents the data that is received from Fireboard
+type RTT struct {
+ Temp float64 `json:"temp"`
+ Channel int64 `json:"channel"`
+ Degreetype int `json:"degreetype"`
+ Created string `json:"created"`
+}
+
+type fireboardStats struct {
+ Title string `json:"title"`
+ UUID string `json:"uuid"`
+ Latesttemps []RTT `json:"latest_temps"`
+}
+
+// A sample configuration to only gather stats from localhost, default port.
+const sampleConfig = `
+ ## Specify auth token for your account
+ auth_token = "invalidAuthToken"
+ ## You can override the fireboard server URL if necessary
+ # url = https://fireboard.io/api/v1/devices.json
+ ## You can set a different http_timeout if you need to
+ ## You should set a string using an number and time indicator
+ ## for example "12s" for 12 seconds.
+ # http_timeout = "4s"
+`
+
+// SampleConfig Returns a sample configuration for the plugin
+func (r *Fireboard) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description Returns a description of the plugin
+func (r *Fireboard) Description() string {
+ return "Read real time temps from fireboard.io servers"
+}
+
+// Init the things
+func (r *Fireboard) Init() error {
+
+ if len(r.AuthToken) == 0 {
+ return fmt.Errorf("You must specify an authToken")
+ }
+ if len(r.URL) == 0 {
+ r.URL = "https://fireboard.io/api/v1/devices.json"
+ }
+ // Have a default timeout of 4s
+ if r.HTTPTimeout.Duration == 0 {
+ r.HTTPTimeout.Duration = time.Second * 4
+ }
+
+ r.client.Timeout = r.HTTPTimeout.Duration
+
+ return nil
+}
+
+// Gather Reads stats from all configured servers.
+func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
+
+ // Perform the GET request to the fireboard servers
+ req, err := http.NewRequest("GET", r.URL, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Token "+r.AuthToken)
+ resp, err := r.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ // Successful responses will always return status code 200
+ if resp.StatusCode != http.StatusOK {
+ if resp.StatusCode == http.StatusForbidden {
+ return fmt.Errorf("fireboard server responded with %d [Forbidden], verify your authToken", resp.StatusCode)
+ }
+ return fmt.Errorf("fireboard responded with unexpected status code %d", resp.StatusCode)
+ }
+ // Decode the response JSON into a new stats struct
+ var stats []fireboardStats
+ if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
+ return fmt.Errorf("unable to decode fireboard response: %s", err)
+ }
+ // Range over all devices, gathering stats. Returns early in case of any error.
+ for _, s := range stats {
+ r.gatherTemps(s, acc)
+ }
+ return nil
+}
+
+// Return text description of degree type (scale)
+func scale(n int) string {
+ switch n {
+ case 1:
+ return "Celcius"
+ case 2:
+ return "Fahrenheit"
+ default:
+ return ""
+ }
+}
+
+// Gathers stats from a single device, adding them to the accumulator
+func (r *Fireboard) gatherTemps(s fireboardStats, acc telegraf.Accumulator) {
+ // Construct lookup for scale values
+
+ for _, t := range s.Latesttemps {
+ tags := map[string]string{
+ "title": s.Title,
+ "uuid": s.UUID,
+ "channel": strconv.FormatInt(t.Channel, 10),
+ "scale": scale(t.Degreetype),
+ }
+ fields := map[string]interface{}{
+ "temperature": t.Temp,
+ }
+ acc.AddFields("fireboard", fields, tags)
+ }
+}
+
+func init() {
+ inputs.Add("fireboard", func() telegraf.Input {
+ return NewFireboard()
+ })
+}
diff --git a/plugins/inputs/fireboard/fireboard_test.go b/plugins/inputs/fireboard/fireboard_test.go
new file mode 100644
index 0000000000000..a5e93a4533e59
--- /dev/null
+++ b/plugins/inputs/fireboard/fireboard_test.go
@@ -0,0 +1,74 @@
+package fireboard
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strconv"
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFireboard(t *testing.T) {
+ // Create a test server with the const response JSON
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintln(w, response)
+ }))
+ defer ts.Close()
+
+ // Parse the URL of the test server, used to verify the expected host
+ u, err := url.Parse(ts.URL)
+ require.NoError(t, err)
+
+ // Create a new fb instance with our given test server
+ fireboard := NewFireboard()
+ fireboard.AuthToken = "b4bb6e6a7b6231acb9f71b304edb2274693d8849"
+ fireboard.URL = u.String()
+
+ // Create a test accumulator
+ acc := &testutil.Accumulator{}
+
+ // Gather data from the test server
+ err = fireboard.Gather(acc)
+ require.NoError(t, err)
+
+ // Expect the correct values for all known keys
+ expectFields := map[string]interface{}{
+ "temperature": float64(79.9),
+ }
+ // Expect the correct values for all tags
+ expectTags := map[string]string{
+ "title": "telegraf-FireBoard",
+ "uuid": "b55e766c-b308-49b5-93a4-df89fe31efd0",
+ "channel": strconv.FormatInt(1, 10),
+ "scale": "Fahrenheit",
+ }
+
+ acc.AssertContainsTaggedFields(t, "fireboard", expectFields, expectTags)
+}
+
+var response = `
+[{
+ "id": 99999,
+ "title": "telegraf-FireBoard",
+ "created": "2019-03-23T16:48:32.152010Z",
+ "uuid": "b55e766c-b308-49b5-93a4-df89fe31efd0",
+ "hardware_id": "XXXXXXXXX",
+ "latest_temps": [
+ {
+ "temp": 79.9,
+ "channel": 1,
+ "degreetype": 2,
+ "created": "2019-06-25T06:07:10Z"
+ }
+ ],
+ "last_templog": "2019-06-25T06:06:40Z",
+ "model": "FBX11E",
+ "channel_count": 6,
+ "degreetype": 2
+ }]
+`
diff --git a/plugins/inputs/fluentd/README.md b/plugins/inputs/fluentd/README.md
index 6c5bada3cd770..3fabbddb75012 100644
--- a/plugins/inputs/fluentd/README.md
+++ b/plugins/inputs/fluentd/README.md
@@ -1,10 +1,10 @@
# Fluentd Input Plugin
-The fluentd plugin gathers metrics from plugin endpoint provided by [in_monitor plugin](http://docs.fluentd.org/v0.12/articles/monitoring).
+The fluentd plugin gathers metrics from plugin endpoint provided by [in_monitor plugin](https://docs.fluentd.org/input/monitor_agent).
This plugin understands data provided by /api/plugin.json resource (/api/config.json is not covered).
You might need to adjust your fluentd configuration, in order to reduce series cardinality in case your fluentd restarts frequently. Every time fluentd starts, `plugin_id` value is given a new random value.
-According to [fluentd documentation](http://docs.fluentd.org/v0.12/articles/config-file), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`.
+According to [fluentd documentation](https://docs.fluentd.org/configuration/config-file#common-plugin-parameter), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`.
example configuration with `@id` parameter for http plugin:
```
diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go
index c999607403ea7..7d4a0cd5eecb4 100644
--- a/plugins/inputs/fluentd/fluentd.go
+++ b/plugins/inputs/fluentd/fluentd.go
@@ -53,7 +53,7 @@ type pluginData struct {
// parse JSON from fluentd Endpoint
// Parameters:
-// data: unprocessed json recivied from endpoint
+// data: unprocessed json received from endpoint
//
// Returns:
// pluginData: slice that contains parsed plugins
@@ -76,7 +76,7 @@ func parse(data []byte) (datapointArray []pluginData, err error) {
// Description - display description
func (h *Fluentd) Description() string { return description }
-// SampleConfig - generate configuretion
+// SampleConfig - generate configuration
func (h *Fluentd) SampleConfig() string { return sampleConfig }
// Gather - Main code responsible for gathering, processing and creating metrics
diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md
index 524d1d0e70fd0..46127082e69c5 100644
--- a/plugins/inputs/github/README.md
+++ b/plugins/inputs/github/README.md
@@ -10,11 +10,17 @@ alternative method for collecting repository information.
```toml
[[inputs.github]]
## List of repositories to monitor
- repositories = ["influxdata/telegraf"]
+ repositories = [
+ "influxdata/telegraf",
+ "influxdata/influxdb"
+ ]
## Github API access token. Unauthenticated requests are limited to 60 per hour.
# access_token = ""
+ ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+ # enterprise_base_url = ""
+
## Timeout for HTTP requests.
# http_timeout = "5s"
```
@@ -28,16 +34,19 @@ alternative method for collecting repository information.
- language - The primary language of the repository
- license - The license set for the repository
- fields:
- - stars (int)
- forks (int)
- open_issues (int)
+ - networks (int)
- size (int)
+ - subscribers (int)
+ - stars (int)
+ - watchers (int)
When the [internal][] input is enabled:
+ internal_github
- tags:
- - access_token - An obfusticated reference to the configured access token or "Unauthenticated"
+ - access_token - An obfuscated reference to the configured access token or "Unauthenticated"
- fields:
- limit - How many requests you are limited to (per hour)
- remaining - How many requests you have remaining (per hour)
@@ -46,7 +55,7 @@ When the [internal][] input is enabled:
### Example Output
```
-github,full_name=influxdata/telegraf,name=telegraf,owner=influxdata,language=Go,license=MIT\ License stars=6401i,forks=2421i,open_issues=722i,size=22611i 1552651811000000000
+github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000
internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000
```
diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go
index ff497e55bd0a7..3e5597707ffe4 100644
--- a/plugins/inputs/github/github.go
+++ b/plugins/inputs/github/github.go
@@ -18,12 +18,13 @@ import (
// GitHub - plugin main structure
type GitHub struct {
- Repositories []string `toml:"repositories"`
- AccessToken string `toml:"access_token"`
- HTTPTimeout internal.Duration `toml:"http_timeout"`
- githubClient *github.Client
+ Repositories []string `toml:"repositories"`
+ AccessToken string `toml:"access_token"`
+ EnterpriseBaseURL string `toml:"enterprise_base_url"`
+ HTTPTimeout internal.Duration `toml:"http_timeout"`
+ githubClient *github.Client
- obfusticatedToken string
+ obfuscatedToken string
RateLimit selfstat.Stat
RateLimitErrors selfstat.Stat
@@ -32,11 +33,17 @@ type GitHub struct {
const sampleConfig = `
## List of repositories to monitor.
- repositories = ["influxdata/telegraf"]
+ repositories = [
+ "influxdata/telegraf",
+ "influxdata/influxdb"
+ ]
## Github API access token. Unauthenticated requests are limited to 60 per hour.
# access_token = ""
+ ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+ # enterprise_base_url = ""
+
## Timeout for HTTP requests.
# http_timeout = "5s"
`
@@ -60,7 +67,7 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error)
Timeout: g.HTTPTimeout.Duration,
}
- g.obfusticatedToken = "Unauthenticated"
+ g.obfuscatedToken = "Unauthenticated"
if g.AccessToken != "" {
tokenSource := oauth2.StaticTokenSource(
@@ -69,11 +76,18 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error)
oauthClient := oauth2.NewClient(ctx, tokenSource)
ctx = context.WithValue(ctx, oauth2.HTTPClient, oauthClient)
- g.obfusticatedToken = g.AccessToken[0:4] + "..." + g.AccessToken[len(g.AccessToken)-3:]
+ g.obfuscatedToken = g.AccessToken[0:4] + "..." + g.AccessToken[len(g.AccessToken)-3:]
- return github.NewClient(oauthClient), nil
+ return g.newGithubClient(oauthClient)
}
+ return g.newGithubClient(httpClient)
+}
+
+func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) {
+ if g.EnterpriseBaseURL != "" {
+ return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient)
+ }
return github.NewClient(httpClient), nil
}
@@ -91,7 +105,7 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error {
g.githubClient = githubClient
tokenTags := map[string]string{
- "access_token": g.obfusticatedToken,
+ "access_token": g.obfuscatedToken,
}
g.RateLimitErrors = selfstat.Register("github", "rate_limit_blocks", tokenTags)
@@ -148,9 +162,9 @@ func splitRepositoryName(repositoryName string) (string, string, error) {
return splits[0], splits[1], nil
}
-func getLicense(repositoryInfo *github.Repository) string {
- if repositoryInfo.GetLicense() != nil {
- return *repositoryInfo.License.Name
+func getLicense(rI *github.Repository) string {
+ if licenseName := rI.GetLicense().GetName(); licenseName != "" {
+ return licenseName
}
return "None"
@@ -158,19 +172,22 @@ func getLicense(repositoryInfo *github.Repository) string {
func getTags(repositoryInfo *github.Repository) map[string]string {
return map[string]string{
- "owner": *repositoryInfo.Owner.Login,
- "name": *repositoryInfo.Name,
- "language": *repositoryInfo.Language,
+ "owner": repositoryInfo.GetOwner().GetLogin(),
+ "name": repositoryInfo.GetName(),
+ "language": repositoryInfo.GetLanguage(),
"license": getLicense(repositoryInfo),
}
}
func getFields(repositoryInfo *github.Repository) map[string]interface{} {
return map[string]interface{}{
- "stars": *repositoryInfo.StargazersCount,
- "forks": *repositoryInfo.ForksCount,
- "open_issues": *repositoryInfo.OpenIssuesCount,
- "size": *repositoryInfo.Size,
+ "stars": repositoryInfo.GetStargazersCount(),
+ "subscribers": repositoryInfo.GetSubscribersCount(),
+ "watchers": repositoryInfo.GetWatchersCount(),
+ "networks": repositoryInfo.GetNetworkCount(),
+ "forks": repositoryInfo.GetForksCount(),
+ "open_issues": repositoryInfo.GetOpenIssuesCount(),
+ "size": repositoryInfo.GetSize(),
}
}
diff --git a/plugins/inputs/github/github_test.go b/plugins/inputs/github/github_test.go
index 0ebae3a671667..23fda6675cc44 100644
--- a/plugins/inputs/github/github_test.go
+++ b/plugins/inputs/github/github_test.go
@@ -1,6 +1,7 @@
package github
import (
+ "net/http"
"reflect"
"testing"
@@ -8,6 +9,18 @@ import (
"github.com/stretchr/testify/require"
)
+func TestNewGithubClient(t *testing.T) {
+ httpClient := &http.Client{}
+ g := &GitHub{}
+ client, err := g.newGithubClient(httpClient)
+ require.NoError(t, err)
+ require.Contains(t, client.BaseURL.String(), "api.github.com")
+ g.EnterpriseBaseURL = "api.example.com/"
+ enterpriseClient, err := g.newGithubClient(httpClient)
+ require.NoError(t, err)
+ require.Contains(t, enterpriseClient.BaseURL.String(), "api.example.com")
+}
+
func TestSplitRepositoryNameWithWorkingExample(t *testing.T) {
var validRepositoryNames = []struct {
fullName string
@@ -38,7 +51,7 @@ func TestSplitRepositoryNameWithNoSlash(t *testing.T) {
t.Run(tt, func(t *testing.T) {
_, _, err := splitRepositoryName(tt)
- require.NotNil(t, err)
+ require.Error(t, err)
})
}
}
@@ -98,12 +111,17 @@ func TestGetFields(t *testing.T) {
forks := 2
openIssues := 3
size := 4
+ subscribers := 5
+ watchers := 6
repository := gh.Repository{
- StargazersCount: &stars,
- ForksCount: &forks,
- OpenIssuesCount: &openIssues,
- Size: &size,
+ StargazersCount: &stars,
+ ForksCount: &forks,
+ OpenIssuesCount: &openIssues,
+ Size: &size,
+ NetworkCount: &forks,
+ SubscribersCount: &subscribers,
+ WatchersCount: &watchers,
}
getFieldsReturn := getFields(&repository)
@@ -112,8 +130,11 @@ func TestGetFields(t *testing.T) {
correctFieldReturn["stars"] = 1
correctFieldReturn["forks"] = 2
+ correctFieldReturn["networks"] = 2
correctFieldReturn["open_issues"] = 3
correctFieldReturn["size"] = 4
+ correctFieldReturn["subscribers"] = 5
+ correctFieldReturn["watchers"] = 6
require.Equal(t, true, reflect.DeepEqual(getFieldsReturn, correctFieldReturn))
}
diff --git a/plugins/inputs/gnmi/README.md b/plugins/inputs/gnmi/README.md
new file mode 100644
index 0000000000000..7387e566dd21b
--- /dev/null
+++ b/plugins/inputs/gnmi/README.md
@@ -0,0 +1,73 @@
+# gNMI (gRPC Network Management Interface) Input Plugin
+
+This plugin consumes telemetry data based on the [gNMI](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md) Subscribe method. TLS is supported for authentication and encryption. This input plugin is vendor-agnostic and is supported on any platform that supports the gNMI spec.
+
+For Cisco devices:
+It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later.
+
+
+### Configuration
+
+```toml
+[[inputs.gnmi]]
+ ## Address and port of the gNMI GRPC server
+ addresses = ["10.49.234.114:57777"]
+
+ ## define credentials
+ username = "cisco"
+ password = "cisco"
+
+ ## gNMI encoding requested (one of: "proto", "json", "json_ietf")
+ # encoding = "proto"
+
+ ## redial in case of failures after
+ redial = "10s"
+
+ ## enable client-side TLS and define CA to authenticate the device
+ # enable_tls = true
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # insecure_skip_verify = true
+
+ ## define client-side TLS certificate & key to authenticate to the device
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## gNMI subscription prefix (optional, can usually be left empty)
+ ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+ # origin = ""
+ # prefix = ""
+ # target = ""
+
+ ## Define additional aliases to map telemetry encoding paths to simple measurement names
+ # [inputs.gnmi.aliases]
+ # ifcounters = "openconfig:/interfaces/interface/state/counters"
+
+ [[inputs.gnmi.subscription]]
+ ## Name of the measurement that will be emitted
+ name = "ifcounters"
+
+ ## Origin and path of the subscription
+ ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+ ##
+ ## origin usually refers to a (YANG) data model implemented by the device
+ ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
+ ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
+ origin = "openconfig-interfaces"
+ path = "/interfaces/interface/state/counters"
+
+ # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
+ subscription_mode = "sample"
+ sample_interval = "10s"
+
+ ## Suppress redundant transmissions when measured values are unchanged
+ # suppress_redundant = false
+
+ ## If suppression is enabled, send updates at least every X seconds anyway
+ # heartbeat_interval = "60s"
+```
+
+### Example Output
+```
+ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=MgmtEth0/RP0/CPU0/0,source=10.49.234.115 in-multicast-pkts=0i,out-multicast-pkts=0i,out-errors=0i,out-discards=0i,in-broadcast-pkts=0i,out-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,in-errors=0i,out-unicast-pkts=0i,in-octets=0i,out-octets=0i,last-clear="2019-05-22T16:53:21Z",in-unicast-pkts=0i 1559145777425000000
+ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=GigabitEthernet0/0/0/0,source=10.49.234.115 out-multicast-pkts=0i,out-broadcast-pkts=0i,in-errors=0i,out-errors=0i,in-discards=0i,out-octets=0i,in-unknown-protos=0i,in-unicast-pkts=0i,in-octets=0i,in-multicast-pkts=0i,in-broadcast-pkts=0i,last-clear="2019-05-22T16:54:50Z",out-unicast-pkts=0i,out-discards=0i 1559145777425000000
+```
diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go
new file mode 100644
index 0000000000000..3c5826ba40033
--- /dev/null
+++ b/plugins/inputs/gnmi/gnmi.go
@@ -0,0 +1,564 @@
+package gnmi
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "path"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/metric"
+ internaltls "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
+ "github.com/openconfig/gnmi/proto/gnmi"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/metadata"
+)
+
+// gNMI plugin instance
+type GNMI struct {
+ Addresses []string `toml:"addresses"`
+ Subscriptions []Subscription `toml:"subscription"`
+ Aliases map[string]string `toml:"aliases"`
+
+ // Optional subscription configuration
+ Encoding string
+ Origin string
+ Prefix string
+ Target string
+ UpdatesOnly bool `toml:"updates_only"`
+
+ // gNMI target credentials
+ Username string
+ Password string
+
+ // Redial
+ Redial internal.Duration
+
+ // GRPC TLS settings
+ EnableTLS bool `toml:"enable_tls"`
+ internaltls.ClientConfig
+
+ // Internal state
+ aliases map[string]string
+ acc telegraf.Accumulator
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+
+ Log telegraf.Logger
+}
+
+// Subscription for a gNMI client
+type Subscription struct {
+ Name string
+ Origin string
+ Path string
+
+ // Subscription mode and interval
+ SubscriptionMode string `toml:"subscription_mode"`
+ SampleInterval internal.Duration `toml:"sample_interval"`
+
+ // Duplicate suppression
+ SuppressRedundant bool `toml:"suppress_redundant"`
+ HeartbeatInterval internal.Duration `toml:"heartbeat_interval"`
+}
+
+// Start the http listener service
+func (c *GNMI) Start(acc telegraf.Accumulator) error {
+ var err error
+ var ctx context.Context
+ var tlscfg *tls.Config
+ var request *gnmi.SubscribeRequest
+ c.acc = acc
+ ctx, c.cancel = context.WithCancel(context.Background())
+
+ // Validate configuration
+ if request, err = c.newSubscribeRequest(); err != nil {
+ return err
+ } else if c.Redial.Duration.Nanoseconds() <= 0 {
+ return fmt.Errorf("redial duration must be positive")
+ }
+
+ // Parse TLS config
+ if c.EnableTLS {
+ if tlscfg, err = c.ClientConfig.TLSConfig(); err != nil {
+ return err
+ }
+ }
+
+ if len(c.Username) > 0 {
+ ctx = metadata.AppendToOutgoingContext(ctx, "username", c.Username, "password", c.Password)
+ }
+
+ // Invert explicit alias list and prefill subscription names
+ c.aliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases))
+ for _, subscription := range c.Subscriptions {
+ var gnmiLongPath, gnmiShortPath *gnmi.Path
+
+ // Build the subscription path without keys
+ if gnmiLongPath, err = parsePath(subscription.Origin, subscription.Path, ""); err != nil {
+ return err
+ }
+ if gnmiShortPath, err = parsePath("", subscription.Path, ""); err != nil {
+ return err
+ }
+
+ longPath, _ := c.handlePath(gnmiLongPath, nil, "")
+ shortPath, _ := c.handlePath(gnmiShortPath, nil, "")
+ name := subscription.Name
+
+ // If the user didn't provide a measurement name, use last path element
+ if len(name) == 0 {
+ name = path.Base(shortPath)
+ }
+ if len(name) > 0 {
+ c.aliases[longPath] = name
+ c.aliases[shortPath] = name
+ }
+ }
+ for alias, path := range c.Aliases {
+ c.aliases[path] = alias
+ }
+
+ // Create a goroutine for each device, dial and subscribe
+ c.wg.Add(len(c.Addresses))
+ for _, addr := range c.Addresses {
+ go func(address string) {
+ defer c.wg.Done()
+ for ctx.Err() == nil {
+ if err := c.subscribeGNMI(ctx, address, tlscfg, request); err != nil && ctx.Err() == nil {
+ acc.AddError(err)
+ }
+
+ select {
+ case <-ctx.Done():
+ case <-time.After(c.Redial.Duration):
+ }
+ }
+ }(addr)
+ }
+ return nil
+}
+
+// Create a new gNMI SubscribeRequest
+func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) {
+ // Create subscription objects
+ subscriptions := make([]*gnmi.Subscription, len(c.Subscriptions))
+ for i, subscription := range c.Subscriptions {
+ gnmiPath, err := parsePath(subscription.Origin, subscription.Path, "")
+ if err != nil {
+ return nil, err
+ }
+ mode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(subscription.SubscriptionMode)]
+ if !ok {
+ return nil, fmt.Errorf("invalid subscription mode %s", subscription.SubscriptionMode)
+ }
+ subscriptions[i] = &gnmi.Subscription{
+ Path: gnmiPath,
+ Mode: gnmi.SubscriptionMode(mode),
+ SampleInterval: uint64(subscription.SampleInterval.Duration.Nanoseconds()),
+ SuppressRedundant: subscription.SuppressRedundant,
+ HeartbeatInterval: uint64(subscription.HeartbeatInterval.Duration.Nanoseconds()),
+ }
+ }
+
+ // Construct subscribe request
+ gnmiPath, err := parsePath(c.Origin, c.Prefix, c.Target)
+ if err != nil {
+ return nil, err
+ }
+
+ if c.Encoding != "proto" && c.Encoding != "json" && c.Encoding != "json_ietf" {
+ return nil, fmt.Errorf("unsupported encoding %s", c.Encoding)
+ }
+
+ return &gnmi.SubscribeRequest{
+ Request: &gnmi.SubscribeRequest_Subscribe{
+ Subscribe: &gnmi.SubscriptionList{
+ Prefix: gnmiPath,
+ Mode: gnmi.SubscriptionList_STREAM,
+ Encoding: gnmi.Encoding(gnmi.Encoding_value[strings.ToUpper(c.Encoding)]),
+ Subscription: subscriptions,
+ UpdatesOnly: c.UpdatesOnly,
+ },
+ },
+ }, nil
+}
+
+// SubscribeGNMI and extract telemetry data
+func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Config, request *gnmi.SubscribeRequest) error {
+ var opt grpc.DialOption
+ if tlscfg != nil {
+ opt = grpc.WithTransportCredentials(credentials.NewTLS(tlscfg))
+ } else {
+ opt = grpc.WithInsecure()
+ }
+
+ client, err := grpc.DialContext(ctx, address, opt)
+ if err != nil {
+ return fmt.Errorf("failed to dial: %v", err)
+ }
+ defer client.Close()
+
+ subscribeClient, err := gnmi.NewGNMIClient(client).Subscribe(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to setup subscription: %v", err)
+ }
+
+ if err = subscribeClient.Send(request); err != nil {
+ // If io.EOF is returned, the stream may have ended and stream status
+ // can be determined by calling Recv.
+ if err != io.EOF {
+ return fmt.Errorf("failed to send subscription request: %v", err)
+ }
+ }
+
+ c.Log.Debugf("Connection to gNMI device %s established", address)
+ defer c.Log.Debugf("Connection to gNMI device %s closed", address)
+ for ctx.Err() == nil {
+ var reply *gnmi.SubscribeResponse
+ if reply, err = subscribeClient.Recv(); err != nil {
+ if err != io.EOF && ctx.Err() == nil {
+ return fmt.Errorf("aborted gNMI subscription: %v", err)
+ }
+ break
+ }
+
+ c.handleSubscribeResponse(address, reply)
+ }
+ return nil
+}
+
+// HandleSubscribeResponse message from gNMI and parse contained telemetry data
+func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResponse) {
+ // Check if response is a gNMI Update and if we have a prefix to derive the measurement name
+ response, ok := reply.Response.(*gnmi.SubscribeResponse_Update)
+ if !ok {
+ return
+ }
+
+ var prefix, prefixAliasPath string
+ grouper := metric.NewSeriesGrouper()
+ timestamp := time.Unix(0, response.Update.Timestamp)
+ prefixTags := make(map[string]string)
+
+ if response.Update.Prefix != nil {
+ prefix, prefixAliasPath = c.handlePath(response.Update.Prefix, prefixTags, "")
+ }
+ prefixTags["source"], _, _ = net.SplitHostPort(address)
+ prefixTags["path"] = prefix
+
+ // Parse individual Update message and create measurements
+ var name, lastAliasPath string
+ for _, update := range response.Update.Update {
+ // Prepare tags from prefix
+ tags := make(map[string]string, len(prefixTags))
+ for key, val := range prefixTags {
+ tags[key] = val
+ }
+ aliasPath, fields := c.handleTelemetryField(update, tags, prefix)
+
+ // Inherent valid alias from prefix parsing
+ if len(prefixAliasPath) > 0 && len(aliasPath) == 0 {
+ aliasPath = prefixAliasPath
+ }
+
+ // Lookup alias if alias-path has changed
+ if aliasPath != lastAliasPath {
+ name = prefix
+ if alias, ok := c.aliases[aliasPath]; ok {
+ name = alias
+ } else {
+ c.Log.Debugf("No measurement alias for gNMI path: %s", name)
+ }
+ }
+
+ // Group metrics
+ for k, v := range fields {
+ key := k
+ if len(aliasPath) < len(key) {
+ // This may not be an exact prefix, due to naming style
+ // conversion on the key.
+ key = key[len(aliasPath)+1:]
+ } else {
+ // Otherwise use the last path element as the field key.
+ key = path.Base(key)
+
+ // If there are no elements skip the item; this would be an
+ // invalid message.
+ key = strings.TrimLeft(key, "/.")
+ if key == "" {
+ c.Log.Errorf("invalid empty path: %q", k)
+ continue
+ }
+ }
+
+ grouper.Add(name, tags, timestamp, key, v)
+ }
+
+ lastAliasPath = aliasPath
+ }
+
+ // Add grouped measurements
+ for _, metric := range grouper.Metrics() {
+ c.acc.AddMetric(metric)
+ }
+}
+
+// HandleTelemetryField and add it to a measurement
+func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, prefix string) (string, map[string]interface{}) {
+ path, aliasPath := c.handlePath(update.Path, tags, prefix)
+
+ var value interface{}
+ var jsondata []byte
+
+ // Make sure a value is actually set
+ if update.Val == nil || update.Val.Value == nil {
+ c.Log.Infof("Discarded empty or legacy type value with path: %q", path)
+ return aliasPath, nil
+ }
+
+ switch val := update.Val.Value.(type) {
+ case *gnmi.TypedValue_AsciiVal:
+ value = val.AsciiVal
+ case *gnmi.TypedValue_BoolVal:
+ value = val.BoolVal
+ case *gnmi.TypedValue_BytesVal:
+ value = val.BytesVal
+ case *gnmi.TypedValue_DecimalVal:
+ value = float64(val.DecimalVal.Digits) / math.Pow(10, float64(val.DecimalVal.Precision))
+ case *gnmi.TypedValue_FloatVal:
+ value = val.FloatVal
+ case *gnmi.TypedValue_IntVal:
+ value = val.IntVal
+ case *gnmi.TypedValue_StringVal:
+ value = val.StringVal
+ case *gnmi.TypedValue_UintVal:
+ value = val.UintVal
+ case *gnmi.TypedValue_JsonIetfVal:
+ jsondata = val.JsonIetfVal
+ case *gnmi.TypedValue_JsonVal:
+ jsondata = val.JsonVal
+ }
+
+ name := strings.Replace(path, "-", "_", -1)
+ fields := make(map[string]interface{})
+ if value != nil {
+ fields[name] = value
+ } else if jsondata != nil {
+ if err := json.Unmarshal(jsondata, &value); err != nil {
+ c.acc.AddError(fmt.Errorf("failed to parse JSON value: %v", err))
+ } else {
+ flattener := jsonparser.JSONFlattener{Fields: fields}
+ flattener.FullFlattenJSON(name, value, true, true)
+ }
+ }
+ return aliasPath, fields
+}
+
+// Parse path to path-buffer and tag-field
+func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string) (string, string) {
+ var aliasPath string
+ builder := bytes.NewBufferString(prefix)
+
+ // Prefix with origin
+ if len(path.Origin) > 0 {
+ builder.WriteString(path.Origin)
+ builder.WriteRune(':')
+ }
+
+ // Parse generic keys from prefix
+ for _, elem := range path.Elem {
+ if len(elem.Name) > 0 {
+ builder.WriteRune('/')
+ builder.WriteString(elem.Name)
+ }
+ name := builder.String()
+
+ if _, exists := c.aliases[name]; exists {
+ aliasPath = name
+ }
+
+ if tags != nil {
+ for key, val := range elem.Key {
+ key = strings.Replace(key, "-", "_", -1)
+
+ // Use short-form of key if possible
+ if _, exists := tags[key]; exists {
+ tags[name+"/"+key] = val
+ } else {
+ tags[key] = val
+ }
+
+ }
+ }
+ }
+
+ return builder.String(), aliasPath
+}
+
+//ParsePath from XPath-like string to gNMI path structure
+func parsePath(origin string, path string, target string) (*gnmi.Path, error) {
+ var err error
+ gnmiPath := gnmi.Path{Origin: origin, Target: target}
+
+ if len(path) > 0 && path[0] != '/' {
+ return nil, fmt.Errorf("path does not start with a '/': %s", path)
+ }
+
+ elem := &gnmi.PathElem{}
+ start, name, value, end := 0, -1, -1, -1
+
+ path = path + "/"
+
+ for i := 0; i < len(path); i++ {
+ if path[i] == '[' {
+ if name >= 0 {
+ break
+ }
+ if end < 0 {
+ end = i
+ elem.Key = make(map[string]string)
+ }
+ name = i + 1
+ } else if path[i] == '=' {
+ if name <= 0 || value >= 0 {
+ break
+ }
+ value = i + 1
+ } else if path[i] == ']' {
+ if name <= 0 || value <= name {
+ break
+ }
+ elem.Key[path[name:value-1]] = strings.Trim(path[value:i], "'\"")
+ name, value = -1, -1
+ } else if path[i] == '/' {
+ if name < 0 {
+ if end < 0 {
+ end = i
+ }
+
+ if end > start {
+ elem.Name = path[start:end]
+ gnmiPath.Elem = append(gnmiPath.Elem, elem)
+ gnmiPath.Element = append(gnmiPath.Element, path[start:i])
+ }
+
+ start, name, value, end = i+1, -1, -1, -1
+ elem = &gnmi.PathElem{}
+ }
+ }
+ }
+
+ if name >= 0 || value >= 0 {
+ err = fmt.Errorf("Invalid gNMI path: %s", path)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &gnmiPath, nil
+}
+
+// Stop listener and cleanup
+func (c *GNMI) Stop() {
+ c.cancel()
+ c.wg.Wait()
+}
+
+const sampleConfig = `
+ ## Address and port of the gNMI GRPC server
+ addresses = ["10.49.234.114:57777"]
+
+ ## define credentials
+ username = "cisco"
+ password = "cisco"
+
+ ## gNMI encoding requested (one of: "proto", "json", "json_ietf")
+ # encoding = "proto"
+
+ ## redial in case of failures after
+ redial = "10s"
+
+ ## enable client-side TLS and define CA to authenticate the device
+ # enable_tls = true
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # insecure_skip_verify = true
+
+ ## define client-side TLS certificate & key to authenticate to the device
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## gNMI subscription prefix (optional, can usually be left empty)
+ ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+ # origin = ""
+ # prefix = ""
+ # target = ""
+
+ ## Define additional aliases to map telemetry encoding paths to simple measurement names
+ #[inputs.gnmi.aliases]
+ # ifcounters = "openconfig:/interfaces/interface/state/counters"
+
+ [[inputs.gnmi.subscription]]
+ ## Name of the measurement that will be emitted
+ name = "ifcounters"
+
+ ## Origin and path of the subscription
+ ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+ ##
+ ## origin usually refers to a (YANG) data model implemented by the device
+ ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
+ ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
+ origin = "openconfig-interfaces"
+ path = "/interfaces/interface/state/counters"
+
+ # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
+ subscription_mode = "sample"
+ sample_interval = "10s"
+
+ ## Suppress redundant transmissions when measured values are unchanged
+ # suppress_redundant = false
+
+ ## If suppression is enabled, send updates at least every X seconds anyway
+ # heartbeat_interval = "60s"
+`
+
+// SampleConfig of plugin
+func (c *GNMI) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description of plugin
+func (c *GNMI) Description() string {
+ return "gNMI telemetry input plugin"
+}
+
+// Gather plugin measurements (unused)
+func (c *GNMI) Gather(_ telegraf.Accumulator) error {
+ return nil
+}
+
+func New() telegraf.Input {
+ return &GNMI{
+ Encoding: "proto",
+ Redial: internal.Duration{Duration: 10 * time.Second},
+ }
+}
+
+func init() {
+ inputs.Add("gnmi", New)
+ // Backwards compatible alias:
+ inputs.Add("cisco_telemetry_gnmi", New)
+}
diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go
new file mode 100644
index 0000000000000..c74fbcd4a5164
--- /dev/null
+++ b/plugins/inputs/gnmi/gnmi_test.go
@@ -0,0 +1,473 @@
+package gnmi
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/openconfig/gnmi/proto/gnmi"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+)
+
+func TestParsePath(t *testing.T) {
+ path := "/foo/bar/bla[shoo=woo][shoop=/woop/]/z"
+ parsed, err := parsePath("theorigin", path, "thetarget")
+
+ assert.NoError(t, err)
+ assert.Equal(t, parsed.Origin, "theorigin")
+ assert.Equal(t, parsed.Target, "thetarget")
+ assert.Equal(t, parsed.Element, []string{"foo", "bar", "bla[shoo=woo][shoop=/woop/]", "z"})
+ assert.Equal(t, parsed.Elem, []*gnmi.PathElem{{Name: "foo"}, {Name: "bar"},
+ {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}})
+
+ parsed, err = parsePath("", "", "")
+ assert.NoError(t, err)
+ assert.Equal(t, *parsed, gnmi.Path{})
+
+ parsed, err = parsePath("", "/foo[[", "")
+ assert.Nil(t, parsed)
+ assert.Equal(t, errors.New("Invalid gNMI path: /foo[[/"), err)
+}
+
+type MockServer struct {
+ SubscribeF func(gnmi.GNMI_SubscribeServer) error
+ GRPCServer *grpc.Server
+}
+
+func (s *MockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) {
+ return nil, nil
+}
+
+func (s *MockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) {
+ return nil, nil
+}
+
+func (s *MockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) {
+ return nil, nil
+}
+
+func (s *MockServer) Subscribe(server gnmi.GNMI_SubscribeServer) error {
+ return s.SubscribeF(server)
+}
+
+func TestWaitError(t *testing.T) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ grpcServer := grpc.NewServer()
+ gnmiServer := &MockServer{
+ SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
+ return fmt.Errorf("testerror")
+ },
+ GRPCServer: grpcServer,
+ }
+ gnmi.RegisterGNMIServer(grpcServer, gnmiServer)
+
+ plugin := &GNMI{
+ Log: testutil.Logger{},
+ Addresses: []string{listener.Addr().String()},
+ Encoding: "proto",
+ Redial: internal.Duration{Duration: 1 * time.Second},
+ }
+
+ var acc testutil.Accumulator
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := grpcServer.Serve(listener)
+ require.NoError(t, err)
+ }()
+
+ acc.WaitError(1)
+ plugin.Stop()
+ grpcServer.Stop()
+ wg.Wait()
+
+ require.Contains(t, acc.Errors,
+ errors.New("aborted gNMI subscription: rpc error: code = Unknown desc = testerror"))
+}
+
+func TestUsernamePassword(t *testing.T) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ grpcServer := grpc.NewServer()
+ gnmiServer := &MockServer{
+ SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
+ metadata, ok := metadata.FromIncomingContext(server.Context())
+ if !ok {
+ return errors.New("failed to get metadata")
+ }
+
+ username := metadata.Get("username")
+ if len(username) != 1 || username[0] != "theusername" {
+ return errors.New("wrong username")
+ }
+
+ password := metadata.Get("password")
+ if len(password) != 1 || password[0] != "thepassword" {
+ return errors.New("wrong password")
+ }
+
+ return errors.New("success")
+ },
+ GRPCServer: grpcServer,
+ }
+ gnmi.RegisterGNMIServer(grpcServer, gnmiServer)
+
+ plugin := &GNMI{
+ Log: testutil.Logger{},
+ Addresses: []string{listener.Addr().String()},
+ Username: "theusername",
+ Password: "thepassword",
+ Encoding: "proto",
+ Redial: internal.Duration{Duration: 1 * time.Second},
+ }
+
+ var acc testutil.Accumulator
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := grpcServer.Serve(listener)
+ require.NoError(t, err)
+ }()
+
+ acc.WaitError(1)
+ plugin.Stop()
+ grpcServer.Stop()
+ wg.Wait()
+
+ require.Contains(t, acc.Errors,
+ errors.New("aborted gNMI subscription: rpc error: code = Unknown desc = success"))
+}
+
+func mockGNMINotification() *gnmi.Notification {
+ return &gnmi.Notification{
+ Timestamp: 1543236572000000000,
+ Prefix: &gnmi.Path{
+ Origin: "type",
+ Elem: []*gnmi.PathElem{
+ {
+ Name: "model",
+ Key: map[string]string{"foo": "bar"},
+ },
+ },
+ Target: "subscription",
+ },
+ Update: []*gnmi.Update{
+ {
+ Path: &gnmi.Path{
+ Elem: []*gnmi.PathElem{
+ {Name: "some"},
+ {
+ Name: "path",
+ Key: map[string]string{"name": "str", "uint64": "1234"}},
+ },
+ },
+ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 5678}},
+ },
+ {
+ Path: &gnmi.Path{
+ Elem: []*gnmi.PathElem{
+ {Name: "other"},
+ {Name: "path"},
+ },
+ },
+ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "foobar"}},
+ },
+ {
+ Path: &gnmi.Path{
+ Elem: []*gnmi.PathElem{
+ {Name: "other"},
+ {Name: "this"},
+ },
+ },
+ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "that"}},
+ },
+ },
+ }
+}
+
+func TestNotification(t *testing.T) {
+ tests := []struct {
+ name string
+ plugin *GNMI
+ server *MockServer
+ expected []telegraf.Metric
+ }{
+ {
+ name: "multiple metrics",
+ plugin: &GNMI{
+ Log: testutil.Logger{},
+ Encoding: "proto",
+ Redial: internal.Duration{Duration: 1 * time.Second},
+ Subscriptions: []Subscription{
+ {
+ Name: "alias",
+ Origin: "type",
+ Path: "/model",
+ SubscriptionMode: "sample",
+ },
+ },
+ },
+ server: &MockServer{
+ SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
+ notification := mockGNMINotification()
+ server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}})
+ server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}})
+ notification.Prefix.Elem[0].Key["foo"] = "bar2"
+ notification.Update[0].Path.Elem[1].Key["name"] = "str2"
+ notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}}
+ server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}})
+ return nil
+ },
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "alias",
+ map[string]string{
+ "path": "type:/model",
+ "source": "127.0.0.1",
+ "foo": "bar",
+ "name": "str",
+ "uint64": "1234",
+ },
+ map[string]interface{}{
+ "some/path": int64(5678),
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "alias",
+ map[string]string{
+ "path": "type:/model",
+ "source": "127.0.0.1",
+ "foo": "bar",
+ },
+ map[string]interface{}{
+ "other/path": "foobar",
+ "other/this": "that",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "alias",
+ map[string]string{
+ "path": "type:/model",
+ "foo": "bar2",
+ "source": "127.0.0.1",
+ "name": "str2",
+ "uint64": "1234",
+ },
+ map[string]interface{}{
+ "some/path": "123",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "alias",
+ map[string]string{
+ "path": "type:/model",
+ "source": "127.0.0.1",
+ "foo": "bar2",
+ },
+ map[string]interface{}{
+ "other/path": "foobar",
+ "other/this": "that",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "full path field key",
+ plugin: &GNMI{
+ Log: testutil.Logger{},
+ Encoding: "proto",
+ Redial: internal.Duration{Duration: 1 * time.Second},
+ Subscriptions: []Subscription{
+ {
+ Name: "PHY_COUNTERS",
+ Origin: "type",
+ Path: "/state/port[port-id=*]/ethernet/oper-speed",
+ SubscriptionMode: "sample",
+ },
+ },
+ },
+ server: &MockServer{
+ SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
+ response := &gnmi.SubscribeResponse{
+ Response: &gnmi.SubscribeResponse_Update{
+ Update: &gnmi.Notification{
+ Timestamp: 1543236572000000000,
+ Prefix: &gnmi.Path{
+ Origin: "type",
+ Elem: []*gnmi.PathElem{
+ {
+ Name: "state",
+ },
+ {
+ Name: "port",
+ Key: map[string]string{"port-id": "1"},
+ },
+ {
+ Name: "ethernet",
+ },
+ {
+ Name: "oper-speed",
+ },
+ },
+ Target: "subscription",
+ },
+ Update: []*gnmi.Update{
+ {
+ Path: &gnmi.Path{},
+ Val: &gnmi.TypedValue{
+ Value: &gnmi.TypedValue_IntVal{IntVal: 42},
+ },
+ },
+ },
+ },
+ },
+ }
+ server.Send(response)
+ return nil
+ },
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "PHY_COUNTERS",
+ map[string]string{
+ "path": "type:/state/port/ethernet/oper-speed",
+ "source": "127.0.0.1",
+ "port_id": "1",
+ },
+ map[string]interface{}{
+ "oper_speed": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ tt.plugin.Addresses = []string{listener.Addr().String()}
+
+ grpcServer := grpc.NewServer()
+ tt.server.GRPCServer = grpcServer
+ gnmi.RegisterGNMIServer(grpcServer, tt.server)
+
+ var acc testutil.Accumulator
+ err = tt.plugin.Start(&acc)
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := grpcServer.Serve(listener)
+ require.NoError(t, err)
+ }()
+
+ acc.Wait(len(tt.expected))
+ tt.plugin.Stop()
+ grpcServer.Stop()
+ wg.Wait()
+
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime())
+ })
+ }
+}
+
+func TestRedial(t *testing.T) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ plugin := &GNMI{
+ Log: testutil.Logger{},
+ Addresses: []string{listener.Addr().String()},
+ Encoding: "proto",
+ Redial: internal.Duration{Duration: 10 * time.Millisecond},
+ }
+
+ grpcServer := grpc.NewServer()
+ gnmiServer := &MockServer{
+ SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
+ notification := mockGNMINotification()
+ server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}})
+ return nil
+ },
+ GRPCServer: grpcServer,
+ }
+ gnmi.RegisterGNMIServer(grpcServer, gnmiServer)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := grpcServer.Serve(listener)
+ require.NoError(t, err)
+ }()
+
+ var acc testutil.Accumulator
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ acc.Wait(2)
+ grpcServer.Stop()
+ wg.Wait()
+
+ // Restart gNMI server at the same address
+ listener, err = net.Listen("tcp", listener.Addr().String())
+ require.NoError(t, err)
+
+ grpcServer = grpc.NewServer()
+ gnmiServer = &MockServer{
+ SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
+ notification := mockGNMINotification()
+ notification.Prefix.Elem[0].Key["foo"] = "bar2"
+ notification.Update[0].Path.Elem[1].Key["name"] = "str2"
+ notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}}
+ server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}})
+ return nil
+ },
+ GRPCServer: grpcServer,
+ }
+ gnmi.RegisterGNMIServer(grpcServer, gnmiServer)
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := grpcServer.Serve(listener)
+ require.NoError(t, err)
+ }()
+
+ acc.Wait(4)
+ plugin.Stop()
+ grpcServer.Stop()
+ wg.Wait()
+}
diff --git a/plugins/inputs/graylog/README.md b/plugins/inputs/graylog/README.md
index 46712ea1e2df4..6a835f1d60a4f 100644
--- a/plugins/inputs/graylog/README.md
+++ b/plugins/inputs/graylog/README.md
@@ -1,4 +1,4 @@
-# GrayLog plugin
+# GrayLog Input Plugin
The Graylog plugin can collect data from remote Graylog service URLs.
@@ -7,7 +7,7 @@ Plugin currently support two type of end points:-
- multiple (Ex http://[graylog-server-ip]:12900/system/metrics/multiple)
- namespace (Ex http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace})
-End Point can be a mixe of one multiple end point and several namespaces end points
+End Point can be a mix of one multiple end point and several namespaces end points
Note: if namespace end point specified metrics array will be ignored for that call.
diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go
index 1e0439a421967..09a7f173b3ba9 100644
--- a/plugins/inputs/graylog/graylog.go
+++ b/plugins/inputs/graylog/graylog.go
@@ -14,7 +14,7 @@ import (
"time"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -47,7 +47,7 @@ type HTTPClient interface {
// req: HTTP request object
//
// Returns:
- // http.Response: HTTP respons object
+ // http.Response: HTTP response object
// error : Any error that may have occurred
MakeRequest(req *http.Request) (*http.Response, error)
diff --git a/plugins/inputs/haproxy/README.md b/plugins/inputs/haproxy/README.md
index 35b59524de6b6..86fbb986b696a 100644
--- a/plugins/inputs/haproxy/README.md
+++ b/plugins/inputs/haproxy/README.md
@@ -15,6 +15,10 @@ or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management.
## Make sure you specify the complete path to the stats endpoint
## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+ ## Credentials for basic HTTP authentication
+ # username = "admin"
+ # password = "admin"
+
## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
servers = ["http://myhaproxy.com:1936/haproxy?stats"]
diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go
index 9c22acad9ef1b..5a3de352822be 100644
--- a/plugins/inputs/haproxy/haproxy.go
+++ b/plugins/inputs/haproxy/haproxy.go
@@ -14,7 +14,7 @@ import (
"time"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -181,6 +181,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
if err != nil {
return fmt.Errorf("Unable to connect to haproxy server '%s': %s", addr, err)
}
+ defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("Unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode)
diff --git a/plugins/inputs/hddtemp/README.md b/plugins/inputs/hddtemp/README.md
index 3bafb4f21ada1..d2d3e4f13ec89 100644
--- a/plugins/inputs/hddtemp/README.md
+++ b/plugins/inputs/hddtemp/README.md
@@ -1,43 +1,41 @@
-# Hddtemp Input Plugin
+# HDDtemp Input Plugin
-This plugin reads data from hddtemp daemon
+This plugin reads data from hddtemp daemon.
-## Requirements
+Hddtemp should be installed and its daemon running.
-Hddtemp should be installed and its daemon running
-
-## Configuration
+### Configuration
```toml
[[inputs.hddtemp]]
-## By default, telegraf gathers temps data from all disks detected by the
-## hddtemp.
-##
-## Only collect temps from the selected disks.
-##
-## A * as the device name will return the temperature values of all disks.
-##
-# address = "127.0.0.1:7634"
-# devices = ["sda", "*"]
+ ## By default, telegraf gathers temps data from all disks detected by the
+ ## hddtemp.
+ ##
+ ## Only collect temps from the selected disks.
+ ##
+ ## A * as the device name will return the temperature values of all disks.
+ ##
+ # address = "127.0.0.1:7634"
+ # devices = ["sda", "*"]
```
-## Measurements
+### Metrics
- hddtemp
- - temperature
-
-Tags:
-- device
-- model
-- unit
-- status
-
+ - tags:
+ - device
+ - model
+ - unit
+ - status
+ - source
+ - fields:
+ - temperature
-## Example output
+### Example output
```
-> hddtemp,unit=C,status=,host=server1,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000
-> hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=38i 148165564700000000
-> hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=36i 1481655647000000000
+hddtemp,source=server1,unit=C,status=,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000
+hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=38i 148165564700000000
+hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=36i 1481655647000000000
```
diff --git a/plugins/inputs/hddtemp/hddtemp.go b/plugins/inputs/hddtemp/hddtemp.go
index dd4622df49e3e..0f084ac219bff 100644
--- a/plugins/inputs/hddtemp/hddtemp.go
+++ b/plugins/inputs/hddtemp/hddtemp.go
@@ -1,6 +1,8 @@
package hddtemp
import (
+ "net"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
@@ -42,8 +44,12 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
if h.fetcher == nil {
h.fetcher = gohddtemp.New()
}
- disks, err := h.fetcher.Fetch(h.Address)
+ source, _, err := net.SplitHostPort(h.Address)
+ if err != nil {
+ source = h.Address
+ }
+ disks, err := h.fetcher.Fetch(h.Address)
if err != nil {
return err
}
@@ -56,6 +62,7 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
"model": disk.Model,
"unit": disk.Unit,
"status": disk.Status,
+ "source": source,
}
fields := map[string]interface{}{
diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go
index e09e833e73aed..f299c2ac66c4b 100644
--- a/plugins/inputs/hddtemp/hddtemp_test.go
+++ b/plugins/inputs/hddtemp/hddtemp_test.go
@@ -36,6 +36,7 @@ func newMockFetcher() *mockFetcher {
func TestFetch(t *testing.T) {
hddtemp := &HDDTemp{
fetcher: newMockFetcher(),
+ Address: "localhost",
Devices: []string{"*"},
}
@@ -58,6 +59,7 @@ func TestFetch(t *testing.T) {
"model": "Model1",
"unit": "C",
"status": "",
+ "source": "localhost",
},
},
{
@@ -69,6 +71,7 @@ func TestFetch(t *testing.T) {
"model": "Model2",
"unit": "C",
"status": "",
+ "source": "localhost",
},
},
}
diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md
index 240fd90c98b20..59abd82562672 100644
--- a/plugins/inputs/http/README.md
+++ b/plugins/inputs/http/README.md
@@ -26,6 +26,10 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
+ ## Optional file with Bearer token
+ ## file content is added as an Authorization header
+ # bearer_token = "/path/to/file"
+
## Optional HTTP Basic Auth Credentials
# username = "username"
# password = "pa$$word"
@@ -40,6 +44,9 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The
## Amount of time allowed to complete the HTTP request
# timeout = "5s"
+ ## List of success status codes
+ # success_status_codes = [200]
+
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go
index 6d2d528baa820..c247d40076620 100644
--- a/plugins/inputs/http/http.go
+++ b/plugins/inputs/http/http.go
@@ -1,7 +1,6 @@
package http
import (
- "errors"
"fmt"
"io"
"io/ioutil"
@@ -12,7 +11,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -30,6 +29,11 @@ type HTTP struct {
Password string `toml:"password"`
tls.ClientConfig
+ // Absolute path to file with Bearer token
+ BearerToken string `toml:"bearer_token"`
+
+ SuccessStatusCodes []int `toml:"success_status_codes"`
+
Timeout internal.Duration `toml:"timeout"`
client *http.Client
@@ -51,6 +55,10 @@ var sampleConfig = `
## Optional HTTP headers
# headers = {"X-Special-Header" = "Special-Value"}
+ ## Optional file with Bearer token
+ ## file content is added as an Authorization header
+ # bearer_token = "/path/to/file"
+
## Optional HTTP Basic Auth Credentials
# username = "username"
# password = "pa$$word"
@@ -72,6 +80,9 @@ var sampleConfig = `
## Amount of time allowed to complete the HTTP request
# timeout = "5s"
+ ## List of success status codes
+ # success_status_codes = [200]
+
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -89,27 +100,30 @@ func (*HTTP) Description() string {
return "Read formatted metrics from one or more HTTP endpoints"
}
-// Gather takes in an accumulator and adds the metrics that the Input
-// gathers. This is called every "interval"
-func (h *HTTP) Gather(acc telegraf.Accumulator) error {
- if h.parser == nil {
- return errors.New("Parser is not set")
+func (h *HTTP) Init() error {
+ tlsCfg, err := h.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
}
- if h.client == nil {
- tlsCfg, err := h.ClientConfig.TLSConfig()
- if err != nil {
- return err
- }
- h.client = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: tlsCfg,
- Proxy: http.ProxyFromEnvironment,
- },
- Timeout: h.Timeout.Duration,
- }
+ h.client = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ Proxy: http.ProxyFromEnvironment,
+ },
+ Timeout: h.Timeout.Duration,
+ }
+
+ // Set default as [200]
+ if len(h.SuccessStatusCodes) == 0 {
+ h.SuccessStatusCodes = []int{200}
}
+ return nil
+}
+// Gather takes in an accumulator and adds the metrics that the Input
+// gathers. This is called every "interval"
+func (h *HTTP) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
for _, u := range h.URLs {
wg.Add(1)
@@ -146,12 +160,22 @@ func (h *HTTP) gatherURL(
if err != nil {
return err
}
+ defer body.Close()
request, err := http.NewRequest(h.Method, url, body)
if err != nil {
return err
}
+ if h.BearerToken != "" {
+ token, err := ioutil.ReadFile(h.BearerToken)
+ if err != nil {
+ return err
+ }
+ bearer := "Bearer " + strings.Trim(string(token), "\n")
+ request.Header.Set("Authorization", bearer)
+ }
+
if h.ContentEncoding == "gzip" {
request.Header.Set("Content-Encoding", "gzip")
}
@@ -174,12 +198,19 @@ func (h *HTTP) gatherURL(
}
defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("Received status code %d (%s), expected %d (%s)",
+ responseHasSuccessCode := false
+ for _, statusCode := range h.SuccessStatusCodes {
+ if resp.StatusCode == statusCode {
+ responseHasSuccessCode = true
+ break
+ }
+ }
+
+ if !responseHasSuccessCode {
+ return fmt.Errorf("received status code %d (%s), expected any value out of %v",
resp.StatusCode,
http.StatusText(resp.StatusCode),
- http.StatusOK,
- http.StatusText(http.StatusOK))
+ h.SuccessStatusCodes)
}
b, err := ioutil.ReadAll(resp.Body)
@@ -202,16 +233,16 @@ func (h *HTTP) gatherURL(
return nil
}
-func makeRequestBodyReader(contentEncoding, body string) (io.Reader, error) {
- var err error
+func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) {
var reader io.Reader = strings.NewReader(body)
if contentEncoding == "gzip" {
- reader, err = internal.CompressWithGzip(reader)
+ rc, err := internal.CompressWithGzip(reader)
if err != nil {
return nil, err
}
+ return rc, nil
}
- return reader, nil
+ return ioutil.NopCloser(reader), nil
}
func init() {
diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go
index 7ac05e1356c32..993eda7321c0f 100644
--- a/plugins/inputs/http/http_test.go
+++ b/plugins/inputs/http/http_test.go
@@ -37,6 +37,7 @@ func TestHTTPwithJSONFormat(t *testing.T) {
plugin.SetParser(p)
var acc testutil.Accumulator
+ plugin.Init()
require.NoError(t, acc.GatherError(plugin.Gather))
require.Len(t, acc.Metrics, 1)
@@ -78,6 +79,7 @@ func TestHTTPHeaders(t *testing.T) {
plugin.SetParser(p)
var acc testutil.Accumulator
+ plugin.Init()
require.NoError(t, acc.GatherError(plugin.Gather))
}
@@ -100,51 +102,58 @@ func TestInvalidStatusCode(t *testing.T) {
plugin.SetParser(p)
var acc testutil.Accumulator
+ plugin.Init()
require.Error(t, acc.GatherError(plugin.Gather))
}
-func TestMethod(t *testing.T) {
+func TestSuccessStatusCodes(t *testing.T) {
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.Method == "POST" {
- w.WriteHeader(http.StatusOK)
- } else {
- w.WriteHeader(http.StatusNotFound)
- }
+ w.WriteHeader(http.StatusAccepted)
}))
defer fakeServer.Close()
+ url := fakeServer.URL + "/endpoint"
plugin := &plugin.HTTP{
- URLs: []string{fakeServer.URL},
- Method: "POST",
+ URLs: []string{url},
+ SuccessStatusCodes: []int{200, 202},
}
+ metricName := "metricName"
p, _ := parsers.NewParser(&parsers.Config{
DataFormat: "json",
- MetricName: "metricName",
+ MetricName: metricName,
})
plugin.SetParser(p)
var acc testutil.Accumulator
+ plugin.Init()
require.NoError(t, acc.GatherError(plugin.Gather))
}
-func TestParserNotSet(t *testing.T) {
+func TestMethod(t *testing.T) {
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path == "/endpoint" {
- _, _ = w.Write([]byte(simpleJSON))
+ if r.Method == "POST" {
+ w.WriteHeader(http.StatusOK)
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
defer fakeServer.Close()
- url := fakeServer.URL + "/endpoint"
plugin := &plugin.HTTP{
- URLs: []string{url},
+ URLs: []string{fakeServer.URL},
+ Method: "POST",
}
+ p, _ := parsers.NewParser(&parsers.Config{
+ DataFormat: "json",
+ MetricName: "metricName",
+ })
+ plugin.SetParser(p)
+
var acc testutil.Accumulator
- require.Error(t, acc.GatherError(plugin.Gather))
+ plugin.Init()
+ require.NoError(t, acc.GatherError(plugin.Gather))
}
const simpleJSON = `
@@ -237,6 +246,7 @@ func TestBodyAndContentEncoding(t *testing.T) {
tt.plugin.SetParser(parser)
var acc testutil.Accumulator
+ tt.plugin.Init()
err = tt.plugin.Gather(&acc)
require.NoError(t, err)
})
diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md
index f5a85318979a8..05e48058667ef 100644
--- a/plugins/inputs/http_listener_v2/README.md
+++ b/plugins/inputs/http_listener_v2/README.md
@@ -1,7 +1,7 @@
# HTTP Listener v2 Input Plugin
HTTP Listener v2 is a service input plugin that listens for metrics sent via
-HTTP. Metrics may be sent in any supported [data format][data_format].
+HTTP. Metrics may be sent in any supported [data format][data_format].
**Note:** The plugin previously known as `http_listener` has been renamed
`influxdb_listener`. If you would like Telegraf to act as a proxy/relay for
@@ -31,6 +31,10 @@ This is a sample configuration for the plugin.
## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
# max_body_size = "500MB"
+ ## Part of the request to consume. Available options are "body" and
+ ## "query".
+ # data_source = "body"
+
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
@@ -44,6 +48,11 @@ This is a sample configuration for the plugin.
# basic_username = "foobar"
# basic_password = "barfoo"
+ ## Optional setting to map http headers into tags
+ ## If the http header is not present on the request, no corresponding tag will be added
+ ## If multiple instances of the http header are present, only the first value will be used
+ # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -53,7 +62,7 @@ This is a sample configuration for the plugin.
### Metrics:
-Metrics are created from the request body and are dependant on the value of `data_format`.
+Metrics are collected from the part of the request specified by the `data_source` param and are parsed depending on the value of `data_format`.
### Troubleshooting:
@@ -67,5 +76,10 @@ curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary 'cpu_load_short,ho
curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary '{"value1": 42, "value2": 42}'
```
+**Send query params**
+```
+curl -i -XGET 'http://localhost:8080/telegraf?host=server01&value=0.42'
+```
+
[data_format]: /docs/DATA_FORMATS_INPUT.md
[influxdb_listener]: /plugins/inputs/influxdb_listener/README.md
diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go
index 3fd8989f97124..1023c0d10bcf5 100644
--- a/plugins/inputs/http_listener_v2/http_listener_v2.go
+++ b/plugins/inputs/http_listener_v2/http_listener_v2.go
@@ -5,15 +5,16 @@ import (
"crypto/subtle"
"crypto/tls"
"io/ioutil"
- "log"
"net"
"net/http"
+ "net/url"
+ "strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -23,23 +24,31 @@ import (
// 500 MB
const defaultMaxBodySize = 500 * 1024 * 1024
+const (
+ body = "body"
+ query = "query"
+)
+
+// TimeFunc provides a timestamp for the metrics
type TimeFunc func() time.Time
+// HTTPListenerV2 is an input plugin that collects external metrics sent via HTTP
type HTTPListenerV2 struct {
- ServiceAddress string
- Path string
- Methods []string
- ReadTimeout internal.Duration
- WriteTimeout internal.Duration
- MaxBodySize internal.Size
- Port int
-
+ ServiceAddress string `toml:"service_address"`
+ Path string `toml:"path"`
+ Methods []string `toml:"methods"`
+ DataSource string `toml:"data_source"`
+ ReadTimeout internal.Duration `toml:"read_timeout"`
+ WriteTimeout internal.Duration `toml:"write_timeout"`
+ MaxBodySize internal.Size `toml:"max_body_size"`
+ Port int `toml:"port"`
+ BasicUsername string `toml:"basic_username"`
+ BasicPassword string `toml:"basic_password"`
+ HTTPHeaderTags map[string]string `toml:"http_header_tags"`
tlsint.ServerConfig
- BasicUsername string
- BasicPassword string
-
TimeFunc
+ Log telegraf.Logger
wg sync.WaitGroup
@@ -68,7 +77,11 @@ const sampleConfig = `
## 0 means to use the default of 524,288,00 bytes (500 mebibytes)
# max_body_size = "500MB"
- ## Set one or more allowed client CA certificate file names to
+ ## Part of the request to consume. Available options are "body" and
+ ## "query".
+ # data_source = "body"
+
+ ## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
@@ -81,6 +94,11 @@ const sampleConfig = `
# basic_username = "foobar"
# basic_password = "barfoo"
+ ## Optional setting to map http headers into tags
+ ## If the http header is not present on the request, no corresponding tag will be added
+ ## If multiple instances of the http header are present, only the first value will be used
+ # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -150,7 +168,7 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error {
server.Serve(h.listener)
}()
- log.Printf("I! Started HTTP listener V2 service on %s\n", h.ServiceAddress)
+ h.Log.Infof("Listening on %s", listener.Addr().String())
return nil
}
@@ -159,16 +177,16 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error {
func (h *HTTPListenerV2) Stop() {
h.listener.Close()
h.wg.Wait()
-
- log.Println("I! Stopped HTTP listener V2 service on ", h.ServiceAddress)
}
func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) {
- if req.URL.Path == h.Path {
- h.AuthenticateIfSet(h.serveWrite, res, req)
- } else {
- h.AuthenticateIfSet(http.NotFound, res, req)
+ handler := h.serveWrite
+
+ if req.URL.Path != h.Path {
+ handler = http.NotFound
}
+
+ h.authenticateIfSet(handler, res, req)
}
func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) {
@@ -191,15 +209,52 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request)
return
}
- // Handle gzip request bodies
+ var bytes []byte
+ var ok bool
+
+ switch strings.ToLower(h.DataSource) {
+ case query:
+ bytes, ok = h.collectQuery(res, req)
+ default:
+ bytes, ok = h.collectBody(res, req)
+ }
+
+ if !ok {
+ return
+ }
+
+ metrics, err := h.Parse(bytes)
+ if err != nil {
+ h.Log.Debugf("Parse error: %s", err.Error())
+ badRequest(res)
+ return
+ }
+
+ for _, m := range metrics {
+ for headerName, measurementName := range h.HTTPHeaderTags {
+ headerValues := req.Header.Get(headerName)
+ if len(headerValues) > 0 {
+ m.AddTag(measurementName, headerValues)
+ }
+ }
+
+ h.acc.AddMetric(m)
+ }
+
+ res.WriteHeader(http.StatusNoContent)
+}
+
+func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) ([]byte, bool) {
body := req.Body
+
+ // Handle gzip request bodies
if req.Header.Get("Content-Encoding") == "gzip" {
var err error
body, err = gzip.NewReader(req.Body)
if err != nil {
- log.Println("D! " + err.Error())
+ h.Log.Debug(err.Error())
badRequest(res)
- return
+ return nil, false
}
defer body.Close()
}
@@ -208,19 +263,23 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request)
bytes, err := ioutil.ReadAll(body)
if err != nil {
tooLarge(res)
- return
+ return nil, false
}
- metrics, err := h.Parse(bytes)
+ return bytes, true
+}
+
+func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request) ([]byte, bool) {
+ rawQuery := req.URL.RawQuery
+
+ query, err := url.QueryUnescape(rawQuery)
if err != nil {
- log.Println("D! " + err.Error())
+ h.Log.Debugf("Error parsing query: %s", err.Error())
badRequest(res)
- return
- }
- for _, m := range metrics {
- h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
+ return nil, false
}
- res.WriteHeader(http.StatusNoContent)
+
+ return []byte(query), true
}
func tooLarge(res http.ResponseWriter) {
@@ -246,7 +305,7 @@ func badRequest(res http.ResponseWriter) {
res.Write([]byte(`{"error":"http: bad request"}`))
}
-func (h *HTTPListenerV2) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
+func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
if h.BasicUsername != "" && h.BasicPassword != "" {
reqUsername, reqPassword, ok := req.BasicAuth()
if !ok ||
@@ -269,6 +328,7 @@ func init() {
TimeFunc: time.Now,
Path: "/telegraf",
Methods: []string{"POST", "PUT"},
+ DataSource: body,
}
})
}
diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go
index ab0c89f81bebb..4457fcacda79d 100644
--- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go
+++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go
@@ -16,7 +16,6 @@ import (
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/testutil"
-
"github.com/stretchr/testify/require"
)
@@ -47,12 +46,14 @@ func newTestHTTPListenerV2() *HTTPListenerV2 {
parser, _ := parsers.NewInfluxParser()
listener := &HTTPListenerV2{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:0",
Path: "/write",
Methods: []string{"POST"},
Parser: parser,
TimeFunc: time.Now,
MaxBodySize: internal.Size{Size: 70000},
+ DataSource: "body",
}
return listener
}
@@ -68,6 +69,7 @@ func newTestHTTPSListenerV2() *HTTPListenerV2 {
parser, _ := parsers.NewInfluxParser()
listener := &HTTPListenerV2{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:0",
Path: "/write",
Methods: []string{"POST"},
@@ -231,6 +233,7 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) {
parser, _ := parsers.NewInfluxParser()
listener := &HTTPListenerV2{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:0",
Path: "/write",
Methods: []string{"POST"},
@@ -253,6 +256,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
parser, _ := parsers.NewInfluxParser()
listener := &HTTPListenerV2{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:0",
Path: "/write",
Methods: []string{"POST"},
@@ -377,5 +381,119 @@ func TestWriteHTTPEmpty(t *testing.T) {
require.EqualValues(t, 204, resp.StatusCode)
}
-const hugeMetric = `super_long_metric,foo=bar clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i
+func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) {
+ listener := newTestHTTPListenerV2()
+ listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"}
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("Present_http_header_1", "PRESENT_HTTP_VALUE_1")
+ req.Header.Set("Present_http_header_2", "PRESENT_HTTP_VALUE_2")
+
+ resp, err := http.DefaultClient.Do(req)
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": "server01", "presentMeasurementKey1": "PRESENT_HTTP_VALUE_1", "presentMeasurementKey2": "PRESENT_HTTP_VALUE_2"},
+ )
+
+ // post single message to listener
+ resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": "server01", "presentMeasurementKey1": "PRESENT_HTTP_VALUE_1", "presentMeasurementKey2": "PRESENT_HTTP_VALUE_2"},
+ )
+}
+
+func TestWriteHTTPTransformHeaderValuesToTagsBulkWrite(t *testing.T) {
+ listener := newTestHTTPListenerV2()
+ listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "Present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"}
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsgs)))
+ require.NoError(t, err)
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("Present_http_header_1", "PRESENT_HTTP_VALUE_1")
+ req.Header.Set("Present_http_header_2", "PRESENT_HTTP_VALUE_2")
+
+ resp, err := http.DefaultClient.Do(req)
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(2)
+ hostTags := []string{"server02", "server03", "server04", "server05", "server06"}
+ for _, hostTag := range hostTags {
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": hostTag, "presentMeasurementKey1": "PRESENT_HTTP_VALUE_1", "presentMeasurementKey2": "PRESENT_HTTP_VALUE_2"},
+ )
+ }
+}
+
+func TestWriteHTTPQueryParams(t *testing.T) {
+ parser, _ := parsers.NewFormUrlencodedParser("query_measurement", nil, []string{"tagKey"})
+ listener := newTestHTTPListenerV2()
+ listener.DataSource = "query"
+ listener.Parser = parser
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ resp, err := http.Post(createURL(listener, "http", "/write", "tagKey=tagValue&fieldKey=42"), "", bytes.NewBuffer([]byte(emptyMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "query_measurement",
+ map[string]interface{}{"fieldKey": float64(42)},
+ map[string]string{"tagKey": "tagValue"},
+ )
+}
+
+func TestWriteHTTPFormData(t *testing.T) {
+ parser, _ := parsers.NewFormUrlencodedParser("query_measurement", nil, []string{"tagKey"})
+ listener := newTestHTTPListenerV2()
+ listener.Parser = parser
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ resp, err := http.PostForm(createURL(listener, "http", "/write", ""), url.Values{
+ "tagKey": {"tagValue"},
+ "fieldKey": {"42"},
+ })
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "query_measurement",
+ map[string]interface{}{"fieldKey": float64(42)},
+ map[string]string{"tagKey": "tagValue"},
+ )
+}
+
+// The term 'master_repl' used here is archaic language from redis
+const hugeMetric = `super_long_metric,foo=bar clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i
`
diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md
index 7c66928e22b50..889b6f4f366e3 100644
--- a/plugins/inputs/http_response/README.md
+++ b/plugins/inputs/http_response/README.md
@@ -4,12 +4,16 @@ This input plugin checks HTTP/HTTPS connections.
### Configuration:
-```
+```toml
# HTTP/HTTPS request given an address a method and a timeout
[[inputs.http_response]]
+ ## Deprecated in 1.12, use 'urls'
## Server address (default http://localhost)
# address = "http://localhost"
+ ## List of urls to query.
+ # urls = ["http://localhost"]
+
## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set)
# http_proxy = "http://localhost:8888"
@@ -22,11 +26,28 @@ This input plugin checks HTTP/HTTPS connections.
## Whether to follow redirects from the server (defaults to false)
# follow_redirects = false
+ ## Optional file with Bearer token
+ ## file content is added as an Authorization header
+ # bearer_token = "/path/to/file"
+
+ ## Optional HTTP Basic Auth Credentials
+ # username = "username"
+ # password = "pa$$word"
+
## Optional HTTP Request Body
# body = '''
# {'fake':'data'}
# '''
+ ## Optional name of the field that will contain the body of the response.
+ ## By default it is set to an empty String indicating that the body's content won't be added
+ # response_body_field = ''
+
+ ## Maximum allowed HTTP response body size in bytes.
+ ## 0 means to use the default of 32MiB.
+ ## If the response body size exceeds this limit a "body_read_error" will be raised
+ # response_body_max_size = "32MiB"
+
## Optional substring or regex match in body of the response (case sensitive)
# response_string_match = "\"service_status\": \"up\""
# response_string_match = "ok"
@@ -42,6 +63,14 @@ This input plugin checks HTTP/HTTPS connections.
## HTTP Request Headers (all values must be strings)
# [inputs.http_response.headers]
# Host = "github.com"
+
+ ## Optional setting to map response http headers into tags
+ ## If the http header is not present on the request, no corresponding tag will be added
+ ## If multiple instances of the http header are present, only the first value will be used
+ # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+
+ ## Interface to use when dialing an address
+ # interface = "eth0"
```
### Metrics:
@@ -54,6 +83,7 @@ This input plugin checks HTTP/HTTPS connections.
- result ([see below](#result--result_code))
- fields:
- response_time (float, seconds)
+ - content_length (int, response body length)
- response_string_match (int, 0 = mismatch / body read error, 1 = match)
- http_response_code (int, response status code)
- result_type (string, deprecated in 1.6: use `result` tag and `result_code` field)
@@ -69,7 +99,7 @@ This tag is used to expose network and plugin errors. HTTP errors are considered
--------------------------|-------------------------|-----------|
|success | 0 |The HTTP request completed, even if the HTTP code represents an error|
|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex. HTTP errors with content in their body (like 4xx, 5xx) will trigger this error|
-|body_read_error | 2 |The option `response_string_match` was used, but the plugin wans't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error|
+|body_read_error | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error. Or the option `response_body_field` was used and the content of the response body was not a valid utf-8. Or the size of the body of the response exceeded the `response_body_max_size` |
|connection_failed | 3 |Catch all for any network error not specifically handled by the plugin|
|timeout | 4 |The plugin timed out while awaiting the HTTP connection to complete|
|dns_error | 5 |There was a DNS error while attempting to connect to the host|
@@ -78,5 +108,5 @@ This tag is used to expose network and plugin errors. HTTP errors are considered
### Example Output:
```
-http_response,method=GET,server=http://www.github.com,status_code=200,result=success http_response_code=200i,response_time=6.223266528,result_type="success",result_code=0i 1459419354977857955
+http_response,method=GET,result=success,server=http://github.com,status_code=200 content_length=87878i,http_response_code=200i,response_time=0.937655534,result_code=0i,result_type="success" 1565839598000000000
```
diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go
index 1f1f687070f11..33888503b068f 100644
--- a/plugins/inputs/http_response/http_response.go
+++ b/plugins/inputs/http_response/http_response.go
@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"net"
"net/http"
"net/url"
@@ -13,25 +12,44 @@ import (
"strconv"
"strings"
"time"
+ "unicode/utf8"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
+const (
+ // defaultResponseBodyMaxSize is the default maximum response body size, in bytes.
+ // if the response body is over this size, we will raise a body_read_error.
+ defaultResponseBodyMaxSize = 32 * 1024 * 1024
+)
+
// HTTPResponse struct
type HTTPResponse struct {
- Address string
- HTTPProxy string `toml:"http_proxy"`
- Body string
- Method string
- ResponseTimeout internal.Duration
- Headers map[string]string
- FollowRedirects bool
+ Address string // deprecated in 1.12
+ URLs []string `toml:"urls"`
+ HTTPProxy string `toml:"http_proxy"`
+ Body string
+ Method string
+ ResponseTimeout internal.Duration
+ HTTPHeaderTags map[string]string `toml:"http_header_tags"`
+ Headers map[string]string
+ FollowRedirects bool
+ // Absolute path to file with Bearer token
+ BearerToken string `toml:"bearer_token"`
+ ResponseBodyField string `toml:"response_body_field"`
+ ResponseBodyMaxSize internal.Size `toml:"response_body_max_size"`
ResponseStringMatch string
+ Interface string
+ // HTTP Basic Auth Credentials
+ Username string `toml:"username"`
+ Password string `toml:"password"`
tls.ClientConfig
+ Log telegraf.Logger
+
compiledStringMatch *regexp.Regexp
client *http.Client
}
@@ -42,9 +60,13 @@ func (h *HTTPResponse) Description() string {
}
var sampleConfig = `
+ ## Deprecated in 1.12, use 'urls'
## Server address (default http://localhost)
# address = "http://localhost"
+ ## List of urls to query.
+ # urls = ["http://localhost"]
+
## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set)
# http_proxy = "http://localhost:8888"
@@ -57,12 +79,29 @@ var sampleConfig = `
## Whether to follow redirects from the server (defaults to false)
# follow_redirects = false
+ ## Optional file with Bearer token
+ ## file content is added as an Authorization header
+ # bearer_token = "/path/to/file"
+
+ ## Optional HTTP Basic Auth Credentials
+ # username = "username"
+ # password = "pa$$word"
+
## Optional HTTP Request Body
# body = '''
# {'fake':'data'}
# '''
- ## Optional substring or regex match in body of the response
+ ## Optional name of the field that will contain the body of the response.
+ ## By default it is set to an empty String indicating that the body's content won't be added
+ # response_body_field = ''
+
+ ## Maximum allowed HTTP response body size in bytes.
+ ## 0 means to use the default of 32MiB.
+ ## If the response body size exceeds this limit a "body_read_error" will be raised
+ # response_body_max_size = "32MiB"
+
+ ## Optional substring or regex match in body of the response (case sensitive)
# response_string_match = "\"service_status\": \"up\""
# response_string_match = "ok"
# response_string_match = "\".*_status\".?:.?\"up\""
@@ -77,6 +116,14 @@ var sampleConfig = `
## HTTP Request Headers (all values must be strings)
# [inputs.http_response.headers]
# Host = "github.com"
+
+ ## Optional setting to map response http headers into tags
+ ## If the http header is not present on the request, no corresponding tag will be added
+ ## If multiple instances of the http header are present, only the first value will be used
+ # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+
+ ## Interface to use when dialing an address
+ # interface = "eth0"
`
// SampleConfig returns the plugin SampleConfig
@@ -103,16 +150,27 @@ func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) {
}
}
-// CreateHttpClient creates an http client which will timeout at the specified
+// createHttpClient creates an http client which will timeout at the specified
// timeout period and can follow redirects if specified
func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
tlsCfg, err := h.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
+
+ dialer := &net.Dialer{}
+
+ if h.Interface != "" {
+ dialer.LocalAddr, err = localAddress(h.Interface)
+ if err != nil {
+ return nil, err
+ }
+ }
+
client := &http.Client{
Transport: &http.Transport{
Proxy: getProxyFunc(h.HTTPProxy),
+ DialContext: dialer.DialContext,
DisableKeepAlives: true,
TLSClientConfig: tlsCfg,
},
@@ -121,12 +179,33 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
if h.FollowRedirects == false {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- return ErrRedirectAttempted
+ return http.ErrUseLastResponse
}
}
return client, nil
}
+func localAddress(interfaceName string) (net.Addr, error) {
+ i, err := net.InterfaceByName(interfaceName)
+ if err != nil {
+ return nil, err
+ }
+
+ addrs, err := i.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, addr := range addrs {
+ if naddr, ok := addr.(*net.IPNet); ok {
+ // leaving port set to zero to let kernel pick
+ return &net.TCPAddr{IP: naddr.IP}, nil
+ }
+ }
+
+ return nil, fmt.Errorf("cannot create local address for interface %q", interfaceName)
+}
+
func setResult(result_string string, fields map[string]interface{}, tags map[string]string) {
result_codes := map[string]int{
"success": 0,
@@ -171,20 +250,29 @@ func setError(err error, fields map[string]interface{}, tags map[string]string)
}
// HTTPGather gathers all fields and returns any errors it encounters
-func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string, error) {
+func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]string, error) {
// Prepare fields and tags
fields := make(map[string]interface{})
- tags := map[string]string{"server": h.Address, "method": h.Method}
+ tags := map[string]string{"server": u, "method": h.Method}
var body io.Reader
if h.Body != "" {
body = strings.NewReader(h.Body)
}
- request, err := http.NewRequest(h.Method, h.Address, body)
+ request, err := http.NewRequest(h.Method, u, body)
if err != nil {
return nil, nil, err
}
+ if h.BearerToken != "" {
+ token, err := ioutil.ReadFile(h.BearerToken)
+ if err != nil {
+ return nil, nil, err
+ }
+ bearer := "Bearer " + strings.Trim(string(token), "\n")
+ request.Header.Add("Authorization", bearer)
+ }
+
for key, val := range h.Headers {
request.Header.Add(key, val)
if key == "Host" {
@@ -192,6 +280,10 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string,
}
}
+ if h.Username != "" || h.Password != "" {
+ request.SetBasicAuth(h.Username, h.Password)
+ }
+
// Start Timer
start := time.Now()
resp, err := h.client.Do(request)
@@ -201,28 +293,19 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string,
// HTTP error codes do not generate errors in the net/http library
if err != nil {
// Log error
- log.Printf("D! Network error while polling %s: %s", h.Address, err.Error())
+ h.Log.Debugf("Network error while polling %s: %s", u, err.Error())
// Get error details
netErr := setError(err, fields, tags)
- // If recognize the returnded error, get out
+ // If recognize the returned error, get out
if netErr != nil {
return fields, tags, nil
}
// Any error not recognized by `set_error` is considered a "connection_failed"
setResult("connection_failed", fields, tags)
-
- // If the error is a redirect we continue processing and log the HTTP code
- urlError, isUrlError := err.(*url.Error)
- if !h.FollowRedirects && isUrlError && urlError.Err == ErrRedirectAttempted {
- err = nil
- } else {
- // If the error isn't a timeout or a redirect stop
- // processing the request
- return fields, tags, nil
- }
+ return fields, tags, nil
}
if _, ok := fields["response_time"]; !ok {
@@ -231,26 +314,46 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string,
// This function closes the response body, as
// required by the net/http library
- defer func() {
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
- }()
+ defer resp.Body.Close()
+
+ // Add the response headers
+ for headerName, tag := range h.HTTPHeaderTags {
+ headerValues, foundHeader := resp.Header[headerName]
+ if foundHeader && len(headerValues) > 0 {
+ tags[tag] = headerValues[0]
+ }
+ }
// Set log the HTTP response code
tags["status_code"] = strconv.Itoa(resp.StatusCode)
fields["http_response_code"] = resp.StatusCode
- // Check the response for a regex match.
- if h.ResponseStringMatch != "" {
+ if h.ResponseBodyMaxSize.Size == 0 {
+ h.ResponseBodyMaxSize.Size = defaultResponseBodyMaxSize
+ }
+ bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, h.ResponseBodyMaxSize.Size+1))
+ // Check first if the response body size exceeds the limit.
+ if err == nil && int64(len(bodyBytes)) > h.ResponseBodyMaxSize.Size {
+ h.setBodyReadError("The body of the HTTP Response is too large", bodyBytes, fields, tags)
+ return fields, tags, nil
+ } else if err != nil {
+ h.setBodyReadError(fmt.Sprintf("Failed to read body of HTTP Response : %s", err.Error()), bodyBytes, fields, tags)
+ return fields, tags, nil
+ }
- bodyBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- log.Printf("D! Failed to read body of HTTP Response : %s", err)
- setResult("body_read_error", fields, tags)
- fields["response_string_match"] = 0
+ // Add the body of the response if expected
+ if len(h.ResponseBodyField) > 0 {
+ // Check that the content of response contains only valid utf-8 characters.
+ if !utf8.Valid(bodyBytes) {
+ h.setBodyReadError("The body of the HTTP Response is not a valid utf-8 string", bodyBytes, fields, tags)
return fields, tags, nil
}
+ fields[h.ResponseBodyField] = string(bodyBytes)
+ }
+ fields["content_length"] = len(bodyBytes)
+ // Check the response for a regex match.
+ if h.ResponseStringMatch != "" {
if h.compiledStringMatch.Match(bodyBytes) {
setResult("success", fields, tags)
fields["response_string_match"] = 1
@@ -265,6 +368,16 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string,
return fields, tags, nil
}
+// Set result in case of a body read error
+func (h *HTTPResponse) setBodyReadError(error_msg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) {
+ h.Log.Debugf(error_msg)
+ setResult("body_read_error", fields, tags)
+ fields["content_length"] = len(bodyBytes)
+ if h.ResponseStringMatch != "" {
+ fields["response_string_match"] = 0
+ }
+}
+
// Gather gets all metric fields and tags and returns any errors it encounters
func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
// Compile the body regex if it exist
@@ -284,20 +397,15 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
if h.Method == "" {
h.Method = "GET"
}
- if h.Address == "" {
- h.Address = "http://localhost"
- }
- addr, err := url.Parse(h.Address)
- if err != nil {
- return err
- }
- if addr.Scheme != "http" && addr.Scheme != "https" {
- return errors.New("Only http and https are supported")
- }
- // Prepare data
- var fields map[string]interface{}
- var tags map[string]string
+ if len(h.URLs) == 0 {
+ if h.Address == "" {
+ h.URLs = []string{"http://localhost"}
+ } else {
+ h.Log.Warn("'address' deprecated in telegraf 1.12, please use 'urls'")
+ h.URLs = []string{h.Address}
+ }
+ }
if h.client == nil {
client, err := h.createHttpClient()
@@ -307,14 +415,33 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
h.client = client
}
- // Gather data
- fields, tags, err = h.httpGather()
- if err != nil {
- return err
+ for _, u := range h.URLs {
+ addr, err := url.Parse(u)
+ if err != nil {
+ acc.AddError(err)
+ continue
+ }
+
+ if addr.Scheme != "http" && addr.Scheme != "https" {
+ acc.AddError(errors.New("Only http and https are supported"))
+ continue
+ }
+
+ // Prepare data
+ var fields map[string]interface{}
+ var tags map[string]string
+
+ // Gather data
+ fields, tags, err = h.httpGather(u)
+ if err != nil {
+ acc.AddError(err)
+ continue
+ }
+
+ // Add metrics
+ acc.AddFields("http_response", fields, tags)
}
- // Add metrics
- acc.AddFields("http_response", fields, tags)
return nil
}
diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go
index a33805db31efb..5a256e6e58d2a 100644
--- a/plugins/inputs/http_response/http_response_test.go
+++ b/plugins/inputs/http_response/http_response_test.go
@@ -1,16 +1,18 @@
package http_response
import (
+ "errors"
"fmt"
"io/ioutil"
+ "net"
"net/http"
"net/http/httptest"
"testing"
"time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/testutil"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -34,6 +36,7 @@ func checkAbsentTags(t *testing.T, tags []string, acc *testutil.Accumulator) {
// Receives a dictionary and with expected fields and their values. If a value is nil, it will only check
// that the field exists, but not its contents
func checkFields(t *testing.T, fields map[string]interface{}, acc *testutil.Accumulator) {
+ t.Helper()
for key, field := range fields {
switch v := field.(type) {
case int:
@@ -83,6 +86,14 @@ func setUpTestMux() http.Handler {
http.Redirect(w, req, "/good", http.StatusMovedPermanently)
})
mux.HandleFunc("/good", func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Set("Server", "MyTestServer")
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ fmt.Fprintf(w, "hit the good page!")
+ })
+ mux.HandleFunc("/invalidUTF8", func(w http.ResponseWriter, req *http.Request) {
+ w.Write([]byte{0xff, 0xfe, 0xfd})
+ })
+ mux.HandleFunc("/noheader", func(w http.ResponseWriter, req *http.Request) {
fmt.Fprintf(w, "hit the good page!")
})
mux.HandleFunc("/jsonresponse", func(w http.ResponseWriter, req *http.Request) {
@@ -119,6 +130,7 @@ func setUpTestMux() http.Handler {
}
func checkOutput(t *testing.T, acc *testutil.Accumulator, presentFields map[string]interface{}, presentTags map[string]interface{}, absentFields []string, absentTags []string) {
+ t.Helper()
if presentFields != nil {
checkFields(t, presentFields, acc)
}
@@ -146,6 +158,7 @@ func TestHeaders(t *testing.T) {
defer ts.Close()
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL,
Method: "GET",
ResponseTimeout: internal.Duration{Duration: time.Second * 2},
@@ -163,6 +176,7 @@ func TestHeaders(t *testing.T) {
"result_type": "success",
"result_code": 0,
"response_time": nil,
+ "content_length": nil,
}
expectedTags := map[string]interface{}{
"server": nil,
@@ -180,6 +194,7 @@ func TestFields(t *testing.T) {
defer ts.Close()
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/good",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -199,6 +214,266 @@ func TestFields(t *testing.T) {
"result_type": "success",
"result_code": 0,
"response_time": nil,
+ "content_length": nil,
+ }
+ expectedTags := map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "status_code": "200",
+ "result": "success",
+ }
+ absentFields := []string{"response_string_match"}
+ checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
+}
+
+func TestResponseBodyField(t *testing.T) {
+ mux := setUpTestMux()
+ ts := httptest.NewServer(mux)
+ defer ts.Close()
+
+ h := &HTTPResponse{
+ Log: testutil.Logger{},
+ Address: ts.URL + "/good",
+ Body: "{ 'test': 'data'}",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+ Headers: map[string]string{
+ "Content-Type": "application/json",
+ },
+ ResponseBodyField: "my_body_field",
+ FollowRedirects: true,
+ }
+
+ var acc testutil.Accumulator
+ err := h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields := map[string]interface{}{
+ "http_response_code": http.StatusOK,
+ "result_type": "success",
+ "result_code": 0,
+ "response_time": nil,
+ "content_length": nil,
+ "my_body_field": "hit the good page!",
+ }
+ expectedTags := map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "status_code": "200",
+ "result": "success",
+ }
+ absentFields := []string{"response_string_match"}
+ checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
+
+ // Invalid UTF-8 String
+ h = &HTTPResponse{
+ Log: testutil.Logger{},
+ Address: ts.URL + "/invalidUTF8",
+ Body: "{ 'test': 'data'}",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+ Headers: map[string]string{
+ "Content-Type": "application/json",
+ },
+ ResponseBodyField: "my_body_field",
+ FollowRedirects: true,
+ }
+
+ acc = testutil.Accumulator{}
+ err = h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields = map[string]interface{}{
+ "result_type": "body_read_error",
+ "result_code": 2,
+ }
+ expectedTags = map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "result": "body_read_error",
+ }
+ checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
+}
+
+func TestResponseBodyMaxSize(t *testing.T) {
+ mux := setUpTestMux()
+ ts := httptest.NewServer(mux)
+ defer ts.Close()
+
+ h := &HTTPResponse{
+ Log: testutil.Logger{},
+ Address: ts.URL + "/good",
+ Body: "{ 'test': 'data'}",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+ Headers: map[string]string{
+ "Content-Type": "application/json",
+ },
+ ResponseBodyMaxSize: internal.Size{Size: 5},
+ FollowRedirects: true,
+ }
+
+ var acc testutil.Accumulator
+ err := h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields := map[string]interface{}{
+ "result_type": "body_read_error",
+ "result_code": 2,
+ }
+ expectedTags := map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "result": "body_read_error",
+ }
+ checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
+}
+
+func TestHTTPHeaderTags(t *testing.T) {
+ mux := setUpTestMux()
+ ts := httptest.NewServer(mux)
+ defer ts.Close()
+
+ h := &HTTPResponse{
+ Log: testutil.Logger{},
+ Address: ts.URL + "/good",
+ Body: "{ 'test': 'data'}",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+ HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"},
+ Headers: map[string]string{
+ "Content-Type": "application/json",
+ },
+ FollowRedirects: true,
+ }
+
+ var acc testutil.Accumulator
+ err := h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields := map[string]interface{}{
+ "http_response_code": http.StatusOK,
+ "result_type": "success",
+ "result_code": 0,
+ "response_time": nil,
+ "content_length": nil,
+ }
+ expectedTags := map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "status_code": "200",
+ "result": "success",
+ "my_server": "MyTestServer",
+ "content_type": "application/json; charset=utf-8",
+ }
+ absentFields := []string{"response_string_match"}
+ checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
+
+ h = &HTTPResponse{
+ Log: testutil.Logger{},
+ Address: ts.URL + "/noheader",
+ Body: "{ 'test': 'data'}",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+ HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"},
+ Headers: map[string]string{
+ "Content-Type": "application/json",
+ },
+ FollowRedirects: true,
+ }
+
+ acc = testutil.Accumulator{}
+ err = h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedTags = map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "status_code": "200",
+ "result": "success",
+ }
+ checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
+
+ // Connection failed
+ h = &HTTPResponse{
+ Log: testutil.Logger{},
+ Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here
+ Body: "",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 5},
+ HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"},
+ FollowRedirects: false,
+ }
+
+ acc = testutil.Accumulator{}
+ err = h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields = map[string]interface{}{
+ "result_type": "connection_failed",
+ "result_code": 3,
+ }
+ expectedTags = map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "result": "connection_failed",
+ }
+ absentFields = []string{"http_response_code", "response_time", "content_length", "response_string_match"}
+ checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
+}
+
+func findInterface() (net.Interface, error) {
+ potential, _ := net.Interfaces()
+
+ for _, i := range potential {
+ // we are only interest in loopback interfaces which are up
+ if (i.Flags&net.FlagUp == 0) || (i.Flags&net.FlagLoopback == 0) {
+ continue
+ }
+
+ if addrs, _ := i.Addrs(); len(addrs) > 0 {
+ // return interface if it has at least one unicast address
+ return i, nil
+ }
+ }
+
+ return net.Interface{}, errors.New("cannot find suitable loopback interface")
+}
+
+func TestInterface(t *testing.T) {
+ var (
+ mux = setUpTestMux()
+ ts = httptest.NewServer(mux)
+ )
+
+ defer ts.Close()
+
+ intf, err := findInterface()
+ require.NoError(t, err)
+
+ h := &HTTPResponse{
+ Log: testutil.Logger{},
+ Address: ts.URL + "/good",
+ Body: "{ 'test': 'data'}",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+ Headers: map[string]string{
+ "Content-Type": "application/json",
+ },
+ FollowRedirects: true,
+ Interface: intf.Name,
+ }
+
+ var acc testutil.Accumulator
+ err = h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields := map[string]interface{}{
+ "http_response_code": http.StatusOK,
+ "result_type": "success",
+ "result_code": 0,
+ "response_time": nil,
+ "content_length": nil,
}
expectedTags := map[string]interface{}{
"server": nil,
@@ -216,6 +491,7 @@ func TestRedirects(t *testing.T) {
defer ts.Close()
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/redirect",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -234,6 +510,7 @@ func TestRedirects(t *testing.T) {
"result_type": "success",
"result_code": 0,
"response_time": nil,
+ "content_length": nil,
}
expectedTags := map[string]interface{}{
"server": nil,
@@ -245,6 +522,7 @@ func TestRedirects(t *testing.T) {
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
h = &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/badredirect",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -281,6 +559,7 @@ func TestMethod(t *testing.T) {
defer ts.Close()
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/mustbepostmethod",
Body: "{ 'test': 'data'}",
Method: "POST",
@@ -299,6 +578,7 @@ func TestMethod(t *testing.T) {
"result_type": "success",
"result_code": 0,
"response_time": nil,
+ "content_length": nil,
}
expectedTags := map[string]interface{}{
"server": nil,
@@ -310,6 +590,7 @@ func TestMethod(t *testing.T) {
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
h = &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/mustbepostmethod",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -328,6 +609,7 @@ func TestMethod(t *testing.T) {
"result_type": "success",
"result_code": 0,
"response_time": nil,
+ "content_length": nil,
}
expectedTags = map[string]interface{}{
"server": nil,
@@ -340,6 +622,7 @@ func TestMethod(t *testing.T) {
//check that lowercase methods work correctly
h = &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/mustbepostmethod",
Body: "{ 'test': 'data'}",
Method: "head",
@@ -358,6 +641,7 @@ func TestMethod(t *testing.T) {
"result_type": "success",
"result_code": 0,
"response_time": nil,
+ "content_length": nil,
}
expectedTags = map[string]interface{}{
"server": nil,
@@ -375,6 +659,7 @@ func TestBody(t *testing.T) {
defer ts.Close()
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/musthaveabody",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -393,6 +678,7 @@ func TestBody(t *testing.T) {
"result_type": "success",
"result_code": 0,
"response_time": nil,
+ "content_length": nil,
}
expectedTags := map[string]interface{}{
"server": nil,
@@ -404,6 +690,7 @@ func TestBody(t *testing.T) {
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
h = &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/musthaveabody",
Method: "GET",
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
@@ -437,6 +724,7 @@ func TestStringMatch(t *testing.T) {
defer ts.Close()
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/good",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -457,6 +745,7 @@ func TestStringMatch(t *testing.T) {
"result_type": "success",
"result_code": 0,
"response_time": nil,
+ "content_length": nil,
}
expectedTags := map[string]interface{}{
"server": nil,
@@ -473,6 +762,7 @@ func TestStringMatchJson(t *testing.T) {
defer ts.Close()
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/jsonresponse",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -493,6 +783,7 @@ func TestStringMatchJson(t *testing.T) {
"result_type": "success",
"result_code": 0,
"response_time": nil,
+ "content_length": nil,
}
expectedTags := map[string]interface{}{
"server": nil,
@@ -509,6 +800,7 @@ func TestStringMatchFail(t *testing.T) {
defer ts.Close()
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/good",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -530,6 +822,7 @@ func TestStringMatchFail(t *testing.T) {
"result_type": "response_string_mismatch",
"result_code": 1,
"response_time": nil,
+ "content_length": nil,
}
expectedTags := map[string]interface{}{
"server": nil,
@@ -550,6 +843,7 @@ func TestTimeout(t *testing.T) {
defer ts.Close()
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/twosecondnap",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -572,18 +866,18 @@ func TestTimeout(t *testing.T) {
"method": "GET",
"result": "timeout",
}
- absentFields := []string{"http_response_code", "response_time", "response_string_match"}
+ absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match"}
absentTags := []string{"status_code"}
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
}
-func TestPluginErrors(t *testing.T) {
+func TestBadRegex(t *testing.T) {
mux := setUpTestMux()
ts := httptest.NewServer(mux)
defer ts.Close()
- // Bad regex test. Should return an error and return nothing
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: ts.URL + "/good",
Body: "{ 'test': 'data'}",
Method: "GET",
@@ -599,43 +893,15 @@ func TestPluginErrors(t *testing.T) {
err := h.Gather(&acc)
require.Error(t, err)
- absentFields := []string{"http_response_code", "response_time", "response_string_match", "result_type", "result_code"}
+ absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match", "result_type", "result_code"}
absentTags := []string{"status_code", "result", "server", "method"}
checkOutput(t, &acc, nil, nil, absentFields, absentTags)
-
- // Attempt to read empty body test
- h = &HTTPResponse{
- Address: ts.URL + "/redirect",
- Body: "",
- Method: "GET",
- ResponseStringMatch: ".*",
- ResponseTimeout: internal.Duration{Duration: time.Second * 20},
- FollowRedirects: false,
- }
-
- acc = testutil.Accumulator{}
- err = h.Gather(&acc)
- require.NoError(t, err)
-
- expectedFields := map[string]interface{}{
- "http_response_code": http.StatusMovedPermanently,
- "response_string_match": 0,
- "result_type": "body_read_error",
- "result_code": 2,
- "response_time": nil,
- }
- expectedTags := map[string]interface{}{
- "server": nil,
- "method": "GET",
- "status_code": "301",
- "result": "body_read_error",
- }
- checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
}
func TestNetworkErrors(t *testing.T) {
// DNS error
h := &HTTPResponse{
+ Log: testutil.Logger{},
Address: "https://nonexistent.nonexistent", // Any non-resolvable URL works here
Body: "",
Method: "GET",
@@ -656,12 +922,13 @@ func TestNetworkErrors(t *testing.T) {
"method": "GET",
"result": "dns_error",
}
- absentFields := []string{"http_response_code", "response_time", "response_string_match"}
+ absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match"}
absentTags := []string{"status_code"}
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
- // Connecton failed
+ // Connection failed
h = &HTTPResponse{
+ Log: testutil.Logger{},
Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here
Body: "",
Method: "GET",
@@ -682,7 +949,164 @@ func TestNetworkErrors(t *testing.T) {
"method": "GET",
"result": "connection_failed",
}
- absentFields = []string{"http_response_code", "response_time", "response_string_match"}
+ absentFields = []string{"http_response_code", "response_time", "content_length", "response_string_match"}
absentTags = []string{"status_code"}
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
}
+
+func TestContentLength(t *testing.T) {
+ mux := setUpTestMux()
+ ts := httptest.NewServer(mux)
+ defer ts.Close()
+
+ h := &HTTPResponse{
+ Log: testutil.Logger{},
+ URLs: []string{ts.URL + "/good"},
+ Body: "{ 'test': 'data'}",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+ Headers: map[string]string{
+ "Content-Type": "application/json",
+ },
+ FollowRedirects: true,
+ }
+ var acc testutil.Accumulator
+ err := h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields := map[string]interface{}{
+ "http_response_code": http.StatusOK,
+ "result_type": "success",
+ "result_code": 0,
+ "response_time": nil,
+ "content_length": len([]byte("hit the good page!")),
+ }
+ expectedTags := map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "status_code": "200",
+ "result": "success",
+ }
+ absentFields := []string{"response_string_match"}
+ checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
+
+ h = &HTTPResponse{
+ Log: testutil.Logger{},
+ URLs: []string{ts.URL + "/musthaveabody"},
+ Body: "{ 'test': 'data'}",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+ Headers: map[string]string{
+ "Content-Type": "application/json",
+ },
+ FollowRedirects: true,
+ }
+ acc = testutil.Accumulator{}
+ err = h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields = map[string]interface{}{
+ "http_response_code": http.StatusOK,
+ "result_type": "success",
+ "result_code": 0,
+ "response_time": nil,
+ "content_length": len([]byte("sent a body!")),
+ }
+ expectedTags = map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "status_code": "200",
+ "result": "success",
+ }
+ absentFields = []string{"response_string_match"}
+ checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
+}
+
+func TestRedirect(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Add("Location", "http://example.org")
+ w.WriteHeader(http.StatusMovedPermanently)
+ w.Write([]byte("test"))
+ })
+
+ plugin := &HTTPResponse{
+ URLs: []string{ts.URL},
+ ResponseStringMatch: "test",
+ }
+
+ var acc testutil.Accumulator
+ err := plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "http_response",
+ map[string]string{
+ "server": ts.URL,
+ "method": "GET",
+ "result": "success",
+ "status_code": "301",
+ },
+ map[string]interface{}{
+ "result_code": 0,
+ "result_type": "success",
+ "http_response_code": 301,
+ "response_string_match": 1,
+ "content_length": 4,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ actual := acc.GetTelegrafMetrics()
+ for _, m := range actual {
+ m.RemoveField("response_time")
+ }
+
+ testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+}
+
+func TestBasicAuth(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ aHeader := r.Header.Get("Authorization")
+ assert.Equal(t, "Basic bWU6bXlwYXNzd29yZA==", aHeader)
+ w.WriteHeader(http.StatusOK)
+ }))
+ defer ts.Close()
+
+ h := &HTTPResponse{
+ Log: testutil.Logger{},
+ Address: ts.URL + "/good",
+ Body: "{ 'test': 'data'}",
+ Method: "GET",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+ Username: "me",
+ Password: "mypassword",
+ Headers: map[string]string{
+ "Content-Type": "application/json",
+ },
+ }
+
+ var acc testutil.Accumulator
+ err := h.Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields := map[string]interface{}{
+ "http_response_code": http.StatusOK,
+ "result_type": "success",
+ "result_code": 0,
+ "response_time": nil,
+ "content_length": nil,
+ }
+ expectedTags := map[string]interface{}{
+ "server": nil,
+ "method": "GET",
+ "status_code": "200",
+ "result": "success",
+ }
+ absentFields := []string{"response_string_match"}
+ checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
+}
diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go
index e09eafc94c16a..a5f5e47aad68e 100644
--- a/plugins/inputs/httpjson/httpjson.go
+++ b/plugins/inputs/httpjson/httpjson.go
@@ -12,7 +12,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -42,7 +42,7 @@ type HTTPClient interface {
// req: HTTP request object
//
// Returns:
- // http.Response: HTTP respons object
+ // http.Response: HTTP response object
// error : Any error that may have occurred
MakeRequest(req *http.Request) (*http.Response, error)
diff --git a/plugins/inputs/icinga2/README.md b/plugins/inputs/icinga2/README.md
index 697c6c59cdf9a..fb36d36f3730f 100644
--- a/plugins/inputs/icinga2/README.md
+++ b/plugins/inputs/icinga2/README.md
@@ -11,10 +11,10 @@ services and hosts. You can read Icinga2's documentation for their remote API
```toml
# Description
[[inputs.icinga2]]
- ## Required Icinga2 server address (default: "https://localhost:5665")
+ ## Required Icinga2 server address
# server = "https://localhost:5665"
- ## Required Icinga2 object type ("services" or "hosts, default "services")
+ ## Required Icinga2 object type ("services" or "hosts")
# object_type = "services"
## Credentials for basic HTTP authentication
@@ -41,16 +41,17 @@ services and hosts. You can read Icinga2's documentation for their remote API
### Tags:
- All measurements have the following tags:
- - check_command
- - display_name
- - state
- - source
- - port
- - scheme
+ - check_command - The short name of the check command
+ - display_name - The name of the service or host
+ - state - The state: UP/DOWN for hosts, OK/WARNING/CRITICAL/UNKNOWN for services
+ - source - The icinga2 host
+ - port - The icinga2 port
+ - scheme - The icinga2 protocol (http/https)
+ - server - The server the check_command is running for
### Sample Queries:
-```
+```sql
SELECT * FROM "icinga2_services" WHERE state_code = 0 AND time > now() - 24h // Service with OK status
SELECT * FROM "icinga2_services" WHERE state_code = 1 AND time > now() - 24h // Service with WARNING status
SELECT * FROM "icinga2_services" WHERE state_code = 2 AND time > now() - 24h // Service with CRITICAL status
diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go
index 37590ab8be70d..5ec0bb43db319 100644
--- a/plugins/inputs/icinga2/icinga2.go
+++ b/plugins/inputs/icinga2/icinga2.go
@@ -3,14 +3,13 @@ package icinga2
import (
"encoding/json"
"fmt"
- "log"
"net/http"
"net/url"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -22,6 +21,8 @@ type Icinga2 struct {
ResponseTimeout internal.Duration
tls.ClientConfig
+ Log telegraf.Logger
+
client *http.Client
}
@@ -38,10 +39,11 @@ type Object struct {
}
type Attribute struct {
- CheckCommand string `json:"check_command"`
- DisplayName string `json:"display_name"`
- Name string `json:"name"`
- State int `json:"state"`
+ CheckCommand string `json:"check_command"`
+ DisplayName string `json:"display_name"`
+ Name string `json:"name"`
+ State float64 `json:"state"`
+ HostName string `json:"host_name"`
}
var levels = []string{"ok", "warning", "critical", "unknown"}
@@ -49,10 +51,10 @@ var levels = []string{"ok", "warning", "critical", "unknown"}
type ObjectType string
var sampleConfig = `
- ## Required Icinga2 server address (default: "https://localhost:5665")
+ ## Required Icinga2 server address
# server = "https://localhost:5665"
- ## Required Icinga2 object type ("services" or "hosts, default "services")
+ ## Required Icinga2 object type ("services" or "hosts")
# object_type = "services"
## Credentials for basic HTTP authentication
@@ -80,23 +82,34 @@ func (i *Icinga2) SampleConfig() string {
func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) {
for _, check := range checks {
- fields := make(map[string]interface{})
- tags := make(map[string]string)
-
url, err := url.Parse(i.Server)
if err != nil {
- log.Fatal(err)
+ i.Log.Error(err.Error())
+ continue
+ }
+
+ state := int64(check.Attrs.State)
+
+ fields := map[string]interface{}{
+ "name": check.Attrs.Name,
+ "state_code": state,
}
- fields["name"] = check.Attrs.Name
- fields["state_code"] = check.Attrs.State
+ // source is dependent on 'services' or 'hosts' check
+ source := check.Attrs.Name
+ if i.ObjectType == "services" {
+ source = check.Attrs.HostName
+ }
- tags["display_name"] = check.Attrs.DisplayName
- tags["check_command"] = check.Attrs.CheckCommand
- tags["state"] = levels[check.Attrs.State]
- tags["source"] = url.Hostname()
- tags["scheme"] = url.Scheme
- tags["port"] = url.Port()
+ tags := map[string]string{
+ "display_name": check.Attrs.DisplayName,
+ "check_command": check.Attrs.CheckCommand,
+ "source": source,
+ "state": levels[state],
+ "server": url.Hostname(),
+ "scheme": url.Scheme,
+ "port": url.Port(),
+ }
acc.AddFields(fmt.Sprintf("icinga2_%s", i.ObjectType), fields, tags)
}
@@ -131,7 +144,15 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error {
i.client = client
}
- url := fmt.Sprintf("%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command", i.Server, i.ObjectType)
+ requestUrl := "%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command"
+
+ // Note: attrs=host_name is only valid for 'services' requests, using check.Attrs.HostName for the host
+ // 'hosts' requests will need to use attrs=name only, using check.Attrs.Name for the host
+ if i.ObjectType == "services" {
+ requestUrl += "&attrs=host_name"
+ }
+
+ url := fmt.Sprintf(requestUrl, i.Server, i.ObjectType)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
@@ -163,8 +184,9 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error {
func init() {
inputs.Add("icinga2", func() telegraf.Input {
return &Icinga2{
- Server: "https://localhost:5665",
- ObjectType: "services",
+ Server: "https://localhost:5665",
+ ObjectType: "services",
+ ResponseTimeout: internal.Duration{Duration: time.Second * 5},
}
})
}
diff --git a/plugins/inputs/icinga2/icinga2_test.go b/plugins/inputs/icinga2/icinga2_test.go
index ad9268347de55..13055ed8c2d16 100644
--- a/plugins/inputs/icinga2/icinga2_test.go
+++ b/plugins/inputs/icinga2/icinga2_test.go
@@ -3,89 +3,117 @@ package icinga2
import (
"encoding/json"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestGatherServicesStatus(t *testing.T) {
-
- s := `{"results":[
- {
- "attrs": {
- "check_command": "check-bgp-juniper-netconf",
- "display_name": "eq-par.dc2.fr",
- "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe",
- "state": 0
- },
- "joins": {},
- "meta": {},
- "name": "eq-par.dc2.fr!ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe",
- "type": "Service"
- }
- ]}`
+ s := `{
+ "results": [
+ {
+ "attrs": {
+ "check_command": "check-bgp-juniper-netconf",
+ "display_name": "eq-par.dc2.fr",
+ "host_name": "someserverfqdn.net",
+ "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe",
+ "state": 0
+ },
+ "joins": {},
+ "meta": {},
+ "name": "eq-par.dc2.fr!ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe",
+ "type": "Service"
+ }
+ ]
+}
+`
checks := Result{}
json.Unmarshal([]byte(s), &checks)
- fields := map[string]interface{}{
- "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe",
- "state_code": 0,
- }
- tags := map[string]string{
- "display_name": "eq-par.dc2.fr",
- "check_command": "check-bgp-juniper-netconf",
- "state": "ok",
- "source": "localhost",
- "port": "5665",
- "scheme": "https",
- }
-
- var acc testutil.Accumulator
icinga2 := new(Icinga2)
+ icinga2.Log = testutil.Logger{}
icinga2.ObjectType = "services"
icinga2.Server = "https://localhost:5665"
+
+ var acc testutil.Accumulator
icinga2.GatherStatus(&acc, checks.Results)
- acc.AssertContainsTaggedFields(t, "icinga2_services", fields, tags)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "icinga2_services",
+ map[string]string{
+ "display_name": "eq-par.dc2.fr",
+ "check_command": "check-bgp-juniper-netconf",
+ "state": "ok",
+ "source": "someserverfqdn.net",
+ "server": "localhost",
+ "port": "5665",
+ "scheme": "https",
+ },
+ map[string]interface{}{
+ "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe",
+ "state_code": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestGatherHostsStatus(t *testing.T) {
-
- s := `{"results":[
- {
- "attrs": {
- "name": "webserver",
- "address": "192.168.1.1",
- "check_command": "ping",
- "display_name": "apache",
- "state": 2
- },
- "joins": {},
- "meta": {},
- "name": "webserver",
- "type": "Host"
- }
- ]}`
+ s := `{
+ "results": [
+ {
+ "attrs": {
+ "address": "192.168.1.1",
+ "check_command": "ping",
+ "display_name": "apache",
+ "name": "webserver",
+ "state": 2.0
+ },
+ "joins": {},
+ "meta": {},
+ "name": "webserver",
+ "type": "Host"
+ }
+ ]
+}
+`
checks := Result{}
json.Unmarshal([]byte(s), &checks)
- fields := map[string]interface{}{
- "name": "webserver",
- "state_code": 2,
- }
- tags := map[string]string{
- "display_name": "apache",
- "check_command": "ping",
- "state": "critical",
- "source": "localhost",
- "port": "5665",
- "scheme": "https",
- }
var acc testutil.Accumulator
icinga2 := new(Icinga2)
+ icinga2.Log = testutil.Logger{}
icinga2.ObjectType = "hosts"
icinga2.Server = "https://localhost:5665"
+
icinga2.GatherStatus(&acc, checks.Results)
- acc.AssertContainsTaggedFields(t, "icinga2_hosts", fields, tags)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "icinga2_hosts",
+ map[string]string{
+ "display_name": "apache",
+ "check_command": "ping",
+ "state": "critical",
+ "source": "webserver",
+ "server": "localhost",
+ "port": "5665",
+ "scheme": "https",
+ },
+ map[string]interface{}{
+ "name": "webserver",
+ "state_code": 2,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
diff --git a/plugins/inputs/infiniband/README.md b/plugins/inputs/infiniband/README.md
new file mode 100644
index 0000000000000..bc5b03543c375
--- /dev/null
+++ b/plugins/inputs/infiniband/README.md
@@ -0,0 +1,58 @@
+# InfiniBand Input Plugin
+
+This plugin gathers statistics for all InfiniBand devices and ports on the
+system. These are the counters that can be found in
+`/sys/class/infiniband//port//counters/`
+
+**Supported Platforms**: Linux
+
+### Configuration
+
+```toml
+[[inputs.infiniband]]
+ # no configuration
+```
+
+### Metrics
+
+Actual metrics depend on the InfiniBand devices, the plugin uses a simple
+mapping from counter -> counter value.
+
+[Information about the counters][counters] collected is provided by Mellanox.
+
+[counters]: https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters
+
+- infiniband
+ - tags:
+ - device
+ - port
+ - fields:
+ - excessive_buffer_overrun_errors (integer)
+ - link_downed (integer)
+ - link_error_recovery (integer)
+ - local_link_integrity_errors (integer)
+ - multicast_rcv_packets (integer)
+ - multicast_xmit_packets (integer)
+ - port_rcv_constraint_errors (integer)
+ - port_rcv_data (integer)
+ - port_rcv_errors (integer)
+ - port_rcv_packets (integer)
+ - port_rcv_remote_physical_errors (integer)
+ - port_rcv_switch_relay_errors (integer)
+ - port_xmit_constraint_errors (integer)
+ - port_xmit_data (integer)
+ - port_xmit_discards (integer)
+ - port_xmit_packets (integer)
+ - port_xmit_wait (integer)
+ - symbol_error (integer)
+ - unicast_rcv_packets (integer)
+ - unicast_xmit_packets (integer)
+ - VL15_dropped (integer)
+
+
+
+### Example Output
+
+```
+infiniband,device=mlx5_0,port=1 VL15_dropped=0i,excessive_buffer_overrun_errors=0i,link_downed=0i,link_error_recovery=0i,local_link_integrity_errors=0i,multicast_rcv_packets=0i,multicast_xmit_packets=0i,port_rcv_constraint_errors=0i,port_rcv_data=237159415345822i,port_rcv_errors=0i,port_rcv_packets=801977655075i,port_rcv_remote_physical_errors=0i,port_rcv_switch_relay_errors=0i,port_xmit_constraint_errors=0i,port_xmit_data=238334949937759i,port_xmit_discards=0i,port_xmit_packets=803162651391i,port_xmit_wait=4294967295i,symbol_error=0i,unicast_rcv_packets=801977655075i,unicast_xmit_packets=803162651391i 1573125558000000000
+```
diff --git a/plugins/inputs/infiniband/infiniband.go b/plugins/inputs/infiniband/infiniband.go
new file mode 100644
index 0000000000000..65e1d6c712998
--- /dev/null
+++ b/plugins/inputs/infiniband/infiniband.go
@@ -0,0 +1,22 @@
+package infiniband
+
+import (
+ "github.com/influxdata/telegraf"
+)
+
+// Stores the configuration values for the infiniband plugin - as there are no
+// config values, this is intentionally empty
+type Infiniband struct {
+ Log telegraf.Logger `toml:"-"`
+}
+
+// Sample configuration for plugin
+var InfinibandConfig = ``
+
+func (_ *Infiniband) SampleConfig() string {
+ return InfinibandConfig
+}
+
+func (_ *Infiniband) Description() string {
+ return "Gets counters from all InfiniBand cards and ports installed"
+}
diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go
new file mode 100644
index 0000000000000..48cd8a428900d
--- /dev/null
+++ b/plugins/inputs/infiniband/infiniband_linux.go
@@ -0,0 +1,59 @@
+// +build linux
+
+package infiniband
+
+import (
+ "fmt"
+ "github.com/Mellanox/rdmamap"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "strconv"
+)
+
+// Gather statistics from our infiniband cards
+func (_ *Infiniband) Gather(acc telegraf.Accumulator) error {
+
+ rdmaDevices := rdmamap.GetRdmaDeviceList()
+
+ if len(rdmaDevices) == 0 {
+ return fmt.Errorf("no InfiniBand devices found in /sys/class/infiniband/")
+ }
+
+ for _, dev := range rdmaDevices {
+ devicePorts := rdmamap.GetPorts(dev)
+ for _, port := range devicePorts {
+ portInt, err := strconv.Atoi(port)
+ if err != nil {
+ return err
+ }
+
+ stats, err := rdmamap.GetRdmaSysfsStats(dev, portInt)
+ if err != nil {
+ return err
+ }
+
+ addStats(dev, port, stats, acc)
+ }
+ }
+
+ return nil
+}
+
+// Add the statistics to the accumulator
+func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegraf.Accumulator) {
+
+ // Allow users to filter by card and port
+ tags := map[string]string{"device": dev, "port": port}
+ fields := make(map[string]interface{})
+
+ for _, entry := range stats {
+ fields[entry.Name] = entry.Value
+ }
+
+ acc.AddFields("infiniband", fields, tags)
+}
+
+// Initialise plugin
+func init() {
+ inputs.Add("infiniband", func() telegraf.Input { return &Infiniband{} })
+}
diff --git a/plugins/inputs/infiniband/infiniband_notlinux.go b/plugins/inputs/infiniband/infiniband_notlinux.go
new file mode 100644
index 0000000000000..5b19672d975d8
--- /dev/null
+++ b/plugins/inputs/infiniband/infiniband_notlinux.go
@@ -0,0 +1,23 @@
+// +build !linux
+
+package infiniband
+
+import (
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+func (i *Infiniband) Init() error {
+ i.Log.Warn("Current platform is not supported")
+ return nil
+}
+
+func (_ *Infiniband) Gather(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func init() {
+ inputs.Add("infiniband", func() telegraf.Input {
+ return &Infiniband{}
+ })
+}
diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go
new file mode 100644
index 0000000000000..6c4bb24587f4a
--- /dev/null
+++ b/plugins/inputs/infiniband/infiniband_test.go
@@ -0,0 +1,134 @@
+// +build linux
+
+package infiniband
+
+import (
+ "github.com/Mellanox/rdmamap"
+ "github.com/influxdata/telegraf/testutil"
+ "testing"
+)
+
+func TestInfiniband(t *testing.T) {
+ fields := map[string]interface{}{
+ "excessive_buffer_overrun_errors": uint64(0),
+ "link_downed": uint64(0),
+ "link_error_recovery": uint64(0),
+ "local_link_integrity_errors": uint64(0),
+ "multicast_rcv_packets": uint64(0),
+ "multicast_xmit_packets": uint64(0),
+ "port_rcv_constraint_errors": uint64(0),
+ "port_rcv_data": uint64(237159415345822),
+ "port_rcv_errors": uint64(0),
+ "port_rcv_packets": uint64(801977655075),
+ "port_rcv_remote_physical_errors": uint64(0),
+ "port_rcv_switch_relay_errors": uint64(0),
+ "port_xmit_constraint_errors": uint64(0),
+ "port_xmit_data": uint64(238334949937759),
+ "port_xmit_discards": uint64(0),
+ "port_xmit_packets": uint64(803162651391),
+ "port_xmit_wait": uint64(4294967295),
+ "symbol_error": uint64(0),
+ "unicast_rcv_packets": uint64(801977655075),
+ "unicast_xmit_packets": uint64(803162651391),
+ "VL15_dropped": uint64(0),
+ }
+
+ tags := map[string]string{
+ "device": "m1x5_0",
+ "port": "1",
+ }
+
+ sample_rdmastats_entries := []rdmamap.RdmaStatEntry{
+ {
+ Name: "excessive_buffer_overrun_errors",
+ Value: uint64(0),
+ },
+ {
+ Name: "link_downed",
+ Value: uint64(0),
+ },
+ {
+ Name: "link_error_recovery",
+ Value: uint64(0),
+ },
+ {
+ Name: "local_link_integrity_errors",
+ Value: uint64(0),
+ },
+ {
+ Name: "multicast_rcv_packets",
+ Value: uint64(0),
+ },
+ {
+ Name: "multicast_xmit_packets",
+ Value: uint64(0),
+ },
+ {
+ Name: "port_rcv_constraint_errors",
+ Value: uint64(0),
+ },
+ {
+ Name: "port_rcv_data",
+ Value: uint64(237159415345822),
+ },
+ {
+ Name: "port_rcv_errors",
+ Value: uint64(0),
+ },
+ {
+ Name: "port_rcv_packets",
+ Value: uint64(801977655075),
+ },
+ {
+ Name: "port_rcv_remote_physical_errors",
+ Value: uint64(0),
+ },
+ {
+ Name: "port_rcv_switch_relay_errors",
+ Value: uint64(0),
+ },
+ {
+ Name: "port_xmit_constraint_errors",
+ Value: uint64(0),
+ },
+ {
+ Name: "port_xmit_data",
+ Value: uint64(238334949937759),
+ },
+ {
+ Name: "port_xmit_discards",
+ Value: uint64(0),
+ },
+ {
+ Name: "port_xmit_packets",
+ Value: uint64(803162651391),
+ },
+ {
+ Name: "port_xmit_wait",
+ Value: uint64(4294967295),
+ },
+ {
+ Name: "symbol_error",
+ Value: uint64(0),
+ },
+ {
+ Name: "unicast_rcv_packets",
+ Value: uint64(801977655075),
+ },
+ {
+ Name: "unicast_xmit_packets",
+ Value: uint64(803162651391),
+ },
+ {
+ Name: "VL15_dropped",
+ Value: uint64(0),
+ },
+ }
+
+ var acc testutil.Accumulator
+
+ addStats("m1x5_0", "1", sample_rdmastats_entries, &acc)
+
+ acc.AssertContainsTaggedFields(t, "infiniband", fields, tags)
+
+}
diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md
index 2bab123f81c0e..e17bd7072438b 100644
--- a/plugins/inputs/influxdb/README.md
+++ b/plugins/inputs/influxdb/README.md
@@ -1,4 +1,4 @@
-# influxdb plugin
+# InfluxDB Input Plugin
The InfluxDB plugin will collect metrics on the given InfluxDB servers.
@@ -20,6 +20,10 @@ InfluxDB-formatted endpoints. See below for more information.
"http://localhost:8086/debug/vars"
]
+ ## Username and password to send using HTTP Basic Authentication.
+ # username = ""
+ # password = ""
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
@@ -33,46 +37,45 @@ InfluxDB-formatted endpoints. See below for more information.
### Measurements & Fields
-**Note:** The measurements and fields are dynamically built from the InfluxDB source,
-and may vary between versions.
+**Note:** The measurements and fields included in this plugin are dynamically built from the InfluxDB source, and may vary between versions:
- influxdb
- - n_shards
-- influxdb_database
-- influxdb_httpd
-- influxdb_measurement
-- influxdb_memstats
- - heap_inuse
- - heap_released
- - mspan_inuse
- - total_alloc
- - sys
- - mallocs
- - frees
- - heap_idle
- - pause_total_ns
- - lookups
- - heap_sys
- - mcache_sys
- - next_gc
- - gcc_pu_fraction
- - other_sys
- - alloc
- - stack_inuse
- - stack_sys
- - buck_hash_sys
- - gc_sys
- - num_gc
- - heap_alloc
- - heap_objects
- - mspan_sys
- - mcache_inuse
- - last_gc
-- influxdb_shard
-- influxdb_subscriber
-- influxdb_tsm1_cache
-- influxdb_tsm1_wal
-- influxdb_write
+ - n_shards: The total number of shards in the specified database.
+- influxdb_database: The database metrics are being collected from.
+- influxdb_httpd: The URL to listen for network requests. By default, `http://localhost:8086/debug/var`.
+- influxdb_measurement: The measurement that metrics are collected from.
+- influxdb_memstats: Statistics about the memory allocator in the specified database.
+ - heap_inuse: The number of bytes in in-use spans.
+ - heap_released: The number of bytes of physical memory returned to the OS.
+ - mspan_inuse: The number of bytes in in-use mspans.
+ - total_alloc: The cumulative bytes allocated for heap objects.
+ - sys: The total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures.
+ - mallocs: The total number of heap objects allocated. (The total number of live objects are frees.)
+ - frees: The cumulative number of freed (live) heap objects.
+ - heap_idle: The number of bytes of idle heap objects.
+ - pause_total_ns: The total time garbage collection cycles are paused in nanoseconds.
+ - lookups: The number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals.
+ - heap_sys: The number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap.
+ - mcache_sys: The bytes of memory obtained from the OS for mcache structures.
+ - next_gc: The target heap size of the next garbage collection cycle.
+ - gc_cpu_fraction: The fraction of CPU time used by the garbage collection cycle.
+ - other_sys: The number of bytes of memory used other than heap_sys, stacks_sys, mspan_sys, mcache_sys, buckhash_sys, and gc_sys.
+ - alloc: The currently allocated number of bytes of heap objects.
+ - stack_inuse: The number of bytes in in-use stacks.
+ - stack_sys: The total number of bytes of memory obtained from the stack in use.
+ - buck_hash_sys: The bytes of memory in profiling bucket hash tables.
+ - gc_sys: The bytes of memory in garbage collection metadata.
+ - num_gc: The number of completed garbage collection cycles.
+ - heap_alloc: The size, in bytes, of all heap objects.
+ - heap_objects: The number of allocated heap objects.
+ - mspan_sys: The bytes of memory obtained from the OS for mspan.
+ - mcache_inuse: The bytes of allocated mcache structures.
+ - last_gc: Time the last garbage collection finished, as nanoseconds since 1970 (the UNIX epoch).
+- influxdb_shard: The shard metrics are collected from.
+- influxdb_subscriber: The InfluxDB subscription that metrics are collected from.
+- influxdb_tsm1_cache: The TSM cache that metrics are collected from.
+- influxdb_tsm1_wal: The TSM Write Ahead Log (WAL) that metrics are collected from.
+- influxdb_write: The total writes to the specified database.
### Example Output:
@@ -91,7 +94,7 @@ telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test
> influxdb_measurement,database=_internal,host=tyrion,measurement=tsm1_filestore,url=http://localhost:8086/debug/vars numSeries=2 1463590500247354636
> influxdb_measurement,database=_internal,host=tyrion,measurement=tsm1_wal,url=http://localhost:8086/debug/vars numSeries=4 1463590500247354636
> influxdb_measurement,database=_internal,host=tyrion,measurement=write,url=http://localhost:8086/debug/vars numSeries=1 1463590500247354636
-> influxdb_memstats,host=tyrion,url=http://localhost:8086/debug/vars alloc=7642384i,buck_hash_sys=1463471i,frees=1169558i,gc_sys=653312i,gcc_pu_fraction=0.00003825652361068311,heap_alloc=7642384i,heap_idle=9912320i,heap_inuse=9125888i,heap_objects=48276i,heap_released=0i,heap_sys=19038208i,last_gc=1463590480877651621i,lookups=90i,mallocs=1217834i,mcache_inuse=4800i,mcache_sys=16384i,mspan_inuse=70920i,mspan_sys=81920i,next_gc=11679787i,num_gc=141i,other_sys=1244233i,pause_total_ns=24034027i,stack_inuse=884736i,stack_sys=884736i,sys=23382264i,total_alloc=679012200i 1463590500277918755
+> influxdb_memstats,host=tyrion,url=http://localhost:8086/debug/vars alloc=7642384i,buck_hash_sys=1463471i,frees=1169558i,gc_sys=653312i,gc_cpu_fraction=0.00003825652361068311,heap_alloc=7642384i,heap_idle=9912320i,heap_inuse=9125888i,heap_objects=48276i,heap_released=0i,heap_sys=19038208i,last_gc=1463590480877651621i,lookups=90i,mallocs=1217834i,mcache_inuse=4800i,mcache_sys=16384i,mspan_inuse=70920i,mspan_sys=81920i,next_gc=11679787i,num_gc=141i,other_sys=1244233i,pause_total_ns=24034027i,stack_inuse=884736i,stack_sys=884736i,sys=23382264i,total_alloc=679012200i 1463590500277918755
> influxdb_shard,database=_internal,engine=tsm1,host=tyrion,id=4,path=/Users/sparrc/.influxdb/data/_internal/monitor/4,retentionPolicy=monitor,url=http://localhost:8086/debug/vars fieldsCreate=65,seriesCreate=26,writePointsOk=7274,writeReq=280 1463590500247354636
> influxdb_subscriber,host=tyrion,url=http://localhost:8086/debug/vars pointsWritten=7274 1463590500247354636
> influxdb_tsm1_cache,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/data/_internal/monitor/1,retentionPolicy=monitor,url=http://localhost:8086/debug/vars WALCompactionTimeMs=0,cacheAgeMs=2809192,cachedBytes=0,diskBytes=0,memBytes=0,snapshotCount=0 1463590500247354636
diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go
index 0bb3ead5ee642..d7eb66153034a 100644
--- a/plugins/inputs/influxdb/influxdb.go
+++ b/plugins/inputs/influxdb/influxdb.go
@@ -1,22 +1,42 @@
package influxdb
import (
+ "bytes"
"encoding/json"
"errors"
- "fmt"
+ "io"
"net/http"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
+const (
+ maxErrorResponseBodyLength = 1024
+)
+
+type APIError struct {
+ StatusCode int
+ Reason string
+ Description string `json:"error"`
+}
+
+func (e *APIError) Error() string {
+ if e.Description != "" {
+ return e.Reason + ": " + e.Description
+ }
+ return e.Reason
+}
+
type InfluxDB struct {
- URLs []string `toml:"urls"`
- Timeout internal.Duration
+ URLs []string `toml:"urls"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ Timeout internal.Duration `toml:"timeout"`
tls.ClientConfig
client *http.Client
@@ -38,6 +58,10 @@ func (*InfluxDB) SampleConfig() string {
"http://localhost:8086/debug/vars"
]
+ ## Username and password to send using HTTP Basic Authentication.
+ # username = ""
+ # password = ""
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
@@ -75,7 +99,7 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
go func(url string) {
defer wg.Done()
if err := i.gatherURL(acc, url); err != nil {
- acc.AddError(fmt.Errorf("[url=%s]: %s", url, err))
+ acc.AddError(err)
}
}(u)
}
@@ -135,12 +159,27 @@ func (i *InfluxDB) gatherURL(
shardCounter := 0
now := time.Now()
- resp, err := i.client.Get(url)
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+
+ if i.Username != "" || i.Password != "" {
+ req.SetBasicAuth(i.Username, i.Password)
+ }
+
+ req.Header.Set("User-Agent", "Telegraf/"+internal.Version())
+
+ resp, err := i.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return readResponseError(resp)
+ }
+
// It would be nice to be able to decode into a map[string]point, but
// we'll get a decoder error like:
// `json: cannot unmarshal array into Go value of type influxdb.point`
@@ -203,7 +242,7 @@ func (i *InfluxDB) gatherURL(
"pause_total_ns": m.PauseTotalNs,
"pause_ns": m.PauseNs[(m.NumGC+255)%256],
"num_gc": m.NumGC,
- "gcc_pu_fraction": m.GCCPUFraction,
+ "gc_cpu_fraction": m.GCCPUFraction,
},
map[string]string{
"url": url,
@@ -255,6 +294,27 @@ func (i *InfluxDB) gatherURL(
return nil
}
+func readResponseError(resp *http.Response) error {
+ apiError := &APIError{
+ StatusCode: resp.StatusCode,
+ Reason: resp.Status,
+ }
+
+ var buf bytes.Buffer
+ r := io.LimitReader(resp.Body, maxErrorResponseBodyLength)
+ _, err := buf.ReadFrom(r)
+ if err != nil {
+ return apiError
+ }
+
+ err = json.Unmarshal(buf.Bytes(), apiError)
+ if err != nil {
+ return apiError
+ }
+
+ return apiError
+}
+
func init() {
inputs.Add("influxdb", func() telegraf.Input {
return &InfluxDB{
diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go
index f24ecc24c11cf..27ea81b6d7dd6 100644
--- a/plugins/inputs/influxdb/influxdb_test.go
+++ b/plugins/inputs/influxdb/influxdb_test.go
@@ -1,6 +1,7 @@
package influxdb_test
import (
+ "fmt"
"net/http"
"net/http/httptest"
"testing"
@@ -91,7 +92,7 @@ func TestInfluxDB(t *testing.T) {
"heap_sys": int64(33849344),
"mcache_sys": int64(16384),
"next_gc": int64(20843042),
- "gcc_pu_fraction": float64(4.287178819113636e-05),
+ "gc_cpu_fraction": float64(4.287178819113636e-05),
"other_sys": int64(1229737),
"alloc": int64(17034016),
"stack_inuse": int64(753664),
@@ -178,6 +179,31 @@ func TestErrorHandling404(t *testing.T) {
require.Error(t, acc.GatherError(plugin.Gather))
}
+func TestErrorResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusUnauthorized)
+ w.Write([]byte(`{"error": "unable to parse authentication credentials"}`))
+ }))
+ defer ts.Close()
+
+ plugin := &influxdb.InfluxDB{
+ URLs: []string{ts.URL},
+ }
+
+ var acc testutil.Accumulator
+ err := plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []error{
+ &influxdb.APIError{
+ StatusCode: http.StatusUnauthorized,
+ Reason: fmt.Sprintf("%d %s", http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized)),
+ Description: "unable to parse authentication credentials",
+ },
+ }
+ require.Equal(t, expected, acc.Errors)
+}
+
const basicJSON = `
{
"_1": {
diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md
index 8b6d2ad51c538..aae77fb965f7a 100644
--- a/plugins/inputs/influxdb_listener/README.md
+++ b/plugins/inputs/influxdb_listener/README.md
@@ -30,13 +30,13 @@ submits data to InfluxDB determines the destination database.
## maximum duration before timing out write of the response
write_timeout = "10s"
- ## Maximum allowed http request body size in bytes.
- ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
+ ## Maximum allowed HTTP request body size in bytes.
+ ## 0 means to use the default of 32MiB.
max_body_size = 0
## Maximum line size allowed to be sent in bytes.
- ## 0 means to use the default of 65536 bytes (64 kibibytes)
- max_line_size = 0
+ ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored
+ # max_line_size = 0
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
@@ -46,6 +46,18 @@ submits data to InfluxDB determines the destination database.
tls_cert = "/etc/telegraf/cert.pem"
tls_key = "/etc/telegraf/key.pem"
+ ## Optional tag name used to store the database name.
+ ## If the write has a database in the query string then it will be kept in this tag name.
+ ## This tag can be used in downstream outputs.
+ ## The default value of nothing means it will be off and the database will not be recorded.
+ ## If you have a tag that is the same as the one specified below, and supply a database,
+ ## the tag will be overwritten with the database supplied.
+ # database_tag = ""
+
+ ## If set the retention policy specified in the write query will be added as
+ ## the value of this tag name.
+ # retention_policy_tag = ""
+
## Optional username and password to accept for HTTP basic authentication.
## You probably want to make sure you have TLS configured above for this.
# basic_username = "foobar"
diff --git a/plugins/inputs/influxdb_listener/bufferpool.go b/plugins/inputs/influxdb_listener/bufferpool.go
deleted file mode 100644
index 00a93652db2fb..0000000000000
--- a/plugins/inputs/influxdb_listener/bufferpool.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package http_listener
-
-import (
- "sync/atomic"
-)
-
-type pool struct {
- buffers chan []byte
- size int
-
- created int64
-}
-
-// NewPool returns a new pool object.
-// n is the number of buffers
-// bufSize is the size (in bytes) of each buffer
-func NewPool(n, bufSize int) *pool {
- return &pool{
- buffers: make(chan []byte, n),
- size: bufSize,
- }
-}
-
-func (p *pool) get() []byte {
- select {
- case b := <-p.buffers:
- return b
- default:
- atomic.AddInt64(&p.created, 1)
- return make([]byte, p.size)
- }
-}
-
-func (p *pool) put(b []byte) {
- select {
- case p.buffers <- b:
- default:
- }
-}
-
-func (p *pool) ncreated() int64 {
- return atomic.LoadInt64(&p.created)
-}
diff --git a/plugins/inputs/influxdb_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go
deleted file mode 100644
index 7e55447869018..0000000000000
--- a/plugins/inputs/influxdb_listener/http_listener.go
+++ /dev/null
@@ -1,447 +0,0 @@
-package http_listener
-
-import (
- "bytes"
- "compress/gzip"
- "crypto/subtle"
- "crypto/tls"
- "encoding/json"
- "fmt"
- "io"
- "log"
- "net"
- "net/http"
- "sync"
- "time"
-
- "github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal"
- tlsint "github.com/influxdata/telegraf/internal/tls"
- "github.com/influxdata/telegraf/plugins/inputs"
- "github.com/influxdata/telegraf/plugins/parsers/influx"
- "github.com/influxdata/telegraf/selfstat"
-)
-
-const (
- // DEFAULT_MAX_BODY_SIZE is the default maximum request body size, in bytes.
- // if the request body is over this size, we will return an HTTP 413 error.
- // 500 MB
- DEFAULT_MAX_BODY_SIZE = 500 * 1024 * 1024
-
- // MAX_LINE_SIZE is the maximum size, in bytes, that can be allocated for
- // a single InfluxDB point.
- // 64 KB
- DEFAULT_MAX_LINE_SIZE = 64 * 1024
-)
-
-type TimeFunc func() time.Time
-
-type HTTPListener struct {
- ServiceAddress string
- ReadTimeout internal.Duration
- WriteTimeout internal.Duration
- MaxBodySize internal.Size
- MaxLineSize internal.Size
- Port int
-
- tlsint.ServerConfig
-
- BasicUsername string
- BasicPassword string
-
- TimeFunc
-
- mu sync.Mutex
- wg sync.WaitGroup
-
- listener net.Listener
-
- handler *influx.MetricHandler
- parser *influx.Parser
- acc telegraf.Accumulator
- pool *pool
-
- BytesRecv selfstat.Stat
- RequestsServed selfstat.Stat
- WritesServed selfstat.Stat
- QueriesServed selfstat.Stat
- PingsServed selfstat.Stat
- RequestsRecv selfstat.Stat
- WritesRecv selfstat.Stat
- QueriesRecv selfstat.Stat
- PingsRecv selfstat.Stat
- NotFoundsServed selfstat.Stat
- BuffersCreated selfstat.Stat
- AuthFailures selfstat.Stat
-
- longLines selfstat.Stat
-}
-
-const sampleConfig = `
- ## Address and port to host HTTP listener on
- service_address = ":8186"
-
- ## maximum duration before timing out read of the request
- read_timeout = "10s"
- ## maximum duration before timing out write of the response
- write_timeout = "10s"
-
- ## Maximum allowed http request body size in bytes.
- ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
- max_body_size = "500MiB"
-
- ## Maximum line size allowed to be sent in bytes.
- ## 0 means to use the default of 65536 bytes (64 kibibytes)
- max_line_size = "64KiB"
-
- ## Set one or more allowed client CA certificate file names to
- ## enable mutually authenticated TLS connections
- tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
-
- ## Add service certificate and key
- tls_cert = "/etc/telegraf/cert.pem"
- tls_key = "/etc/telegraf/key.pem"
-
- ## Optional username and password to accept for HTTP basic authentication.
- ## You probably want to make sure you have TLS configured above for this.
- # basic_username = "foobar"
- # basic_password = "barfoo"
-`
-
-func (h *HTTPListener) SampleConfig() string {
- return sampleConfig
-}
-
-func (h *HTTPListener) Description() string {
- return "Influx HTTP write listener"
-}
-
-func (h *HTTPListener) Gather(_ telegraf.Accumulator) error {
- h.BuffersCreated.Set(h.pool.ncreated())
- return nil
-}
-
-// Start starts the http listener service.
-func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
- h.mu.Lock()
- defer h.mu.Unlock()
-
- tags := map[string]string{
- "address": h.ServiceAddress,
- }
- h.BytesRecv = selfstat.Register("http_listener", "bytes_received", tags)
- h.RequestsServed = selfstat.Register("http_listener", "requests_served", tags)
- h.WritesServed = selfstat.Register("http_listener", "writes_served", tags)
- h.QueriesServed = selfstat.Register("http_listener", "queries_served", tags)
- h.PingsServed = selfstat.Register("http_listener", "pings_served", tags)
- h.RequestsRecv = selfstat.Register("http_listener", "requests_received", tags)
- h.WritesRecv = selfstat.Register("http_listener", "writes_received", tags)
- h.QueriesRecv = selfstat.Register("http_listener", "queries_received", tags)
- h.PingsRecv = selfstat.Register("http_listener", "pings_received", tags)
- h.NotFoundsServed = selfstat.Register("http_listener", "not_founds_served", tags)
- h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags)
- h.AuthFailures = selfstat.Register("http_listener", "auth_failures", tags)
- h.longLines = selfstat.Register("http_listener", "long_lines", tags)
-
- if h.MaxBodySize.Size == 0 {
- h.MaxBodySize.Size = DEFAULT_MAX_BODY_SIZE
- }
- if h.MaxLineSize.Size == 0 {
- h.MaxLineSize.Size = DEFAULT_MAX_LINE_SIZE
- }
-
- if h.ReadTimeout.Duration < time.Second {
- h.ReadTimeout.Duration = time.Second * 10
- }
- if h.WriteTimeout.Duration < time.Second {
- h.WriteTimeout.Duration = time.Second * 10
- }
-
- h.acc = acc
- h.pool = NewPool(200, int(h.MaxLineSize.Size))
-
- tlsConf, err := h.ServerConfig.TLSConfig()
- if err != nil {
- return err
- }
-
- server := &http.Server{
- Addr: h.ServiceAddress,
- Handler: h,
- ReadTimeout: h.ReadTimeout.Duration,
- WriteTimeout: h.WriteTimeout.Duration,
- TLSConfig: tlsConf,
- }
-
- var listener net.Listener
- if tlsConf != nil {
- listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
- } else {
- listener, err = net.Listen("tcp", h.ServiceAddress)
- }
- if err != nil {
- return err
- }
- h.listener = listener
- h.Port = listener.Addr().(*net.TCPAddr).Port
-
- h.handler = influx.NewMetricHandler()
- h.parser = influx.NewParser(h.handler)
-
- h.wg.Add(1)
- go func() {
- defer h.wg.Done()
- server.Serve(h.listener)
- }()
-
- log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress)
-
- return nil
-}
-
-// Stop cleans up all resources
-func (h *HTTPListener) Stop() {
- h.mu.Lock()
- defer h.mu.Unlock()
-
- h.listener.Close()
- h.wg.Wait()
-
- log.Println("I! Stopped HTTP listener service on ", h.ServiceAddress)
-}
-
-func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
- h.RequestsRecv.Incr(1)
- defer h.RequestsServed.Incr(1)
- switch req.URL.Path {
- case "/write":
- h.WritesRecv.Incr(1)
- defer h.WritesServed.Incr(1)
- h.AuthenticateIfSet(h.serveWrite, res, req)
- case "/query":
- h.QueriesRecv.Incr(1)
- defer h.QueriesServed.Incr(1)
- // Deliver a dummy response to the query endpoint, as some InfluxDB
- // clients test endpoint availability with a query
- h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) {
- res.Header().Set("Content-Type", "application/json")
- res.Header().Set("X-Influxdb-Version", "1.0")
- res.WriteHeader(http.StatusOK)
- res.Write([]byte("{\"results\":[]}"))
- }, res, req)
- case "/ping":
- h.PingsRecv.Incr(1)
- defer h.PingsServed.Incr(1)
- verbose := req.URL.Query().Get("verbose")
-
- // respond to ping requests
- if verbose != "" && verbose != "0" && verbose != "false" {
- res.WriteHeader(http.StatusOK)
- b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above
- res.Write(b)
- } else {
- res.WriteHeader(http.StatusNoContent)
- }
- default:
- defer h.NotFoundsServed.Incr(1)
- // Don't know how to respond to calls to other endpoints
- h.AuthenticateIfSet(http.NotFound, res, req)
- }
-}
-
-func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
- // Check that the content length is not too large for us to handle.
- if req.ContentLength > h.MaxBodySize.Size {
- tooLarge(res)
- return
- }
- now := h.TimeFunc()
-
- precision := req.URL.Query().Get("precision")
-
- // Handle gzip request bodies
- body := req.Body
- if req.Header.Get("Content-Encoding") == "gzip" {
- var err error
- body, err = gzip.NewReader(req.Body)
- if err != nil {
- log.Println("D! " + err.Error())
- badRequest(res, err.Error())
- return
- }
- defer body.Close()
- }
- body = http.MaxBytesReader(res, body, h.MaxBodySize.Size)
-
- var return400 bool
- var hangingBytes bool
- buf := h.pool.get()
- defer h.pool.put(buf)
- bufStart := 0
- for {
- n, err := io.ReadFull(body, buf[bufStart:])
- if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
- log.Println("D! " + err.Error())
- // problem reading the request body
- badRequest(res, err.Error())
- return
- }
- h.BytesRecv.Incr(int64(n))
-
- if err == io.EOF {
- if return400 {
- badRequest(res, "")
- } else {
- res.WriteHeader(http.StatusNoContent)
- }
- return
- }
-
- if hangingBytes {
- i := bytes.IndexByte(buf, '\n')
- if i == -1 {
- // still didn't find a newline, keep scanning
- continue
- }
- // rotate the bit remaining after the first newline to the front of the buffer
- i++ // start copying after the newline
- bufStart = len(buf) - i
- if bufStart > 0 {
- copy(buf, buf[i:])
- }
- hangingBytes = false
- continue
- }
-
- if err == io.ErrUnexpectedEOF {
- // finished reading the request body
- err = h.parse(buf[:n+bufStart], now, precision)
- if err != nil {
- log.Println("D! "+err.Error(), bufStart+n)
- return400 = true
- }
- if return400 {
- if err != nil {
- badRequest(res, err.Error())
- } else {
- badRequest(res, "")
- }
- } else {
- res.WriteHeader(http.StatusNoContent)
- }
- return
- }
-
- // if we got down here it means that we filled our buffer, and there
- // are still bytes remaining to be read. So we will parse up until the
- // final newline, then push the rest of the bytes into the next buffer.
- i := bytes.LastIndexByte(buf, '\n')
- if i == -1 {
- h.longLines.Incr(1)
- // drop any line longer than the max buffer size
- log.Printf("D! http_listener received a single line longer than the maximum of %d bytes",
- len(buf))
- hangingBytes = true
- return400 = true
- bufStart = 0
- continue
- }
- if err := h.parse(buf[:i+1], now, precision); err != nil {
- log.Println("D! " + err.Error())
- return400 = true
- }
- // rotate the bit remaining after the last newline to the front of the buffer
- i++ // start copying after the newline
- bufStart = len(buf) - i
- if bufStart > 0 {
- copy(buf, buf[i:])
- }
- }
-}
-
-func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
- h.mu.Lock()
- defer h.mu.Unlock()
-
- h.handler.SetTimePrecision(getPrecisionMultiplier(precision))
- h.handler.SetTimeFunc(func() time.Time { return t })
- metrics, err := h.parser.Parse(b)
- if err != nil {
- return fmt.Errorf("unable to parse: %s", err.Error())
- }
-
- for _, m := range metrics {
- h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
- }
-
- return nil
-}
-
-func tooLarge(res http.ResponseWriter) {
- res.Header().Set("Content-Type", "application/json")
- res.Header().Set("X-Influxdb-Version", "1.0")
- res.Header().Set("X-Influxdb-Error", "http: request body too large")
- res.WriteHeader(http.StatusRequestEntityTooLarge)
- res.Write([]byte(`{"error":"http: request body too large"}`))
-}
-
-func badRequest(res http.ResponseWriter, errString string) {
- res.Header().Set("Content-Type", "application/json")
- res.Header().Set("X-Influxdb-Version", "1.0")
- if errString == "" {
- errString = "http: bad request"
- }
- res.Header().Set("X-Influxdb-Error", errString)
- res.WriteHeader(http.StatusBadRequest)
- res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString)))
-}
-
-func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
- if h.BasicUsername != "" && h.BasicPassword != "" {
- reqUsername, reqPassword, ok := req.BasicAuth()
- if !ok ||
- subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 ||
- subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 {
-
- h.AuthFailures.Incr(1)
- http.Error(res, "Unauthorized.", http.StatusUnauthorized)
- return
- }
- handler(res, req)
- } else {
- handler(res, req)
- }
-}
-
-func getPrecisionMultiplier(precision string) time.Duration {
- d := time.Nanosecond
- switch precision {
- case "u":
- d = time.Microsecond
- case "ms":
- d = time.Millisecond
- case "s":
- d = time.Second
- case "m":
- d = time.Minute
- case "h":
- d = time.Hour
- }
- return d
-}
-
-func init() {
- // http_listener deprecated in 1.9
- inputs.Add("http_listener", func() telegraf.Input {
- return &HTTPListener{
- ServiceAddress: ":8186",
- TimeFunc: time.Now,
- }
- })
- inputs.Add("influxdb_listener", func() telegraf.Input {
- return &HTTPListener{
- ServiceAddress: ":8186",
- TimeFunc: time.Now,
- }
- })
-}
diff --git a/plugins/inputs/influxdb_listener/http_listener_test.go b/plugins/inputs/influxdb_listener/http_listener_test.go
deleted file mode 100644
index 9642950613840..0000000000000
--- a/plugins/inputs/influxdb_listener/http_listener_test.go
+++ /dev/null
@@ -1,475 +0,0 @@
-package http_listener
-
-import (
- "bytes"
- "crypto/tls"
- "crypto/x509"
- "io/ioutil"
- "net/http"
- "net/url"
- "runtime"
- "strconv"
- "sync"
- "testing"
- "time"
-
- "github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/testutil"
-
- "github.com/stretchr/testify/require"
-)
-
-const (
- testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"
-
- testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257"
-
- testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257
-cpu_load_short,host=server03 value=12.0 1422568543702900257
-cpu_load_short,host=server04 value=12.0 1422568543702900257
-cpu_load_short,host=server05 value=12.0 1422568543702900257
-cpu_load_short,host=server06 value=12.0 1422568543702900257
-`
- badMsg = "blahblahblah: 42\n"
-
- emptyMsg = ""
-
- basicUsername = "test-username-please-ignore"
- basicPassword = "super-secure-password!"
-)
-
-var (
- pki = testutil.NewPKI("../../../testutil/pki")
-)
-
-func newTestHTTPListener() *HTTPListener {
- listener := &HTTPListener{
- ServiceAddress: "localhost:0",
- TimeFunc: time.Now,
- }
- return listener
-}
-
-func newTestHTTPAuthListener() *HTTPListener {
- listener := newTestHTTPListener()
- listener.BasicUsername = basicUsername
- listener.BasicPassword = basicPassword
- return listener
-}
-
-func newTestHTTPSListener() *HTTPListener {
- listener := &HTTPListener{
- ServiceAddress: "localhost:0",
- ServerConfig: *pki.TLSServerConfig(),
- TimeFunc: time.Now,
- }
-
- return listener
-}
-
-func getHTTPSClient() *http.Client {
- tlsConfig, err := pki.TLSClientConfig().TLSConfig()
- if err != nil {
- panic(err)
- }
- return &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: tlsConfig,
- },
- }
-}
-
-func createURL(listener *HTTPListener, scheme string, path string, rawquery string) string {
- u := url.URL{
- Scheme: scheme,
- Host: "localhost:" + strconv.Itoa(listener.Port),
- Path: path,
- RawQuery: rawquery,
- }
- return u.String()
-}
-
-func TestWriteHTTPSNoClientAuth(t *testing.T) {
- listener := newTestHTTPSListener()
- listener.TLSAllowedCACerts = nil
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- cas := x509.NewCertPool()
- cas.AppendCertsFromPEM([]byte(pki.ReadServerCert()))
- noClientAuthClient := &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{
- RootCAs: cas,
- },
- },
- }
-
- // post single message to listener
- resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestWriteHTTPSWithClientAuth(t *testing.T) {
- listener := newTestHTTPSListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- // post single message to listener
- resp, err := getHTTPSClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestWriteHTTPBasicAuth(t *testing.T) {
- listener := newTestHTTPAuthListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- client := &http.Client{}
-
- req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsg)))
- require.NoError(t, err)
- req.SetBasicAuth(basicUsername, basicPassword)
- resp, err := client.Do(req)
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, http.StatusNoContent, resp.StatusCode)
-}
-
-func TestWriteHTTP(t *testing.T) {
- listener := newTestHTTPListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- // post single message to listener
- resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-
- acc.Wait(1)
- acc.AssertContainsTaggedFields(t, "cpu_load_short",
- map[string]interface{}{"value": float64(12)},
- map[string]string{"host": "server01"},
- )
-
- // post multiple message to listener
- resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-
- acc.Wait(2)
- hostTags := []string{"server02", "server03",
- "server04", "server05", "server06"}
- for _, hostTag := range hostTags {
- acc.AssertContainsTaggedFields(t, "cpu_load_short",
- map[string]interface{}{"value": float64(12)},
- map[string]string{"host": hostTag},
- )
- }
-
- // Post a gigantic metric to the listener and verify that an error is returned:
- resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 400, resp.StatusCode)
-
- acc.Wait(3)
- acc.AssertContainsTaggedFields(t, "cpu_load_short",
- map[string]interface{}{"value": float64(12)},
- map[string]string{"host": "server01"},
- )
-}
-
-// http listener should add a newline at the end of the buffer if it's not there
-func TestWriteHTTPNoNewline(t *testing.T) {
- listener := newTestHTTPListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- // post single message to listener
- resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-
- acc.Wait(1)
- acc.AssertContainsTaggedFields(t, "cpu_load_short",
- map[string]interface{}{"value": float64(12)},
- map[string]string{"host": "server01"},
- )
-}
-
-func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
- listener := &HTTPListener{
- ServiceAddress: "localhost:0",
- MaxLineSize: internal.Size{Size: 128 * 1000},
- TimeFunc: time.Now,
- }
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- // Post a gigantic metric to the listener and verify that it writes OK this time:
- resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
- listener := &HTTPListener{
- ServiceAddress: "localhost:0",
- MaxBodySize: internal.Size{Size: 4096},
- TimeFunc: time.Now,
- }
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 413, resp.StatusCode)
-}
-
-func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
- listener := &HTTPListener{
- ServiceAddress: "localhost:0",
- MaxLineSize: internal.Size{Size: 70},
- TimeFunc: time.Now,
- }
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(testMsgs)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-
- hostTags := []string{"server02", "server03",
- "server04", "server05", "server06"}
- acc.Wait(len(hostTags))
- for _, hostTag := range hostTags {
- acc.AssertContainsTaggedFields(t, "cpu_load_short",
- map[string]interface{}{"value": float64(12)},
- map[string]string{"host": hostTag},
- )
- }
-}
-
-func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
- listener := &HTTPListener{
- ServiceAddress: "localhost:0",
- MaxLineSize: internal.Size{Size: 100},
- TimeFunc: time.Now,
- }
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 400, resp.StatusCode)
-
- hostTags := []string{"server02", "server03",
- "server04", "server05", "server06"}
- acc.Wait(len(hostTags))
- for _, hostTag := range hostTags {
- acc.AssertContainsTaggedFields(t, "cpu_load_short",
- map[string]interface{}{"value": float64(12)},
- map[string]string{"host": hostTag},
- )
- }
-}
-
-// test that writing gzipped data works
-func TestWriteHTTPGzippedData(t *testing.T) {
- listener := newTestHTTPListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
- require.NoError(t, err)
-
- req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data))
- require.NoError(t, err)
- req.Header.Set("Content-Encoding", "gzip")
-
- client := &http.Client{}
- resp, err := client.Do(req)
- require.NoError(t, err)
- require.EqualValues(t, 204, resp.StatusCode)
-
- hostTags := []string{"server02", "server03",
- "server04", "server05", "server06"}
- acc.Wait(len(hostTags))
- for _, hostTag := range hostTags {
- acc.AssertContainsTaggedFields(t, "cpu_load_short",
- map[string]interface{}{"value": float64(12)},
- map[string]string{"host": hostTag},
- )
- }
-}
-
-// writes 25,000 metrics to the listener with 10 different writers
-func TestWriteHTTPHighTraffic(t *testing.T) {
- if runtime.GOOS == "darwin" {
- t.Skip("Skipping due to hang on darwin")
- }
- listener := newTestHTTPListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- // post many messages to listener
- var wg sync.WaitGroup
- for i := 0; i < 10; i++ {
- wg.Add(1)
- go func(innerwg *sync.WaitGroup) {
- defer innerwg.Done()
- for i := 0; i < 500; i++ {
- resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
- }
- }(&wg)
- }
-
- wg.Wait()
- listener.Gather(acc)
-
- acc.Wait(25000)
- require.Equal(t, int64(25000), int64(acc.NMetrics()))
-}
-
-func TestReceive404ForInvalidEndpoint(t *testing.T) {
- listener := newTestHTTPListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- // post single message to listener
- resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 404, resp.StatusCode)
-}
-
-func TestWriteHTTPInvalid(t *testing.T) {
- listener := newTestHTTPListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- // post single message to listener
- resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 400, resp.StatusCode)
-}
-
-func TestWriteHTTPEmpty(t *testing.T) {
- listener := newTestHTTPListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- // post single message to listener
- resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestQueryAndPingHTTP(t *testing.T) {
- listener := newTestHTTPListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- // post query to listener
- resp, err := http.Post(
- createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
- require.NoError(t, err)
- require.EqualValues(t, 200, resp.StatusCode)
-
- // post ping to listener
- resp, err = http.Post(createURL(listener, "http", "/ping", ""), "", nil)
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestWriteWithPrecision(t *testing.T) {
- listener := newTestHTTPListener()
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- msg := "xyzzy value=42 1422568543\n"
- resp, err := http.Post(
- createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-
- acc.Wait(1)
- require.Equal(t, 1, len(acc.Metrics))
- require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time)
-}
-
-func TestWriteWithPrecisionNoTimestamp(t *testing.T) {
- listener := newTestHTTPListener()
- listener.TimeFunc = func() time.Time {
- return time.Unix(42, 123456789)
- }
-
- acc := &testutil.Accumulator{}
- require.NoError(t, listener.Start(acc))
- defer listener.Stop()
-
- msg := "xyzzy value=42\n"
- resp, err := http.Post(
- createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
- require.NoError(t, err)
- resp.Body.Close()
- require.EqualValues(t, 204, resp.StatusCode)
-
- acc.Wait(1)
- require.Equal(t, 1, len(acc.Metrics))
- require.Equal(t, time.Unix(42, 0), acc.Metrics[0].Time)
-}
-
-const hugeMetric = `super_long_metric,foo=bar clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i
-`
diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go
new file mode 100644
index 0000000000000..07d27ebbd934d
--- /dev/null
+++ b/plugins/inputs/influxdb_listener/influxdb_listener.go
@@ -0,0 +1,418 @@
+package influxdb_listener
+
+import (
+ "compress/gzip"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/parsers/influx"
+ "github.com/influxdata/telegraf/selfstat"
+)
+
+const (
+ // defaultMaxBodySize is the default maximum request body size, in bytes.
+ // if the request body is over this size, we will return an HTTP 413 error.
+ defaultMaxBodySize = 32 * 1024 * 1024
+)
+
+type InfluxDBListener struct {
+ ServiceAddress string `toml:"service_address"`
+ port int
+ tlsint.ServerConfig
+
+ ReadTimeout internal.Duration `toml:"read_timeout"`
+ WriteTimeout internal.Duration `toml:"write_timeout"`
+ MaxBodySize internal.Size `toml:"max_body_size"`
+ MaxLineSize internal.Size `toml:"max_line_size"` // deprecated in 1.14; ignored
+ BasicUsername string `toml:"basic_username"`
+ BasicPassword string `toml:"basic_password"`
+ DatabaseTag string `toml:"database_tag"`
+ RetentionPolicyTag string `toml:"retention_policy_tag"`
+
+ timeFunc influx.TimeFunc
+
+ listener net.Listener
+ server http.Server
+
+ acc telegraf.Accumulator
+
+ bytesRecv selfstat.Stat
+ requestsServed selfstat.Stat
+ writesServed selfstat.Stat
+ queriesServed selfstat.Stat
+ pingsServed selfstat.Stat
+ requestsRecv selfstat.Stat
+ notFoundsServed selfstat.Stat
+ buffersCreated selfstat.Stat
+ authFailures selfstat.Stat
+
+ Log telegraf.Logger `toml:"-"`
+
+ mux http.ServeMux
+}
+
+const sampleConfig = `
+ ## Address and port to host InfluxDB listener on
+ service_address = ":8186"
+
+ ## maximum duration before timing out read of the request
+ read_timeout = "10s"
+ ## maximum duration before timing out write of the response
+ write_timeout = "10s"
+
+ ## Maximum allowed HTTP request body size in bytes.
+ ## 0 means to use the default of 32MiB.
+ max_body_size = "32MiB"
+
+ ## Optional tag name used to store the database.
+ ## If the write has a database in the query string then it will be kept in this tag name.
+ ## This tag can be used in downstream outputs.
+ ## The default value of nothing means it will be off and the database will not be recorded.
+ # database_tag = ""
+
+ ## If set the retention policy specified in the write query will be added as
+ ## the value of this tag name.
+ # retention_policy_tag = ""
+
+ ## Set one or more allowed client CA certificate file names to
+ ## enable mutually authenticated TLS connections
+ tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+ ## Add service certificate and key
+ tls_cert = "/etc/telegraf/cert.pem"
+ tls_key = "/etc/telegraf/key.pem"
+
+ ## Optional username and password to accept for HTTP basic authentication.
+ ## You probably want to make sure you have TLS configured above for this.
+ # basic_username = "foobar"
+ # basic_password = "barfoo"
+`
+
+func (h *InfluxDBListener) SampleConfig() string {
+ return sampleConfig
+}
+
+func (h *InfluxDBListener) Description() string {
+ return "Accept metrics over InfluxDB 1.x HTTP API"
+}
+
+func (h *InfluxDBListener) Gather(_ telegraf.Accumulator) error {
+ return nil
+}
+
+func (h *InfluxDBListener) routes() {
+ authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, "influxdb",
+ func(_ http.ResponseWriter) {
+ h.authFailures.Incr(1)
+ },
+ )
+
+ h.mux.Handle("/write", authHandler(h.handleWrite()))
+ h.mux.Handle("/query", authHandler(h.handleQuery()))
+ h.mux.Handle("/ping", h.handlePing())
+ h.mux.Handle("/", authHandler(h.handleDefault()))
+}
+
+func (h *InfluxDBListener) Init() error {
+ tags := map[string]string{
+ "address": h.ServiceAddress,
+ }
+ h.bytesRecv = selfstat.Register("influxdb_listener", "bytes_received", tags)
+ h.requestsServed = selfstat.Register("influxdb_listener", "requests_served", tags)
+ h.writesServed = selfstat.Register("influxdb_listener", "writes_served", tags)
+ h.queriesServed = selfstat.Register("influxdb_listener", "queries_served", tags)
+ h.pingsServed = selfstat.Register("influxdb_listener", "pings_served", tags)
+ h.requestsRecv = selfstat.Register("influxdb_listener", "requests_received", tags)
+ h.notFoundsServed = selfstat.Register("influxdb_listener", "not_founds_served", tags)
+ h.buffersCreated = selfstat.Register("influxdb_listener", "buffers_created", tags)
+ h.authFailures = selfstat.Register("influxdb_listener", "auth_failures", tags)
+ h.routes()
+
+ if h.MaxBodySize.Size == 0 {
+ h.MaxBodySize.Size = defaultMaxBodySize
+ }
+
+ if h.MaxLineSize.Size != 0 {
+ h.Log.Warnf("Use of deprecated configuration: 'max_line_size'; parser now handles lines of unlimited length and option is ignored")
+ }
+
+ if h.ReadTimeout.Duration < time.Second {
+ h.ReadTimeout.Duration = time.Second * 10
+ }
+ if h.WriteTimeout.Duration < time.Second {
+ h.WriteTimeout.Duration = time.Second * 10
+ }
+
+ return nil
+}
+
+// Start starts the InfluxDB listener service.
+func (h *InfluxDBListener) Start(acc telegraf.Accumulator) error {
+ h.acc = acc
+
+ tlsConf, err := h.ServerConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ h.server = http.Server{
+ Addr: h.ServiceAddress,
+ Handler: h,
+ ReadTimeout: h.ReadTimeout.Duration,
+ WriteTimeout: h.WriteTimeout.Duration,
+ TLSConfig: tlsConf,
+ }
+
+ var listener net.Listener
+ if tlsConf != nil {
+ listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
+ if err != nil {
+ return err
+ }
+ } else {
+ listener, err = net.Listen("tcp", h.ServiceAddress)
+ if err != nil {
+ return err
+ }
+ }
+ h.listener = listener
+ h.port = listener.Addr().(*net.TCPAddr).Port
+
+ go func() {
+ err = h.server.Serve(h.listener)
+ if err != http.ErrServerClosed {
+ h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress)
+ }
+ }()
+
+ h.Log.Infof("Started HTTP listener service on %s", h.ServiceAddress)
+
+ return nil
+}
+
+// Stop cleans up all resources
+func (h *InfluxDBListener) Stop() {
+ err := h.server.Shutdown(context.Background())
+ if err != nil {
+ h.Log.Infof("Error shutting down HTTP server: %v", err.Error())
+ }
+}
+
+func (h *InfluxDBListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
+ h.requestsRecv.Incr(1)
+ h.mux.ServeHTTP(res, req)
+ h.requestsServed.Incr(1)
+}
+
+func (h *InfluxDBListener) handleQuery() http.HandlerFunc {
+ return func(res http.ResponseWriter, req *http.Request) {
+ defer h.queriesServed.Incr(1)
+ // Deliver a dummy response to the query endpoint, as some InfluxDB
+ // clients test endpoint availability with a query
+ res.Header().Set("Content-Type", "application/json")
+ res.Header().Set("X-Influxdb-Version", "1.0")
+ res.WriteHeader(http.StatusOK)
+ res.Write([]byte("{\"results\":[]}"))
+ }
+}
+
+func (h *InfluxDBListener) handlePing() http.HandlerFunc {
+ return func(res http.ResponseWriter, req *http.Request) {
+ defer h.pingsServed.Incr(1)
+ verbose := req.URL.Query().Get("verbose")
+
+ // respond to ping requests
+ res.Header().Set("X-Influxdb-Version", "1.0")
+ if verbose != "" && verbose != "0" && verbose != "false" {
+ res.Header().Set("Content-Type", "application/json")
+ res.WriteHeader(http.StatusOK)
+ b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above
+ res.Write(b)
+ } else {
+ res.WriteHeader(http.StatusNoContent)
+ }
+ }
+}
+
+func (h *InfluxDBListener) handleDefault() http.HandlerFunc {
+ return func(res http.ResponseWriter, req *http.Request) {
+ defer h.notFoundsServed.Incr(1)
+ http.NotFound(res, req)
+ }
+}
+
+func (h *InfluxDBListener) handleWrite() http.HandlerFunc {
+ return func(res http.ResponseWriter, req *http.Request) {
+ defer h.writesServed.Incr(1)
+ // Check that the content length is not too large for us to handle.
+ if req.ContentLength > h.MaxBodySize.Size {
+ tooLarge(res)
+ return
+ }
+
+ db := req.URL.Query().Get("db")
+ rp := req.URL.Query().Get("rp")
+
+ body := req.Body
+ body = http.MaxBytesReader(res, body, h.MaxBodySize.Size)
+ // Handle gzip request bodies
+ if req.Header.Get("Content-Encoding") == "gzip" {
+ var err error
+ body, err = gzip.NewReader(body)
+ if err != nil {
+ h.Log.Debugf("Error decompressing request body: %v", err.Error())
+ badRequest(res, err.Error())
+ return
+ }
+ defer body.Close()
+ }
+
+ parser := influx.NewStreamParser(body)
+ parser.SetTimeFunc(h.timeFunc)
+
+ precisionStr := req.URL.Query().Get("precision")
+ if precisionStr != "" {
+ precision := getPrecisionMultiplier(precisionStr)
+ parser.SetTimePrecision(precision)
+ }
+
+ var m telegraf.Metric
+ var err error
+ var parseErrorCount int
+ var lastPos int = 0
+ var firstParseErrorStr string
+ for {
+ select {
+ case <-req.Context().Done():
+ // Shutting down before parsing is finished.
+ res.WriteHeader(http.StatusServiceUnavailable)
+ return
+ default:
+ }
+
+ m, err = parser.Next()
+ pos := parser.Position()
+ h.bytesRecv.Incr(int64(pos - lastPos))
+ lastPos = pos
+
+ // Continue parsing metrics even if some are malformed
+ if parseErr, ok := err.(*influx.ParseError); ok {
+ parseErrorCount += 1
+ errStr := parseErr.Error()
+ if firstParseErrorStr == "" {
+ firstParseErrorStr = errStr
+ }
+ continue
+ } else if err != nil {
+ // Either we're exiting cleanly (err ==
+ // influx.EOF) or there's an unexpected error
+ break
+ }
+
+ if h.DatabaseTag != "" && db != "" {
+ m.AddTag(h.DatabaseTag, db)
+ }
+
+ if h.RetentionPolicyTag != "" && rp != "" {
+ m.AddTag(h.RetentionPolicyTag, rp)
+ }
+
+ h.acc.AddMetric(m)
+
+ }
+ if err != influx.EOF {
+ h.Log.Debugf("Error parsing the request body: %v", err.Error())
+ badRequest(res, err.Error())
+ return
+ }
+ if parseErrorCount > 0 {
+ var partialErrorString string
+ switch parseErrorCount {
+ case 1:
+ partialErrorString = fmt.Sprintf("%s", firstParseErrorStr)
+ case 2:
+ partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr)
+ default:
+ partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1)
+ }
+ partialWrite(res, partialErrorString)
+ return
+ }
+
+ // http request success
+ res.WriteHeader(http.StatusNoContent)
+ }
+}
+
+func tooLarge(res http.ResponseWriter) {
+ res.Header().Set("Content-Type", "application/json")
+ res.Header().Set("X-Influxdb-Version", "1.0")
+ res.Header().Set("X-Influxdb-Error", "http: request body too large")
+ res.WriteHeader(http.StatusRequestEntityTooLarge)
+ res.Write([]byte(`{"error":"http: request body too large"}`))
+}
+
+func badRequest(res http.ResponseWriter, errString string) {
+ res.Header().Set("Content-Type", "application/json")
+ res.Header().Set("X-Influxdb-Version", "1.0")
+ if errString == "" {
+ errString = "http: bad request"
+ }
+ res.Header().Set("X-Influxdb-Error", errString)
+ res.WriteHeader(http.StatusBadRequest)
+ res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString)))
+}
+
+func partialWrite(res http.ResponseWriter, errString string) {
+ res.Header().Set("Content-Type", "application/json")
+ res.Header().Set("X-Influxdb-Version", "1.0")
+ res.Header().Set("X-Influxdb-Error", errString)
+ res.WriteHeader(http.StatusBadRequest)
+ res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString)))
+}
+
+func getPrecisionMultiplier(precision string) time.Duration {
+ // Influxdb defaults silently to nanoseconds if precision isn't
+ // one of the following:
+ var d time.Duration
+ switch precision {
+ case "u":
+ d = time.Microsecond
+ case "ms":
+ d = time.Millisecond
+ case "s":
+ d = time.Second
+ case "m":
+ d = time.Minute
+ case "h":
+ d = time.Hour
+ default:
+ d = time.Nanosecond
+ }
+ return d
+}
+
+func init() {
+ // http_listener deprecated in 1.9
+ inputs.Add("http_listener", func() telegraf.Input {
+ return &InfluxDBListener{
+ ServiceAddress: ":8186",
+ timeFunc: time.Now,
+ }
+ })
+ inputs.Add("influxdb_listener", func() telegraf.Input {
+ return &InfluxDBListener{
+ ServiceAddress: ":8186",
+ timeFunc: time.Now,
+ }
+ })
+}
diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go
new file mode 100644
index 0000000000000..d3dc552192007
--- /dev/null
+++ b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go
@@ -0,0 +1,108 @@
+package influxdb_listener
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/selfstat"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+// newListener is the minimal InfluxDBListener construction to serve writes.
+func newListener() *InfluxDBListener {
+ listener := &InfluxDBListener{
+ timeFunc: time.Now,
+ acc: &testutil.NopAccumulator{},
+ bytesRecv: selfstat.Register("influxdb_listener", "bytes_received", map[string]string{}),
+ writesServed: selfstat.Register("influxdb_listener", "writes_served", map[string]string{}),
+ MaxBodySize: internal.Size{
+ Size: defaultMaxBodySize,
+ },
+ }
+ return listener
+}
+
+func BenchmarkInfluxDBListener_serveWrite(b *testing.B) {
+ res := httptest.NewRecorder()
+ addr := "http://localhost/write?db=mydb"
+
+ benchmarks := []struct {
+ name string
+ lines string
+ }{
+ {
+ name: "single line, tag, and field",
+ lines: lines(1, 1, 1),
+ },
+ {
+ name: "single line, 10 tags and fields",
+ lines: lines(1, 10, 10),
+ },
+ {
+ name: "single line, 100 tags and fields",
+ lines: lines(1, 100, 100),
+ },
+ {
+ name: "1k lines, single tag and field",
+ lines: lines(1000, 1, 1),
+ },
+ {
+ name: "1k lines, 10 tags and fields",
+ lines: lines(1000, 10, 10),
+ },
+ {
+ name: "10k lines, 10 tags and fields",
+ lines: lines(10000, 10, 10),
+ },
+ {
+ name: "100k lines, 10 tags and fields",
+ lines: lines(100000, 10, 10),
+ },
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ listener := newListener()
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ req, err := http.NewRequest("POST", addr, strings.NewReader(bm.lines))
+ if err != nil {
+ b.Error(err)
+ }
+ listener.handleWrite()(res, req)
+ if res.Code != http.StatusNoContent {
+ b.Errorf("unexpected status %d", res.Code)
+ }
+ }
+ })
+ }
+}
+
+func lines(lines, numTags, numFields int) string {
+ lp := make([]string, lines)
+ for i := 0; i < lines; i++ {
+ tags := make([]string, numTags)
+ for j := 0; j < numTags; j++ {
+ tags[j] = fmt.Sprintf("t%d=v%d", j, j)
+ }
+
+ fields := make([]string, numFields)
+ for k := 0; k < numFields; k++ {
+ fields[k] = fmt.Sprintf("f%d=%d", k, k)
+ }
+
+ lp[i] = fmt.Sprintf("m%d,%s %s",
+ i,
+ strings.Join(tags, ","),
+ strings.Join(fields, ","),
+ )
+ }
+
+ return strings.Join(lp, "\n")
+}
diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go
new file mode 100644
index 0000000000000..5c934e371bfc7
--- /dev/null
+++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go
@@ -0,0 +1,650 @@
+package influxdb_listener
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "runtime"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"
+
+ testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257"
+
+ testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257
+cpu_load_short,host=server03 value=12.0 1422568543702900257
+cpu_load_short,host=server04 value=12.0 1422568543702900257
+cpu_load_short,host=server05 value=12.0 1422568543702900257
+cpu_load_short,host=server06 value=12.0 1422568543702900257
+`
+ testPartial = `cpu,host=a value1=1
+cpu,host=b value1=1,value2=+Inf,value3=3
+cpu,host=c value1=1`
+
+ badMsg = "blahblahblah: 42\n"
+
+ emptyMsg = ""
+
+ basicUsername = "test-username-please-ignore"
+ basicPassword = "super-secure-password!"
+)
+
+var (
+ pki = testutil.NewPKI("../../../testutil/pki")
+)
+
+func newTestListener() *InfluxDBListener {
+ listener := &InfluxDBListener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ timeFunc: time.Now,
+ }
+ return listener
+}
+
+func newTestAuthListener() *InfluxDBListener {
+ listener := newTestListener()
+ listener.BasicUsername = basicUsername
+ listener.BasicPassword = basicPassword
+ return listener
+}
+
+func newTestSecureListener() *InfluxDBListener {
+ listener := &InfluxDBListener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ ServerConfig: *pki.TLSServerConfig(),
+ timeFunc: time.Now,
+ }
+
+ return listener
+}
+
+func getSecureClient() *http.Client {
+ tlsConfig, err := pki.TLSClientConfig().TLSConfig()
+ if err != nil {
+ panic(err)
+ }
+ return &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsConfig,
+ },
+ }
+}
+
+func createURL(listener *InfluxDBListener, scheme string, path string, rawquery string) string {
+ u := url.URL{
+ Scheme: scheme,
+ Host: "localhost:" + strconv.Itoa(listener.port),
+ Path: path,
+ RawQuery: rawquery,
+ }
+ return u.String()
+}
+
+func TestWriteSecureNoClientAuth(t *testing.T) {
+ listener := newTestSecureListener()
+ listener.TLSAllowedCACerts = nil
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ cas := x509.NewCertPool()
+ cas.AppendCertsFromPEM([]byte(pki.ReadServerCert()))
+ noClientAuthClient := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{
+ RootCAs: cas,
+ },
+ },
+ }
+
+ // post single message to listener
+ resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestWriteSecureWithClientAuth(t *testing.T) {
+ listener := newTestSecureListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := getSecureClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestWriteBasicAuth(t *testing.T) {
+ listener := newTestAuthListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ client := &http.Client{}
+
+ req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ req.SetBasicAuth(basicUsername, basicPassword)
+ resp, err := client.Do(req)
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, http.StatusNoContent, resp.StatusCode)
+}
+
+func TestWriteKeepDatabase(t *testing.T) {
+ testMsgWithDB := "cpu_load_short,host=server01,database=wrongdb value=12.0 1422568543702900257\n"
+
+ listener := newTestListener()
+ listener.DatabaseTag = "database"
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": "server01", "database": "mydb"},
+ )
+
+ // post single message to listener with a database tag in it already. It should be clobbered.
+ resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgWithDB)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": "server01", "database": "mydb"},
+ )
+
+ // post multiple message to listener
+ resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(2)
+ hostTags := []string{"server02", "server03",
+ "server04", "server05", "server06"}
+ for _, hostTag := range hostTags {
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": hostTag, "database": "mydb"},
+ )
+ }
+}
+
+func TestWriteRetentionPolicyTag(t *testing.T) {
+ listener := newTestListener()
+ listener.RetentionPolicyTag = "rp"
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ resp, err := http.Post(createURL(listener, "http", "/write", "rp=myrp"), "", bytes.NewBuffer([]byte("cpu time_idle=42")))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.Equal(t, 204, resp.StatusCode)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "rp": "myrp",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ acc.Wait(1)
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+}
+
+// http listener should add a newline at the end of the buffer if it's not there
+func TestWriteNoNewline(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": "server01"},
+ )
+}
+
+func TestPartialWrite(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testPartial)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 400, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "cpu",
+ map[string]interface{}{"value1": float64(1)},
+ map[string]string{"host": "a"},
+ )
+ acc.AssertContainsTaggedFields(t, "cpu",
+ map[string]interface{}{"value1": float64(1)},
+ map[string]string{"host": "c"},
+ )
+}
+
+func TestWriteMaxLineSizeIncrease(t *testing.T) {
+ listener := &InfluxDBListener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ timeFunc: time.Now,
+ }
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // Post a gigantic metric to the listener and verify that it writes OK this time:
+ resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestWriteVerySmallMaxBody(t *testing.T) {
+ listener := &InfluxDBListener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ MaxBodySize: internal.Size{Size: 4096},
+ timeFunc: time.Now,
+ }
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 413, resp.StatusCode)
+}
+
+func TestWriteLargeLine(t *testing.T) {
+ listener := &InfluxDBListener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ timeFunc: func() time.Time {
+ return time.Unix(123456789, 0)
+ },
+ }
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ //todo: with the new parser, long lines aren't a problem. Do we need to skip them?
+ //require.EqualValues(t, 400, resp.StatusCode)
+
+ expected := testutil.MustMetric(
+ "super_long_metric",
+ map[string]string{"foo": "bar"},
+ map[string]interface{}{
+ "clients": 42,
+ "connected_followers": 43,
+ "evicted_keys": 44,
+ "expired_keys": 45,
+ "instantaneous_ops_per_sec": 46,
+ "keyspace_hitrate": 47.0,
+ "keyspace_hits": 48,
+ "keyspace_misses": 49,
+ "latest_fork_usec": 50,
+ "master_repl_offset": 51,
+ "mem_fragmentation_ratio": 52.58,
+ "pubsub_channels": 53,
+ "pubsub_patterns": 54,
+ "rdb_changes_since_last_save": 55,
+ "repl_backlog_active": 56,
+ "repl_backlog_histlen": 57,
+ "repl_backlog_size": 58,
+ "sync_full": 59,
+ "sync_partial_err": 60,
+ "sync_partial_ok": 61,
+ "total_commands_processed": 62,
+ "total_connections_received": 63,
+ "uptime": 64,
+ "used_cpu_sys": 65.07,
+ "used_cpu_sys_children": 66.0,
+ "used_cpu_user": 67.1,
+ "used_cpu_user_children": 68.0,
+ "used_memory": 692048,
+ "used_memory_lua": 70792,
+ "used_memory_peak": 711128,
+ "used_memory_rss": 7298144,
+ },
+ time.Unix(123456789, 0),
+ )
+
+ m, ok := acc.Get("super_long_metric")
+ require.True(t, ok)
+ testutil.RequireMetricEqual(t, expected, testutil.FromTestMetric(m))
+
+ hostTags := []string{"server02", "server03",
+ "server04", "server05", "server06"}
+ acc.Wait(len(hostTags))
+ for _, hostTag := range hostTags {
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": hostTag},
+ )
+ }
+}
+
+// test that writing gzipped data works
+func TestWriteGzippedData(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data))
+ require.NoError(t, err)
+ req.Header.Set("Content-Encoding", "gzip")
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ require.NoError(t, err)
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ hostTags := []string{"server02", "server03",
+ "server04", "server05", "server06"}
+ acc.Wait(len(hostTags))
+ for _, hostTag := range hostTags {
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": hostTag},
+ )
+ }
+}
+
+// writes 25,000 metrics to the listener with 10 different writers
+func TestWriteHighTraffic(t *testing.T) {
+ if runtime.GOOS == "darwin" {
+ t.Skip("Skipping due to hang on darwin")
+ }
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post many messages to listener
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func(innerwg *sync.WaitGroup) {
+ defer innerwg.Done()
+ for i := 0; i < 500; i++ {
+ resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+ }
+ }(&wg)
+ }
+
+ wg.Wait()
+ listener.Gather(acc)
+
+ acc.Wait(25000)
+ require.Equal(t, int64(25000), int64(acc.NMetrics()))
+}
+
+func TestReceive404ForInvalidEndpoint(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 404, resp.StatusCode)
+}
+
+func TestWriteInvalid(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 400, resp.StatusCode)
+}
+
+func TestWriteEmpty(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestQuery(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post query to listener
+ resp, err := http.Post(
+ createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
+ require.NoError(t, err)
+ require.EqualValues(t, 200, resp.StatusCode)
+}
+
+func TestPing(t *testing.T) {
+ listener := newTestListener()
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post ping to listener
+ resp, err := http.Post(createURL(listener, "http", "/ping", ""), "", nil)
+ require.NoError(t, err)
+ require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0])
+ require.Len(t, resp.Header["Content-Type"], 0)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestPingVerbose(t *testing.T) {
+ listener := newTestListener()
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post ping to listener
+ resp, err := http.Post(createURL(listener, "http", "/ping", "verbose=1"), "", nil)
+ require.NoError(t, err)
+ require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0])
+ require.Equal(t, "application/json", resp.Header["Content-Type"][0])
+ resp.Body.Close()
+ require.EqualValues(t, 200, resp.StatusCode)
+}
+
+func TestWriteWithPrecision(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ msg := "xyzzy value=42 1422568543\n"
+ resp, err := http.Post(
+ createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ require.Equal(t, 1, len(acc.Metrics))
+ // When timestamp is provided, the precision parameter is
+ // overloaded to specify the timestamp's unit
+ require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time)
+}
+
+func TestWriteWithPrecisionNoTimestamp(t *testing.T) {
+ listener := newTestListener()
+ listener.timeFunc = func() time.Time {
+ return time.Unix(42, 123456789)
+ }
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ msg := "xyzzy value=42\n"
+ resp, err := http.Post(
+ createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ require.Equal(t, 1, len(acc.Metrics))
+ // When timestamp is omitted, the precision parameter actually
+ // specifies the precision. The timestamp is set to the greatest
+ // integer unit less than the provided timestamp (floor).
+ require.Equal(t, time.Unix(42, 0), acc.Metrics[0].Time)
+}
+
+func TestWriteParseErrors(t *testing.T) {
+ var tests = []struct {
+ name string
+ input string
+ expected string
+ }{
+ {
+ name: "one parse error",
+ input: "foo value=1.0\nfoo value=2asdf2.0\nfoo value=3.0\nfoo value=4.0",
+ expected: `metric parse error: expected field at 2:12: "foo value=2"`,
+ },
+ {
+ name: "two parse errors",
+ input: "foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4.0",
+ expected: `metric parse error: expected field at 1:12: "foo value=1" (and 1 other parse error)`,
+ },
+ {
+ name: "three or more parse errors",
+ input: "foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4asdf2.0",
+ expected: `metric parse error: expected field at 1:12: "foo value=1" (and 2 other parse errors)`,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.NopAccumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(tt.input)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 400, resp.StatusCode)
+ require.Equal(t, tt.expected, resp.Header["X-Influxdb-Error"][0])
+ })
+ }
+}
+
+// The term 'master_repl' used here is archaic language from redis
+const hugeMetric = `super_long_metric,foo=bar clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=42i,connected_followers=43i,evicted_keys=44i,expired_keys=45i,instantaneous_ops_per_sec=46i,keyspace_hitrate=47,keyspace_hits=48i,keyspace_misses=49i,latest_fork_usec=50i,master_repl_offset=51i,mem_fragmentation_ratio=52.58,pubsub_channels=53i,pubsub_patterns=54i,rdb_changes_since_last_save=55i,repl_backlog_active=56i,repl_backlog_histlen=57i,repl_backlog_size=58i,sync_full=59i,sync_partial_err=60i,sync_partial_ok=61i,total_commands_processed=62i,total_connections_received=63i,uptime=64i,used_cpu_sys=65.07,used_cpu_sys_children=66,used_cpu_user=67.1,used_cpu_user_children=68,used_memory=692048i,used_memory_lua=70792i,used_memory_peak=711128i,used_memory_rss=7298144i
+`
diff --git a/plugins/inputs/influxdb_v2_listener/README.md b/plugins/inputs/influxdb_v2_listener/README.md
new file mode 100644
index 0000000000000..cef11e9ae380a
--- /dev/null
+++ b/plugins/inputs/influxdb_v2_listener/README.md
@@ -0,0 +1,53 @@
+# InfluxDB V2 Listener Input Plugin
+
+InfluxDB V2 Listener is a service input plugin that listens for requests sent
+according to the [InfluxDB HTTP API][influxdb_http_api]. The intent of the
+plugin is to allow Telegraf to serve as a proxy/router for the `/api/v2/write`
+endpoint of the InfluxDB HTTP API.
+
+The `/api/v2/write` endpoint supports the `precision` query parameter and can be set
+to one of `ns`, `us`, `ms`, `s`. All other parameters are ignored and
+defer to the output plugins configuration.
+
+### Configuration:
+
+```toml
+[[inputs.influxdb_v2_listener]]
+ ## Address and port to host InfluxDB listener on
+ service_address = ":9999"
+
+ ## Maximum allowed HTTP request body size in bytes.
+ ## 0 means to use the default of 32MiB.
+ # max_body_size = "32MiB"
+
+ ## Optional tag to determine the bucket.
+ ## If the write has a bucket in the query string then it will be kept in this tag name.
+ ## This tag can be used in downstream outputs.
+ ## The default value of nothing means it will be off and the database will not be recorded.
+ # bucket_tag = ""
+
+ ## Set one or more allowed client CA certificate file names to
+ ## enable mutually authenticated TLS connections
+ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+ ## Add service certificate and key
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## Optional token to accept for HTTP authentication.
+ ## You probably want to make sure you have TLS configured above for this.
+ # token = "some-long-shared-secret-token"
+```
+
+### Metrics:
+
+Metrics are created from InfluxDB Line Protocol in the request body.
+
+### Troubleshooting:
+
+**Example Query:**
+```
+curl -i -XPOST 'http://localhost:8186/api/v2/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'
+```
+
+[influxdb_http_api]: https://v2.docs.influxdata.com/v2.0/api/
diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go
new file mode 100644
index 0000000000000..01d47b201f502
--- /dev/null
+++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go
@@ -0,0 +1,343 @@
+package influxdb_v2_listener
+
+import (
+ "compress/gzip"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/parsers/influx"
+ "github.com/influxdata/telegraf/selfstat"
+)
+
+const (
+ // defaultMaxBodySize is the default maximum request body size, in bytes.
+ // if the request body is over this size, we will return an HTTP 413 error.
+ defaultMaxBodySize = 32 * 1024 * 1024
+)
+
+// The BadRequestCode constants keep standard error messages
+// see: https://v2.docs.influxdata.com/v2.0/api/#operation/PostWrite
+type BadRequestCode string
+
+const (
+ InternalError BadRequestCode = "internal error"
+ Invalid BadRequestCode = "invalid"
+)
+
+type InfluxDBV2Listener struct {
+ ServiceAddress string `toml:"service_address"`
+ port int
+ tlsint.ServerConfig
+
+ MaxBodySize internal.Size `toml:"max_body_size"`
+ Token string `toml:"token"`
+ BucketTag string `toml:"bucket_tag"`
+
+ timeFunc influx.TimeFunc
+
+ listener net.Listener
+ server http.Server
+
+ acc telegraf.Accumulator
+
+ bytesRecv selfstat.Stat
+ requestsServed selfstat.Stat
+ writesServed selfstat.Stat
+ readysServed selfstat.Stat
+ requestsRecv selfstat.Stat
+ notFoundsServed selfstat.Stat
+ authFailures selfstat.Stat
+
+ startTime time.Time
+
+ Log telegraf.Logger `toml:"-"`
+
+ mux http.ServeMux
+}
+
+const sampleConfig = `
+ ## Address and port to host InfluxDB listener on
+ service_address = ":9999"
+
+ ## Maximum allowed HTTP request body size in bytes.
+ ## 0 means to use the default of 32MiB.
+ # max_body_size = "32MiB"
+
+ ## Optional tag to determine the bucket.
+ ## If the write has a bucket in the query string then it will be kept in this tag name.
+ ## This tag can be used in downstream outputs.
+ ## The default value of nothing means it will be off and the database will not be recorded.
+ # bucket_tag = ""
+
+ ## Set one or more allowed client CA certificate file names to
+ ## enable mutually authenticated TLS connections
+ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+ ## Add service certificate and key
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## Optional token to accept for HTTP authentication.
+ ## You probably want to make sure you have TLS configured above for this.
+ # token = "some-long-shared-secret-token"
+`
+
+func (h *InfluxDBV2Listener) SampleConfig() string {
+ return sampleConfig
+}
+
+func (h *InfluxDBV2Listener) Description() string {
+ return "Accept metrics over InfluxDB 2.x HTTP API"
+}
+
+func (h *InfluxDBV2Listener) Gather(_ telegraf.Accumulator) error {
+ return nil
+}
+
+func (h *InfluxDBV2Listener) routes() {
+ credentials := ""
+ if h.Token != "" {
+ credentials = fmt.Sprintf("Token %s", h.Token)
+ }
+ authHandler := internal.GenericAuthHandler(credentials,
+ func(_ http.ResponseWriter) {
+ h.authFailures.Incr(1)
+ },
+ )
+
+ h.mux.Handle("/api/v2/write", authHandler(h.handleWrite()))
+ h.mux.Handle("/api/v2/ready", h.handleReady())
+ h.mux.Handle("/", authHandler(h.handleDefault()))
+}
+
+func (h *InfluxDBV2Listener) Init() error {
+ tags := map[string]string{
+ "address": h.ServiceAddress,
+ }
+ h.bytesRecv = selfstat.Register("influxdb_v2_listener", "bytes_received", tags)
+ h.requestsServed = selfstat.Register("influxdb_v2_listener", "requests_served", tags)
+ h.writesServed = selfstat.Register("influxdb_v2_listener", "writes_served", tags)
+ h.readysServed = selfstat.Register("influxdb_v2_listener", "readys_served", tags)
+ h.requestsRecv = selfstat.Register("influxdb_v2_listener", "requests_received", tags)
+ h.notFoundsServed = selfstat.Register("influxdb_v2_listener", "not_founds_served", tags)
+ h.authFailures = selfstat.Register("influxdb_v2_listener", "auth_failures", tags)
+ h.routes()
+
+ if h.MaxBodySize.Size == 0 {
+ h.MaxBodySize.Size = defaultMaxBodySize
+ }
+
+ return nil
+}
+
+// Start starts the InfluxDB listener service.
+func (h *InfluxDBV2Listener) Start(acc telegraf.Accumulator) error {
+ h.acc = acc
+
+ tlsConf, err := h.ServerConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ h.server = http.Server{
+ Addr: h.ServiceAddress,
+ Handler: h,
+ TLSConfig: tlsConf,
+ }
+
+ var listener net.Listener
+ if tlsConf != nil {
+ listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
+ if err != nil {
+ return err
+ }
+ } else {
+ listener, err = net.Listen("tcp", h.ServiceAddress)
+ if err != nil {
+ return err
+ }
+ }
+ h.listener = listener
+ h.port = listener.Addr().(*net.TCPAddr).Port
+
+ go func() {
+ err = h.server.Serve(h.listener)
+ if err != http.ErrServerClosed {
+ h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress)
+ }
+ }()
+
+ h.startTime = h.timeFunc()
+
+ h.Log.Infof("Started HTTP listener service on %s", h.ServiceAddress)
+
+ return nil
+}
+
+// Stop cleans up all resources
+func (h *InfluxDBV2Listener) Stop() {
+ err := h.server.Shutdown(context.Background())
+ if err != nil {
+ h.Log.Infof("Error shutting down HTTP server: %v", err.Error())
+ }
+}
+
+func (h *InfluxDBV2Listener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
+ h.requestsRecv.Incr(1)
+ h.mux.ServeHTTP(res, req)
+ h.requestsServed.Incr(1)
+}
+
+func (h *InfluxDBV2Listener) handleReady() http.HandlerFunc {
+ return func(res http.ResponseWriter, req *http.Request) {
+ defer h.readysServed.Incr(1)
+
+ // respond to ready requests
+ res.Header().Set("Content-Type", "application/json")
+ res.WriteHeader(http.StatusOK)
+ b, _ := json.Marshal(map[string]string{
+ "started": h.startTime.Format(time.RFC3339Nano),
+ "status": "ready",
+ "up": h.timeFunc().Sub(h.startTime).String()})
+ res.Write(b)
+ }
+}
+
+func (h *InfluxDBV2Listener) handleDefault() http.HandlerFunc {
+ return func(res http.ResponseWriter, req *http.Request) {
+ defer h.notFoundsServed.Incr(1)
+ http.NotFound(res, req)
+ }
+}
+
+func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
+ return func(res http.ResponseWriter, req *http.Request) {
+ defer h.writesServed.Incr(1)
+ // Check that the content length is not too large for us to handle.
+ if req.ContentLength > h.MaxBodySize.Size {
+ tooLarge(res, h.MaxBodySize.Size)
+ return
+ }
+
+ bucket := req.URL.Query().Get("bucket")
+
+ body := req.Body
+ body = http.MaxBytesReader(res, body, h.MaxBodySize.Size)
+ // Handle gzip request bodies
+ if req.Header.Get("Content-Encoding") == "gzip" {
+ var err error
+ body, err = gzip.NewReader(body)
+ if err != nil {
+ h.Log.Debugf("Error decompressing request body: %v", err.Error())
+ badRequest(res, Invalid, err.Error())
+ return
+ }
+ defer body.Close()
+ }
+
+ var readErr error
+ var bytes []byte
+ //body = http.MaxBytesReader(res, req.Body, 1000000) //p.MaxBodySize.Size)
+ bytes, readErr = ioutil.ReadAll(body)
+ if readErr != nil {
+ h.Log.Debugf("Error parsing the request body: %v", readErr.Error())
+ badRequest(res, InternalError, readErr.Error())
+ return
+ }
+ metricHandler := influx.NewMetricHandler()
+ parser := influx.NewParser(metricHandler)
+ parser.SetTimeFunc(h.timeFunc)
+
+ precisionStr := req.URL.Query().Get("precision")
+ if precisionStr != "" {
+ precision := getPrecisionMultiplier(precisionStr)
+ metricHandler.SetTimePrecision(precision)
+ }
+
+ var metrics []telegraf.Metric
+ var err error
+
+ metrics, err = parser.Parse(bytes)
+
+ if err != influx.EOF && err != nil {
+ h.Log.Debugf("Error parsing the request body: %v", err.Error())
+ badRequest(res, Invalid, err.Error())
+ return
+ }
+
+ for _, m := range metrics {
+ // Handle bucket_tag override
+ if h.BucketTag != "" && bucket != "" {
+ m.AddTag(h.BucketTag, bucket)
+ }
+
+ h.acc.AddMetric(m)
+ }
+
+ // http request success
+ res.WriteHeader(http.StatusNoContent)
+ }
+}
+
+func tooLarge(res http.ResponseWriter, maxLength int64) {
+ res.Header().Set("Content-Type", "application/json")
+ res.Header().Set("X-Influxdb-Error", "http: request body too large")
+ res.WriteHeader(http.StatusRequestEntityTooLarge)
+ b, _ := json.Marshal(map[string]string{
+ "code": fmt.Sprint(Invalid),
+ "message": "http: request body too large",
+ "maxLength": fmt.Sprint(maxLength)})
+ res.Write(b)
+}
+
+func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) {
+ res.Header().Set("Content-Type", "application/json")
+ if errString == "" {
+ errString = "http: bad request"
+ }
+ res.Header().Set("X-Influxdb-Error", errString)
+ res.WriteHeader(http.StatusBadRequest)
+ b, _ := json.Marshal(map[string]string{
+ "code": fmt.Sprint(code),
+ "message": errString,
+ "op": "",
+ "err": errString,
+ })
+ res.Write(b)
+}
+
+func getPrecisionMultiplier(precision string) time.Duration {
+ // Influxdb defaults silently to nanoseconds if precision isn't
+ // one of the following:
+ var d time.Duration
+ switch precision {
+ case "us":
+ d = time.Microsecond
+ case "ms":
+ d = time.Millisecond
+ case "s":
+ d = time.Second
+ default:
+ d = time.Nanosecond
+ }
+ return d
+}
+
+func init() {
+ inputs.Add("influxdb_v2_listener", func() telegraf.Input {
+ return &InfluxDBV2Listener{
+ ServiceAddress: ":9999",
+ timeFunc: time.Now,
+ }
+ })
+}
diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go
new file mode 100644
index 0000000000000..e1e2c7090b359
--- /dev/null
+++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go
@@ -0,0 +1,108 @@
+package influxdb_v2_listener
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/selfstat"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+// newListener is the minimal InfluxDBV2Listener construction to serve writes.
+func newListener() *InfluxDBV2Listener {
+ listener := &InfluxDBV2Listener{
+ timeFunc: time.Now,
+ acc: &testutil.NopAccumulator{},
+ bytesRecv: selfstat.Register("influxdb_v2_listener", "bytes_received", map[string]string{}),
+ writesServed: selfstat.Register("influxdb_v2_listener", "writes_served", map[string]string{}),
+ MaxBodySize: internal.Size{
+ Size: defaultMaxBodySize,
+ },
+ }
+ return listener
+}
+
+func BenchmarkInfluxDBV2Listener_serveWrite(b *testing.B) {
+ res := httptest.NewRecorder()
+ addr := "http://localhost/api/v2/write?bucket=mybucket"
+
+ benchmarks := []struct {
+ name string
+ lines string
+ }{
+ {
+ name: "single line, tag, and field",
+ lines: lines(1, 1, 1),
+ },
+ {
+ name: "single line, 10 tags and fields",
+ lines: lines(1, 10, 10),
+ },
+ {
+ name: "single line, 100 tags and fields",
+ lines: lines(1, 100, 100),
+ },
+ {
+ name: "1k lines, single tag and field",
+ lines: lines(1000, 1, 1),
+ },
+ {
+ name: "1k lines, 10 tags and fields",
+ lines: lines(1000, 10, 10),
+ },
+ {
+ name: "10k lines, 10 tags and fields",
+ lines: lines(10000, 10, 10),
+ },
+ {
+ name: "100k lines, 10 tags and fields",
+ lines: lines(100000, 10, 10),
+ },
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ listener := newListener()
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ req, err := http.NewRequest("POST", addr, strings.NewReader(bm.lines))
+ if err != nil {
+ b.Error(err)
+ }
+ listener.handleWrite()(res, req)
+ if res.Code != http.StatusNoContent {
+ b.Errorf("unexpected status %d", res.Code)
+ }
+ }
+ })
+ }
+}
+
+func lines(lines, numTags, numFields int) string {
+ lp := make([]string, lines)
+ for i := 0; i < lines; i++ {
+ tags := make([]string, numTags)
+ for j := 0; j < numTags; j++ {
+ tags[j] = fmt.Sprintf("t%d=v%d", j, j)
+ }
+
+ fields := make([]string, numFields)
+ for k := 0; k < numFields; k++ {
+ fields[k] = fmt.Sprintf("f%d=%d", k, k)
+ }
+
+ lp[i] = fmt.Sprintf("m%d,%s %s",
+ i,
+ strings.Join(tags, ","),
+ strings.Join(fields, ","),
+ )
+ }
+
+ return strings.Join(lp, "\n")
+}
diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go
new file mode 100644
index 0000000000000..2a80bb4d351e6
--- /dev/null
+++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go
@@ -0,0 +1,539 @@
+package influxdb_v2_listener
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "runtime"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"
+
+ testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257"
+
+ testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257
+cpu_load_short,host=server03 value=12.0 1422568543702900257
+cpu_load_short,host=server04 value=12.0 1422568543702900257
+cpu_load_short,host=server05 value=12.0 1422568543702900257
+cpu_load_short,host=server06 value=12.0 1422568543702900257
+`
+ testPartial = `cpu,host=a value1=1
+cpu,host=b value1=1,value2=+Inf,value3=3
+cpu,host=c value1=1`
+
+ badMsg = "blahblahblah: 42\n"
+
+ emptyMsg = ""
+
+ token = "test-token-please-ignore"
+)
+
+var (
+ pki = testutil.NewPKI("../../../testutil/pki")
+)
+
+func newTestListener() *InfluxDBV2Listener {
+ listener := &InfluxDBV2Listener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ timeFunc: time.Now,
+ }
+ return listener
+}
+
+func newTestAuthListener() *InfluxDBV2Listener {
+ listener := newTestListener()
+ listener.Token = token
+ return listener
+}
+
+func newTestSecureListener() *InfluxDBV2Listener {
+ listener := &InfluxDBV2Listener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ ServerConfig: *pki.TLSServerConfig(),
+ timeFunc: time.Now,
+ }
+
+ return listener
+}
+
+func getSecureClient() *http.Client {
+ tlsConfig, err := pki.TLSClientConfig().TLSConfig()
+ if err != nil {
+ panic(err)
+ }
+ return &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsConfig,
+ },
+ }
+}
+
+func createURL(listener *InfluxDBV2Listener, scheme string, path string, rawquery string) string {
+ u := url.URL{
+ Scheme: scheme,
+ Host: "localhost:" + strconv.Itoa(listener.port),
+ Path: path,
+ RawQuery: rawquery,
+ }
+ return u.String()
+}
+
+func TestWriteSecureNoClientAuth(t *testing.T) {
+ listener := newTestSecureListener()
+ listener.TLSAllowedCACerts = nil
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ cas := x509.NewCertPool()
+ cas.AppendCertsFromPEM([]byte(pki.ReadServerCert()))
+ noClientAuthClient := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{
+ RootCAs: cas,
+ },
+ },
+ }
+
+ // post single message to listener
+ resp, err := noClientAuthClient.Post(createURL(listener, "https", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestWriteSecureWithClientAuth(t *testing.T) {
+ listener := newTestSecureListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := getSecureClient().Post(createURL(listener, "https", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestWriteTokenAuth(t *testing.T) {
+ listener := newTestAuthListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ client := &http.Client{}
+
+ req, err := http.NewRequest("POST", createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ req.Header.Set("Authorization", fmt.Sprintf("Token %s", token))
+ resp, err := client.Do(req)
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, http.StatusNoContent, resp.StatusCode)
+}
+
+func TestWriteKeepBucket(t *testing.T) {
+ testMsgWithDB := "cpu_load_short,host=server01,bucketTag=wrongbucket value=12.0 1422568543702900257\n"
+
+ listener := newTestListener()
+ listener.BucketTag = "bucketTag"
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": "server01", "bucketTag": "mybucket"},
+ )
+
+ // post single message to listener with a database tag in it already. It should be clobbered.
+ resp, err = http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgWithDB)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": "server01", "bucketTag": "mybucket"},
+ )
+
+ // post multiple message to listener
+ resp, err = http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgs)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(2)
+ hostTags := []string{"server02", "server03",
+ "server04", "server05", "server06"}
+ for _, hostTag := range hostTags {
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": hostTag, "bucketTag": "mybucket"},
+ )
+ }
+}
+
+// http listener should add a newline at the end of the buffer if it's not there
+func TestWriteNoNewline(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": "server01"},
+ )
+}
+
+func TestAllOrNothing(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testPartial)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 400, resp.StatusCode)
+}
+
+func TestWriteMaxLineSizeIncrease(t *testing.T) {
+ listener := &InfluxDBV2Listener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ timeFunc: time.Now,
+ }
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // Post a gigantic metric to the listener and verify that it writes OK this time:
+ resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestWriteVerySmallMaxBody(t *testing.T) {
+ listener := &InfluxDBV2Listener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ MaxBodySize: internal.Size{Size: 4096},
+ timeFunc: time.Now,
+ }
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 413, resp.StatusCode)
+}
+
+func TestWriteLargeLine(t *testing.T) {
+ listener := &InfluxDBV2Listener{
+ Log: testutil.Logger{},
+ ServiceAddress: "localhost:0",
+ timeFunc: func() time.Time {
+ return time.Unix(123456789, 0)
+ },
+ }
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ //todo: with the new parser, long lines aren't a problem. Do we need to skip them?
+ //require.EqualValues(t, 400, resp.StatusCode)
+
+ expected := testutil.MustMetric(
+ "super_long_metric",
+ map[string]string{"foo": "bar"},
+ map[string]interface{}{
+ "clients": 42,
+ "connected_followers": 43,
+ "evicted_keys": 44,
+ "expired_keys": 45,
+ "instantaneous_ops_per_sec": 46,
+ "keyspace_hitrate": 47.0,
+ "keyspace_hits": 48,
+ "keyspace_misses": 49,
+ "latest_fork_usec": 50,
+ "master_repl_offset": 51,
+ "mem_fragmentation_ratio": 52.58,
+ "pubsub_channels": 53,
+ "pubsub_patterns": 54,
+ "rdb_changes_since_last_save": 55,
+ "repl_backlog_active": 56,
+ "repl_backlog_histlen": 57,
+ "repl_backlog_size": 58,
+ "sync_full": 59,
+ "sync_partial_err": 60,
+ "sync_partial_ok": 61,
+ "total_commands_processed": 62,
+ "total_connections_received": 63,
+ "uptime": 64,
+ "used_cpu_sys": 65.07,
+ "used_cpu_sys_children": 66.0,
+ "used_cpu_user": 67.1,
+ "used_cpu_user_children": 68.0,
+ "used_memory": 692048,
+ "used_memory_lua": 70792,
+ "used_memory_peak": 711128,
+ "used_memory_rss": 7298144,
+ },
+ time.Unix(123456789, 0),
+ )
+
+ m, ok := acc.Get("super_long_metric")
+ require.True(t, ok)
+ testutil.RequireMetricEqual(t, expected, testutil.FromTestMetric(m))
+
+ hostTags := []string{"server02", "server03",
+ "server04", "server05", "server06"}
+ acc.Wait(len(hostTags))
+ for _, hostTag := range hostTags {
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": hostTag},
+ )
+ }
+}
+
+// test that writing gzipped data works
+func TestWriteGzippedData(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("POST", createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), bytes.NewBuffer(data))
+ require.NoError(t, err)
+ req.Header.Set("Content-Encoding", "gzip")
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ require.NoError(t, err)
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ hostTags := []string{"server02", "server03",
+ "server04", "server05", "server06"}
+ acc.Wait(len(hostTags))
+ for _, hostTag := range hostTags {
+ acc.AssertContainsTaggedFields(t, "cpu_load_short",
+ map[string]interface{}{"value": float64(12)},
+ map[string]string{"host": hostTag},
+ )
+ }
+}
+
+// writes 25,000 metrics to the listener with 10 different writers
+func TestWriteHighTraffic(t *testing.T) {
+ if runtime.GOOS == "darwin" {
+ t.Skip("Skipping due to hang on darwin")
+ }
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post many messages to listener
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func(innerwg *sync.WaitGroup) {
+ defer innerwg.Done()
+ for i := 0; i < 500; i++ {
+ resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgs)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+ }
+ }(&wg)
+ }
+
+ wg.Wait()
+ listener.Gather(acc)
+
+ acc.Wait(25000)
+ require.Equal(t, int64(25000), int64(acc.NMetrics()))
+}
+
+func TestReceive404ForInvalidEndpoint(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 404, resp.StatusCode)
+}
+
+func TestWriteInvalid(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(badMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 400, resp.StatusCode)
+}
+
+func TestWriteEmpty(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post single message to listener
+ resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(emptyMsg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestReady(t *testing.T) {
+ listener := newTestListener()
+ listener.timeFunc = func() time.Time {
+ return time.Unix(42, 123456789)
+ }
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ // post ping to listener
+ resp, err := http.Get(createURL(listener, "http", "/api/v2/ready", ""))
+ require.NoError(t, err)
+ require.Equal(t, "application/json", resp.Header["Content-Type"][0])
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Contains(t, string(bodyBytes), "\"status\":\"ready\"")
+ resp.Body.Close()
+ require.EqualValues(t, 200, resp.StatusCode)
+}
+
+func TestWriteWithPrecision(t *testing.T) {
+ listener := newTestListener()
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ msg := "xyzzy value=42 1422568543\n"
+ resp, err := http.Post(
+ createURL(listener, "http", "/api/v2/write", "bucket=mybucket&precision=s"), "", bytes.NewBuffer([]byte(msg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ require.Equal(t, 1, len(acc.Metrics))
+ // When timestamp is provided, the precision parameter is
+ // overloaded to specify the timestamp's unit
+ require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time)
+}
+
+func TestWriteWithPrecisionNoTimestamp(t *testing.T) {
+ listener := newTestListener()
+ listener.timeFunc = func() time.Time {
+ return time.Unix(42, 123456789)
+ }
+
+ acc := &testutil.Accumulator{}
+ require.NoError(t, listener.Init())
+ require.NoError(t, listener.Start(acc))
+ defer listener.Stop()
+
+ msg := "xyzzy value=42\n"
+ resp, err := http.Post(
+ createURL(listener, "http", "/api/v2/write", "bucket=mybucket&precision=s"), "", bytes.NewBuffer([]byte(msg)))
+ require.NoError(t, err)
+ resp.Body.Close()
+ require.EqualValues(t, 204, resp.StatusCode)
+
+ acc.Wait(1)
+ require.Equal(t, 1, len(acc.Metrics))
+ // When timestamp is omitted, the precision parameter actually
+ // specifies the precision. The timestamp is set to the greatest
+ // integer unit less than the provided timestamp (floor).
+ require.Equal(t, time.Unix(42, 0), acc.Metrics[0].Time)
+}
+
+// The term 'master_repl' used here is archaic language from redis
+const hugeMetric = `super_long_metric,foo=bar clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_followers=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=42i,connected_followers=43i,evicted_keys=44i,expired_keys=45i,instantaneous_ops_per_sec=46i,keyspace_hitrate=47,keyspace_hits=48i,keyspace_misses=49i,latest_fork_usec=50i,master_repl_offset=51i,mem_fragmentation_ratio=52.58,pubsub_channels=53i,pubsub_patterns=54i,rdb_changes_since_last_save=55i,repl_backlog_active=56i,repl_backlog_histlen=57i,repl_backlog_size=58i,sync_full=59i,sync_partial_err=60i,sync_partial_ok=61i,total_commands_processed=62i,total_connections_received=63i,uptime=64i,used_cpu_sys=65.07,used_cpu_sys_children=66,used_cpu_user=67.1,used_cpu_user_children=68,used_memory=692048i,used_memory_lua=70792i,used_memory_peak=711128i,used_memory_rss=7298144i
+`
diff --git a/plugins/inputs/influxdb_v2_listener/testdata/testmsgs.gz b/plugins/inputs/influxdb_v2_listener/testdata/testmsgs.gz
new file mode 100644
index 0000000000000..f524dc07128b9
Binary files /dev/null and b/plugins/inputs/influxdb_v2_listener/testdata/testmsgs.gz differ
diff --git a/plugins/inputs/internal/README.md b/plugins/inputs/internal/README.md
index 73f0b018ef067..35e14c77d0fbb 100644
--- a/plugins/inputs/internal/README.md
+++ b/plugins/inputs/internal/README.md
@@ -1,4 +1,4 @@
-# Internal Input Plugin
+# Telegraf Internal Input Plugin
The `internal` plugin collects metrics about the telegraf agent itself.
@@ -42,14 +42,16 @@ agent stats collect aggregate stats on all telegraf plugins.
- metrics_written
internal_gather stats collect aggregate stats on all input plugins
-that are of the same input type. They are tagged with `input=`.
+that are of the same input type. They are tagged with `input=`
+`version=` and `go_version=`.
- internal_gather
- gather_time_ns
- metrics_gathered
internal_write stats collect aggregate stats on all output plugins
-that are of the same input type. They are tagged with `output=`.
+that are of the same input type. They are tagged with `output=`
+and `version=`.
- internal_write
@@ -63,7 +65,7 @@ that are of the same input type. They are tagged with `output=`.
internal_ are metrics which are defined on a per-plugin basis, and
usually contain tags which differentiate each instance of a particular type of
-plugin.
+plugin and `version=`.
- internal_
- individual plugin-specific fields, such as requests counts.
@@ -71,15 +73,16 @@ plugin.
### Tags:
All measurements for specific plugins are tagged with information relevant
-to each particular plugin.
+to each particular plugin and with `version=`.
+
### Example Output:
```
internal_memstats,host=tyrion alloc_bytes=4457408i,sys_bytes=10590456i,pointer_lookups=7i,mallocs=17642i,frees=7473i,heap_sys_bytes=6848512i,heap_idle_bytes=1368064i,heap_in_use_bytes=5480448i,heap_released_bytes=0i,total_alloc_bytes=6875560i,heap_alloc_bytes=4457408i,heap_objects_bytes=10169i,num_gc=2i 1480682800000000000
-internal_agent,host=tyrion metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000
-internal_write,output=file,host=tyrion buffer_limit=10000i,write_time_ns=636609i,metrics_added=18i,metrics_written=18i,buffer_size=0i 1480682800000000000
-internal_gather,input=internal,host=tyrion metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000
-internal_gather,input=http_listener,host=tyrion metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000
-internal_http_listener,address=:8186,host=tyrion queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000
+internal_agent,host=tyrion,go_version=1.12.7,version=1.99.0 metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000
+internal_write,output=file,host=tyrion,version=1.99.0 buffer_limit=10000i,write_time_ns=636609i,metrics_added=18i,metrics_written=18i,buffer_size=0i 1480682800000000000
+internal_gather,input=internal,host=tyrion,version=1.99.0 metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000
+internal_gather,input=http_listener,host=tyrion,version=1.99.0 metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000
+internal_http_listener,address=:8186,host=tyrion,version=1.99.0 queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000
```
diff --git a/plugins/inputs/internal/internal.go b/plugins/inputs/internal/internal.go
index 8b5286f5637f5..2eb8b91c9e39d 100644
--- a/plugins/inputs/internal/internal.go
+++ b/plugins/inputs/internal/internal.go
@@ -2,8 +2,10 @@ package internal
import (
"runtime"
+ "strings"
"github.com/influxdata/telegraf"
+ inter "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/selfstat"
)
@@ -54,7 +56,14 @@ func (s *Self) Gather(acc telegraf.Accumulator) error {
acc.AddFields("internal_memstats", fields, map[string]string{})
}
+ telegrafVersion := inter.Version()
+ goVersion := strings.TrimPrefix(runtime.Version(), "go")
+
for _, m := range selfstat.Metrics() {
+ if m.Name() == "internal_agent" {
+ m.AddTag("go_version", goVersion)
+ }
+ m.AddTag("version", telegrafVersion)
acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
}
diff --git a/plugins/inputs/internal/internal_test.go b/plugins/inputs/internal/internal_test.go
index b17c530384c54..4cdba9099edf0 100644
--- a/plugins/inputs/internal/internal_test.go
+++ b/plugins/inputs/internal/internal_test.go
@@ -26,7 +26,8 @@ func TestSelfPlugin(t *testing.T) {
"test": int64(3),
},
map[string]string{
- "test": "foo",
+ "test": "foo",
+ "version": "",
},
)
acc.ClearMetrics()
@@ -39,7 +40,8 @@ func TestSelfPlugin(t *testing.T) {
"test": int64(101),
},
map[string]string{
- "test": "foo",
+ "test": "foo",
+ "version": "",
},
)
acc.ClearMetrics()
@@ -56,7 +58,8 @@ func TestSelfPlugin(t *testing.T) {
"test_ns": int64(150),
},
map[string]string{
- "test": "foo",
+ "test": "foo",
+ "version": "",
},
)
}
diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go
index 5b0ca374cb907..39b3020ddbd39 100644
--- a/plugins/inputs/interrupts/interrupts.go
+++ b/plugins/inputs/interrupts/interrupts.go
@@ -102,7 +102,7 @@ func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) {
tags := map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device}
fields := map[string]interface{}{"total": irq.Total}
for i := 0; i < len(irq.Cpus); i++ {
- cpu := fmt.Sprintf("cpu%d", i)
+ cpu := fmt.Sprintf("CPU%d", i)
fields[cpu] = irq.Cpus[i]
}
return tags, fields
diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go
index 2579d926d20c6..63ff765b678dd 100644
--- a/plugins/inputs/interrupts/interrupts_test.go
+++ b/plugins/inputs/interrupts/interrupts_test.go
@@ -23,7 +23,7 @@ func expectCpuAsFields(m *testutil.Accumulator, t *testing.T, measurement string
fields := map[string]interface{}{}
total := int64(0)
for idx, count := range irq.Cpus {
- fields[fmt.Sprintf("cpu%d", idx)] = count
+ fields[fmt.Sprintf("CPU%d", idx)] = count
total += count
}
fields["total"] = total
diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md
index fb2e8f26e0c07..0f9faa97f1f3d 100644
--- a/plugins/inputs/ipmi_sensor/README.md
+++ b/plugins/inputs/ipmi_sensor/README.md
@@ -1,7 +1,7 @@
# IPMI Sensor Input Plugin
Get bare metal metrics using the command line utility
-[`ipmitool`](https://sourceforge.net/projects/ipmitool/files/ipmitool/).
+[`ipmitool`](https://github.com/ipmitool/ipmitool).
If no servers are specified, the plugin will query the local machine sensor stats via the following command:
@@ -27,6 +27,11 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr
## optionally specify the path to the ipmitool executable
# path = "/usr/bin/ipmitool"
##
+ ## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
+ ## Sudo must be configured to allow the telegraf user to run ipmitool
+ ## without a password.
+ # use_sudo = false
+ ##
## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
# privilege = "ADMINISTRATOR"
##
@@ -39,7 +44,7 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr
##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
- ## Recomended: use metric 'interval' that is a multiple of 'timeout' to avoid
+ ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
## gaps or overlap in pulled data
interval = "30s"
@@ -86,6 +91,21 @@ ipmi device node. When using udev you can create the device node giving
```
KERNEL=="ipmi*", MODE="660", GROUP="telegraf"
```
+Alternatively, it is possible to use sudo. You will need the following in your telegraf config:
+```toml
+[[inputs.ipmi_sensor]]
+ use_sudo = true
+```
+
+You will also need to update your sudoers file:
+
+```bash
+$ visudo
+# Add the following line:
+Cmnd_Alias IPMITOOL = /usr/bin/ipmitool *
+telegraf ALL=(root) NOPASSWD: IPMITOOL
+Defaults!IPMITOOL !logfile, !syslog, !pam_session
+```
### Example Output
diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go
index e4832cc6516a6..fb53e1bc746fe 100644
--- a/plugins/inputs/ipmi_sensor/ipmi.go
+++ b/plugins/inputs/ipmi_sensor/ipmi.go
@@ -21,7 +21,7 @@ var (
execCommand = exec.Command // execCommand is used to mock commands in tests.
re_v1_parse_line = regexp.MustCompile(`^(?P[^|]*)\|(?P[^|]*)\|(?P.*)`)
re_v2_parse_line = regexp.MustCompile(`^(?P[^|]*)\|[^|]+\|(?P[^|]*)\|(?P[^|]*)\|(?:(?P[^|]+))?`)
- re_v2_parse_description = regexp.MustCompile(`^(?P[0-9.]+)\s(?P.*)|(?P.+)|^$`)
+ re_v2_parse_description = regexp.MustCompile(`^(?P-?[0-9.]+)\s(?P.*)|(?P.+)|^$`)
re_v2_parse_unit = regexp.MustCompile(`^(?P[^,]+)(?:,\s*(?P.*))?`)
)
@@ -32,12 +32,18 @@ type Ipmi struct {
Servers []string
Timeout internal.Duration
MetricVersion int
+ UseSudo bool
}
var sampleConfig = `
## optionally specify the path to the ipmitool executable
# path = "/usr/bin/ipmitool"
##
+ ## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
+ ## Sudo must be configured to allow the telegraf user to run ipmitool
+ ## without a password.
+ # use_sudo = false
+ ##
## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
# privilege = "ADMINISTRATOR"
##
@@ -112,7 +118,13 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
if m.MetricVersion == 2 {
opts = append(opts, "elist")
}
- cmd := execCommand(m.Path, opts...)
+ name := m.Path
+ if m.UseSudo {
+ // -n - avoid prompting the user for input of any kind
+ opts = append([]string{"-n", name}, opts...)
+ name = "sudo"
+ }
+ cmd := execCommand(name, opts...)
out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration)
timestamp := time.Now()
if err != nil {
@@ -150,9 +162,19 @@ func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_
fields["status"] = 0
}
- if strings.Index(ipmiFields["description"], " ") > 0 {
+ description := ipmiFields["description"]
+
+ // handle hex description field
+ if strings.HasPrefix(description, "0x") {
+ descriptionInt, err := strconv.ParseInt(description, 0, 64)
+ if err != nil {
+ continue
+ }
+
+ fields["value"] = float64(descriptionInt)
+ } else if strings.Index(description, " ") > 0 {
// split middle column into value and unit
- valunit := strings.SplitN(ipmiFields["description"], " ", 2)
+ valunit := strings.SplitN(description, " ", 2)
var err error
fields["value"], err = aToFloat(valunit[0])
if err != nil {
diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go
index a66cabfeb9bad..bd5e02c196e76 100644
--- a/plugins/inputs/ipmi_sensor/ipmi_test.go
+++ b/plugins/inputs/ipmi_sensor/ipmi_test.go
@@ -7,6 +7,7 @@ import (
"testing"
"time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
@@ -610,3 +611,140 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected
extractFieldsFromRegex(re_v2_parse_line, tests[i])
}
}
+
+func Test_parseV1(t *testing.T) {
+ type args struct {
+ hostname string
+ cmdOut []byte
+ measuredAt time.Time
+ }
+ tests := []struct {
+ name string
+ args args
+ wantFields map[string]interface{}
+ wantErr bool
+ }{
+ {
+ name: "Test correct V1 parsing with hex code",
+ args: args{
+ hostname: "host",
+ measuredAt: time.Now(),
+ cmdOut: []byte("PS1 Status | 0x02 | ok"),
+ },
+ wantFields: map[string]interface{}{"value": float64(2), "status": 1},
+ wantErr: false,
+ },
+ {
+ name: "Test correct V1 parsing with value with unit",
+ args: args{
+ hostname: "host",
+ measuredAt: time.Now(),
+ cmdOut: []byte("Avg Power | 210 Watts | ok"),
+ },
+ wantFields: map[string]interface{}{"value": float64(210), "status": 1},
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var acc testutil.Accumulator
+
+ if err := parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr {
+ t.Errorf("parseV1() error = %v, wantErr %v", err, tt.wantErr)
+ }
+
+ acc.AssertContainsFields(t, "ipmi_sensor", tt.wantFields)
+ })
+ }
+}
+
+func Test_parseV2(t *testing.T) {
+ type args struct {
+ hostname string
+ cmdOut []byte
+ measuredAt time.Time
+ }
+ tests := []struct {
+ name string
+ args args
+ expected []telegraf.Metric
+ wantErr bool
+ }{
+ {
+ name: "Test correct V2 parsing with analog value with unit",
+ args: args{
+ hostname: "host",
+ cmdOut: []byte("Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected"),
+ measuredAt: time.Now(),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("ipmi_sensor",
+ map[string]string{
+ "name": "power_supply_1",
+ "status_code": "ok",
+ "server": "host",
+ "entity_id": "10.1",
+ "unit": "watts",
+ "status_desc": "presence_detected",
+ },
+ map[string]interface{}{"value": 110.0},
+ time.Unix(0, 0),
+ ),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Test correct V2 parsing without analog value",
+ args: args{
+ hostname: "host",
+ cmdOut: []byte("Intrusion | 73h | ok | 7.1 |"),
+ measuredAt: time.Now(),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("ipmi_sensor",
+ map[string]string{
+ "name": "intrusion",
+ "status_code": "ok",
+ "server": "host",
+ "entity_id": "7.1",
+ "status_desc": "ok",
+ },
+ map[string]interface{}{"value": 0.0},
+ time.Unix(0, 0),
+ ),
+ },
+ wantErr: false,
+ },
+ {
+ name: "parse negative value",
+ args: args{
+ hostname: "host",
+ cmdOut: []byte("DIMM Thrm Mrgn 1 | B0h | ok | 8.1 | -55 degrees C"),
+ measuredAt: time.Now(),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("ipmi_sensor",
+ map[string]string{
+ "name": "dimm_thrm_mrgn_1",
+ "status_code": "ok",
+ "server": "host",
+ "entity_id": "8.1",
+ "unit": "degrees_c",
+ },
+ map[string]interface{}{"value": -55.0},
+ time.Unix(0, 0),
+ ),
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var acc testutil.Accumulator
+ if err := parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr {
+ t.Errorf("parseV2() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+ })
+ }
+}
diff --git a/plugins/inputs/ipset/README.md b/plugins/inputs/ipset/README.md
index ae66ccfc07c38..f4477254f117d 100644
--- a/plugins/inputs/ipset/README.md
+++ b/plugins/inputs/ipset/README.md
@@ -1,4 +1,4 @@
-# Ipset Plugin
+# Ipset Input Plugin
The ipset plugin gathers packets and bytes counters from Linux ipset.
It uses the output of the command "ipset save".
diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md
index 6b56febba0300..db730c88178ff 100644
--- a/plugins/inputs/iptables/README.md
+++ b/plugins/inputs/iptables/README.md
@@ -1,4 +1,4 @@
-# Iptables Plugin
+# Iptables Input Plugin
The iptables plugin gathers packets and bytes counters for rules within a set of table and chain from the Linux's iptables firewall.
diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go
index 21f6642a939ac..e56f8b31d5725 100644
--- a/plugins/inputs/iptables/iptables.go
+++ b/plugins/inputs/iptables/iptables.go
@@ -37,7 +37,7 @@ func (ipt *Iptables) SampleConfig() string {
## iptables can be restricted to only list command "iptables -nvL".
use_sudo = false
## Setting 'use_lock' to true runs iptables with the "-w" option.
- ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
+ ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl")
use_lock = false
## Define an alternate executable, such as "ip6tables". Default is "iptables".
# binary = "ip6tables"
@@ -89,11 +89,10 @@ func (ipt *Iptables) chainList(table, chain string) (string, error) {
name = "sudo"
args = append(args, iptablePath)
}
- iptablesBaseArgs := "-nvL"
if ipt.UseLock {
- iptablesBaseArgs = "-wnvL"
+ args = append(args, "-w", "5")
}
- args = append(args, iptablesBaseArgs, chain, "-t", table, "-x")
+ args = append(args, "-nvL", chain, "-t", table, "-x")
c := exec.Command(name, args...)
out, err := c.Output()
return string(out), err
@@ -103,8 +102,8 @@ const measurement = "iptables"
var errParse = errors.New("Cannot parse iptables list information")
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
-var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
-var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`)
+var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+target`)
+var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+(\w+).*?/\*\s*(.+?)\s*\*/\s*`)
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
lines := strings.Split(data, "\n")
@@ -120,15 +119,16 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error
}
for _, line := range lines[2:] {
matches := valuesRe.FindStringSubmatch(line)
- if len(matches) != 4 {
+ if len(matches) != 5 {
continue
}
pkts := matches[1]
bytes := matches[2]
- comment := matches[3]
+ target := matches[3]
+ comment := matches[4]
- tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment}
+ tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "target": target, "ruleid": comment}
fields := make(map[string]interface{})
var err error
diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go
index cca41e1f425da..681d8bbfc130e 100644
--- a/plugins/inputs/iptables/iptables_test.go
+++ b/plugins/inputs/iptables/iptables_test.go
@@ -42,7 +42,7 @@ func TestIptables_Gather(t *testing.T) {
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */
`},
- tags: []map[string]string{{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}},
+ tags: []map[string]string{{"table": "filter", "chain": "INPUT", "target": "RETURN", "ruleid": "foobar"}},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
},
@@ -98,9 +98,9 @@ func TestIptables_Gather(t *testing.T) {
`,
},
tags: []map[string]string{
- {"table": "filter", "chain": "INPUT", "ruleid": "foo"},
- {"table": "filter", "chain": "FORWARD", "ruleid": "bar"},
- {"table": "filter", "chain": "FORWARD", "ruleid": "foobar"},
+ {"table": "filter", "chain": "INPUT", "target": "RETURN", "ruleid": "foo"},
+ {"table": "filter", "chain": "FORWARD", "target": "RETURN", "ruleid": "bar"},
+ {"table": "filter", "chain": "FORWARD", "target": "RETURN", "ruleid": "foobar"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}},
@@ -118,7 +118,7 @@ func TestIptables_Gather(t *testing.T) {
100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80
`},
tags: []map[string]string{
- {"table": "filter", "chain": "INPUT", "ruleid": "foobar"},
+ {"table": "filter", "chain": "INPUT", "target": "RETURN", "ruleid": "foobar"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
@@ -134,8 +134,8 @@ func TestIptables_Gather(t *testing.T) {
0 0 CLASSIFY all -- * * 1.3.5.7 0.0.0.0/0 /* test2 */ CLASSIFY set 1:4
`},
tags: []map[string]string{
- {"table": "mangle", "chain": "SHAPER", "ruleid": "test"},
- {"table": "mangle", "chain": "SHAPER", "ruleid": "test2"},
+ {"table": "mangle", "chain": "SHAPER", "target": "ACCEPT", "ruleid": "test"},
+ {"table": "mangle", "chain": "SHAPER", "target": "CLASSIFY", "ruleid": "test2"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(0), "bytes": uint64(0)}},
@@ -163,7 +163,7 @@ func TestIptables_Gather(t *testing.T) {
123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */
`},
tags: []map[string]string{
- {"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"},
+ {"table": "all_recv", "chain": "accountfwd", "target": "all", "ruleid": "all_recv"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}},
diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go
index 2d3ad02787e4a..5e3ae0d5637b0 100644
--- a/plugins/inputs/ipvs/ipvs.go
+++ b/plugins/inputs/ipvs/ipvs.go
@@ -3,21 +3,21 @@
package ipvs
import (
- "errors"
"fmt"
- "log"
"math/bits"
"strconv"
"syscall"
"github.com/docker/libnetwork/ipvs"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/common/logrus"
"github.com/influxdata/telegraf/plugins/inputs"
)
// IPVS holds the state for this input plugin
type IPVS struct {
handle *ipvs.Handle
+ Log telegraf.Logger
}
// Description returns a description string
@@ -35,7 +35,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {
if i.handle == nil {
h, err := ipvs.New("") // TODO: make the namespace configurable
if err != nil {
- return errors.New("Unable to open IPVS handle")
+ return fmt.Errorf("unable to open IPVS handle: %v", err)
}
i.handle = h
}
@@ -44,7 +44,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {
if err != nil {
i.handle.Close()
i.handle = nil // trigger a reopen on next call to gather
- return errors.New("Failed to list IPVS services")
+ return fmt.Errorf("failed to list IPVS services: %v", err)
}
for _, s := range services {
fields := map[string]interface{}{
@@ -61,7 +61,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {
destinations, err := i.handle.GetDestinations(s)
if err != nil {
- log.Println("E! Failed to list destinations for a virtual server")
+ i.Log.Errorf("Failed to list destinations for a virtual server: %v", err)
continue // move on to the next virtual server
}
@@ -148,5 +148,8 @@ func addressFamilyToString(af uint16) string {
}
func init() {
- inputs.Add("ipvs", func() telegraf.Input { return &IPVS{} })
+ inputs.Add("ipvs", func() telegraf.Input {
+ logrus.InstallHook()
+ return &IPVS{}
+ })
}
diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md
index 8d375f087db50..f4e9f94ac22a7 100644
--- a/plugins/inputs/jenkins/README.md
+++ b/plugins/inputs/jenkins/README.md
@@ -1,4 +1,4 @@
-# Jenkins Plugin
+# Jenkins Input Plugin
The jenkins plugin gathers information about the nodes and jobs running in a jenkins instance.
@@ -7,7 +7,8 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API
### Configuration:
```toml
- ## The Jenkins URL
+[[inputs.jenkins]]
+ ## The Jenkins URL in the format "schema://host:port"
url = "http://my-jenkins-instance:8080"
# username = "admin"
# password = "admin"
@@ -52,28 +53,41 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API
### Metrics:
- jenkins_node
+ - tags:
+ - source
+ - port
+ - fields:
+ - busy_executors
+ - total_executors
+
++ jenkins_node
- tags:
- arch
- disk_path
- temp_path
- node_name
- status ("online", "offline")
+ - source
+ - port
- fields:
- - disk_available
- - temp_available
- - memory_available
- - memory_total
- - swap_available
- - swap_total
- - response_time
+ - disk_available (Bytes)
+ - temp_available (Bytes)
+ - memory_available (Bytes)
+ - memory_total (Bytes)
+ - swap_available (Bytes)
+ - swap_total (Bytes)
+ - response_time (ms)
+ - num_executors
- jenkins_job
- tags:
- name
- parents
- result
+ - source
+ - port
- fields:
- - duration
+ - duration (ms)
- result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILD, 3 = UNSTABLE, 4 = ABORTED)
### Sample Queries:
@@ -90,7 +104,9 @@ SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now()
```
$ ./telegraf --config telegraf.conf --input-filter jenkins --test
-jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744 1516031535000000000
-jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS duration=2831i,result_code=0i 1516026630000000000
-jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS duration=2285i,result_code=0i 1516027230000000000
+jenkins,host=myhost,port=80,source=my-jenkins-instance busy_executors=4i,total_executors=8i 1580418261000000000
+jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master,source=my-jenkins-instance,port=8080 swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744,num_executors=2i 1516031535000000000
+jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2831i,result_code=0i 1516026630000000000
+jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2285i,result_code=0i 1516027230000000000
```
+
diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go
index cfa0a38e4c104..f4882b1dc0bf2 100644
--- a/plugins/inputs/jenkins/jenkins.go
+++ b/plugins/inputs/jenkins/jenkins.go
@@ -2,10 +2,9 @@ package jenkins
import (
"context"
- "errors"
"fmt"
- "log"
"net/http"
+ "net/url"
"strconv"
"strings"
"sync"
@@ -14,7 +13,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -23,12 +22,16 @@ type Jenkins struct {
URL string
Username string
Password string
+ Source string
+ Port string
// HTTP Timeout specified as a string - 3s, 1m, 1h
ResponseTimeout internal.Duration
tls.ClientConfig
client *client
+ Log telegraf.Logger
+
MaxConnections int `toml:"max_connections"`
MaxBuildAge internal.Duration `toml:"max_build_age"`
MaxSubJobDepth int `toml:"max_subjob_depth"`
@@ -43,7 +46,7 @@ type Jenkins struct {
}
const sampleConfig = `
- ## The Jenkins URL
+ ## The Jenkins URL in the format "schema://host:port"
url = "http://my-jenkins-instance:8080"
# username = "admin"
# password = "admin"
@@ -70,7 +73,7 @@ const sampleConfig = `
## Optional Sub Job Per Layer
## In workflow-multibranch-plugin, each branch will be created as a sub job.
- ## This config will limit to call only the lasted branches in each layer,
+ ## This config will limit to call only the lasted branches in each layer,
## empty will use default value 10
# max_subjob_per_layer = 10
@@ -87,8 +90,9 @@ const sampleConfig = `
// measurement
const (
- measurementNode = "jenkins_node"
- measurementJob = "jenkins_job"
+ measurementJenkins = "jenkins"
+ measurementNode = "jenkins_node"
+ measurementJob = "jenkins_job"
)
// SampleConfig implements telegraf.Input interface
@@ -133,10 +137,26 @@ func (j *Jenkins) newHTTPClient() (*http.Client, error) {
}, nil
}
-// seperate the client as dependency to use httptest Client for mocking
+// separate the client as dependency to use httptest Client for mocking
func (j *Jenkins) initialize(client *http.Client) error {
var err error
+ // init jenkins tags
+ u, err := url.Parse(j.URL)
+ if err != nil {
+ return err
+ }
+ if u.Port() == "" {
+ if u.Scheme == "http" {
+ j.Port = "80"
+ } else if u.Scheme == "https" {
+ j.Port = "443"
+ }
+ } else {
+ j.Port = u.Port()
+ }
+ j.Source = u.Hostname()
+
// init job filter
j.jobFilter, err = filter.Compile(j.JobExclude)
if err != nil {
@@ -179,27 +199,39 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error {
return nil
}
- tags["arch"] = n.MonitorData.HudsonNodeMonitorsArchitectureMonitor
+ monitorData := n.MonitorData
+
+ if monitorData.HudsonNodeMonitorsArchitectureMonitor != "" {
+ tags["arch"] = monitorData.HudsonNodeMonitorsArchitectureMonitor
+ }
tags["status"] = "online"
if n.Offline {
tags["status"] = "offline"
}
- monitorData := n.MonitorData
- if monitorData.HudsonNodeMonitorsArchitectureMonitor == "" {
- return errors.New("empty monitor data, please check your permission")
+
+ tags["source"] = j.Source
+ tags["port"] = j.Port
+
+ fields := make(map[string]interface{})
+ fields["num_executors"] = n.NumExecutors
+
+ if monitorData.HudsonNodeMonitorsResponseTimeMonitor != nil {
+ fields["response_time"] = monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average
+ }
+ if monitorData.HudsonNodeMonitorsDiskSpaceMonitor != nil {
+ tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path
+ fields["disk_available"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size
}
- tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path
- tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path
-
- fields := map[string]interface{}{
- "response_time": monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average,
- "disk_available": monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size,
- "temp_available": monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size,
- "swap_available": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable,
- "memory_available": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable,
- "swap_total": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal,
- "memory_total": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal,
+ if monitorData.HudsonNodeMonitorsTemporarySpaceMonitor != nil {
+ tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path
+ fields["temp_available"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size
+ }
+ if monitorData.HudsonNodeMonitorsSwapSpaceMonitor != nil {
+ fields["swap_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable
+ fields["memory_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable
+ fields["swap_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal
+ fields["memory_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal
}
acc.AddFields(measurementNode, fields, tags)
@@ -213,6 +245,15 @@ func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) {
acc.AddError(err)
return
}
+
+ // get total and busy executors
+ tags := map[string]string{"source": j.Source, "port": j.Port}
+ fields := make(map[string]interface{})
+ fields["busy_executors"] = nodeResp.BusyExecutors
+ fields["total_executors"] = nodeResp.TotalExecutors
+
+ acc.AddFields(measurementJenkins, fields, tags)
+
// get node data
for _, node := range nodeResp.Computers {
err = j.gatherNodeData(node, acc)
@@ -304,7 +345,7 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error {
}
if build.Building {
- log.Printf("D! Ignore running build on %s, build %v", jr.name, number)
+ j.Log.Debugf("Ignore running build on %s, build %v", jr.name, number)
return nil
}
@@ -317,33 +358,29 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error {
return nil
}
- gatherJobBuild(jr, build, acc)
+ j.gatherJobBuild(jr, build, acc)
return nil
}
type nodeResponse struct {
- Computers []node `json:"computer"`
+ Computers []node `json:"computer"`
+ BusyExecutors int `json:"busyExecutors"`
+ TotalExecutors int `json:"totalExecutors"`
}
type node struct {
- DisplayName string `json:"displayName"`
- Offline bool `json:"offline"`
- MonitorData monitorData `json:"monitorData"`
+ DisplayName string `json:"displayName"`
+ Offline bool `json:"offline"`
+ NumExecutors int `json:"numExecutors"`
+ MonitorData monitorData `json:"monitorData"`
}
type monitorData struct {
- HudsonNodeMonitorsArchitectureMonitor string `json:"hudson.node_monitors.ArchitectureMonitor"`
- HudsonNodeMonitorsDiskSpaceMonitor nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"`
- HudsonNodeMonitorsResponseTimeMonitor struct {
- Average int64 `json:"average"`
- } `json:"hudson.node_monitors.ResponseTimeMonitor"`
- HudsonNodeMonitorsSwapSpaceMonitor struct {
- SwapAvailable float64 `json:"availableSwapSpace"`
- SwapTotal float64 `json:"totalSwapSpace"`
- MemoryAvailable float64 `json:"availablePhysicalMemory"`
- MemoryTotal float64 `json:"totalPhysicalMemory"`
- } `json:"hudson.node_monitors.SwapSpaceMonitor"`
- HudsonNodeMonitorsTemporarySpaceMonitor nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"`
+ HudsonNodeMonitorsArchitectureMonitor string `json:"hudson.node_monitors.ArchitectureMonitor"`
+ HudsonNodeMonitorsDiskSpaceMonitor *nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"`
+ HudsonNodeMonitorsResponseTimeMonitor *responseTimeMonitor `json:"hudson.node_monitors.ResponseTimeMonitor"`
+ HudsonNodeMonitorsSwapSpaceMonitor *swapSpaceMonitor `json:"hudson.node_monitors.SwapSpaceMonitor"`
+ HudsonNodeMonitorsTemporarySpaceMonitor *nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"`
}
type nodeSpaceMonitor struct {
@@ -351,6 +388,17 @@ type nodeSpaceMonitor struct {
Size float64 `json:"size"`
}
+type responseTimeMonitor struct {
+ Average int64 `json:"average"`
+}
+
+type swapSpaceMonitor struct {
+ SwapAvailable float64 `json:"availableSwapSpace"`
+ SwapTotal float64 `json:"totalSwapSpace"`
+ MemoryAvailable float64 `json:"availablePhysicalMemory"`
+ MemoryTotal float64 `json:"totalPhysicalMemory"`
+}
+
type jobResponse struct {
LastBuild jobBuild `json:"lastBuild"`
Jobs []innerJob `json:"jobs"`
@@ -394,12 +442,20 @@ func (jr jobRequest) combined() []string {
return append(jr.parents, jr.name)
}
+func (jr jobRequest) combinedEscaped() []string {
+ jobs := jr.combined()
+ for index, job := range jobs {
+ jobs[index] = url.PathEscape(job)
+ }
+ return jobs
+}
+
func (jr jobRequest) URL() string {
- return "/job/" + strings.Join(jr.combined(), "/job/") + jobPath
+ return "/job/" + strings.Join(jr.combinedEscaped(), "/job/") + jobPath
}
func (jr jobRequest) buildURL(number int64) string {
- return "/job/" + strings.Join(jr.combined(), "/job/") + "/" + strconv.Itoa(int(number)) + jobPath
+ return "/job/" + strings.Join(jr.combinedEscaped(), "/job/") + "/" + strconv.Itoa(int(number)) + jobPath
}
func (jr jobRequest) hierarchyName() string {
@@ -410,8 +466,8 @@ func (jr jobRequest) parentsString() string {
return strings.Join(jr.parents, "/")
}
-func gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.Accumulator) {
- tags := map[string]string{"name": jr.name, "parents": jr.parentsString(), "result": b.Result}
+func (j *Jenkins) gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.Accumulator) {
+ tags := map[string]string{"name": jr.name, "parents": jr.parentsString(), "result": b.Result, "source": j.Source, "port": j.Port}
fields := make(map[string]interface{})
fields["duration"] = b.Duration
fields["result_code"] = mapResultCode(b.Result)
diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go
index 7724fc0e3a139..b8284fc0d3348 100644
--- a/plugins/inputs/jenkins/jenkins_test.go
+++ b/plugins/inputs/jenkins/jenkins_test.go
@@ -1,3 +1,4 @@
+// Test Suite
package jenkins
import (
@@ -15,12 +16,14 @@ import (
func TestJobRequest(t *testing.T) {
tests := []struct {
- input jobRequest
- output string
+ input jobRequest
+ hierarchyName string
+ URL string
}{
{
jobRequest{},
"",
+ "",
},
{
jobRequest{
@@ -28,12 +31,26 @@ func TestJobRequest(t *testing.T) {
parents: []string{"3", "2"},
},
"3/2/1",
+ "/job/3/job/2/job/1/api/json",
+ },
+ {
+ jobRequest{
+ name: "job 3",
+ parents: []string{"job 1", "job 2"},
+ },
+ "job 1/job 2/job 3",
+ "/job/job%201/job/job%202/job/job%203/api/json",
},
}
for _, test := range tests {
- output := test.input.hierarchyName()
- if output != test.output {
- t.Errorf("Expected %s, got %s\n", test.output, output)
+ hierarchyName := test.input.hierarchyName()
+ URL := test.input.URL()
+ if hierarchyName != test.hierarchyName {
+ t.Errorf("Expected %s, got %s\n", test.hierarchyName, hierarchyName)
+ }
+
+ if test.URL != "" && URL != test.URL {
+ t.Errorf("Expected %s, got %s\n", test.URL, URL)
}
}
}
@@ -58,14 +75,14 @@ func TestResultCode(t *testing.T) {
}
type mockHandler struct {
- // responseMap is the path to repsonse interface
- // we will ouput the serialized response in json when serving http
+ // responseMap is the path to response interface
+ // we will output the serialized response in json when serving http
// example '/computer/api/json': *gojenkins.
responseMap map[string]interface{}
}
func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- o, ok := h.responseMap[r.URL.Path]
+ o, ok := h.responseMap[r.URL.RequestURI()]
if !ok {
w.WriteHeader(http.StatusNotFound)
return
@@ -105,9 +122,22 @@ func TestGatherNodeData(t *testing.T) {
},
},
wantErr: true,
+ output: &testutil.Accumulator{
+ Metrics: []*testutil.Metric{
+ {
+ Tags: map[string]string{
+ "source": "127.0.0.1",
+ },
+ Fields: map[string]interface{}{
+ "busy_executors": 0,
+ "total_executors": 0,
+ },
+ },
+ },
+ },
},
{
- name: "bad empty monitor data",
+ name: "empty monitor data",
input: mockHandler{
responseMap: map[string]interface{}{
"/api/json": struct{}{},
@@ -119,7 +149,9 @@ func TestGatherNodeData(t *testing.T) {
},
},
},
- wantErr: true,
+ output: &testutil.Accumulator{
+ Metrics: []*testutil.Metric{},
+ },
},
{
name: "filtered nodes",
@@ -127,6 +159,8 @@ func TestGatherNodeData(t *testing.T) {
responseMap: map[string]interface{}{
"/api/json": struct{}{},
"/computer/api/json": nodeResponse{
+ BusyExecutors: 4,
+ TotalExecutors: 8,
Computers: []node{
{DisplayName: "ignore-1"},
{DisplayName: "ignore-2"},
@@ -134,38 +168,45 @@ func TestGatherNodeData(t *testing.T) {
},
},
},
+ output: &testutil.Accumulator{
+ Metrics: []*testutil.Metric{
+ {
+ Tags: map[string]string{
+ "source": "127.0.0.1",
+ },
+ Fields: map[string]interface{}{
+ "busy_executors": 4,
+ "total_executors": 8,
+ },
+ },
+ },
+ },
},
-
{
name: "normal data collection",
input: mockHandler{
responseMap: map[string]interface{}{
"/api/json": struct{}{},
"/computer/api/json": nodeResponse{
+ BusyExecutors: 4,
+ TotalExecutors: 8,
Computers: []node{
{
DisplayName: "master",
MonitorData: monitorData{
HudsonNodeMonitorsArchitectureMonitor: "linux",
- HudsonNodeMonitorsResponseTimeMonitor: struct {
- Average int64 `json:"average"`
- }{
+ HudsonNodeMonitorsResponseTimeMonitor: &responseTimeMonitor{
Average: 10032,
},
- HudsonNodeMonitorsDiskSpaceMonitor: nodeSpaceMonitor{
+ HudsonNodeMonitorsDiskSpaceMonitor: &nodeSpaceMonitor{
Path: "/path/1",
Size: 123,
},
- HudsonNodeMonitorsTemporarySpaceMonitor: nodeSpaceMonitor{
+ HudsonNodeMonitorsTemporarySpaceMonitor: &nodeSpaceMonitor{
Path: "/path/2",
Size: 245,
},
- HudsonNodeMonitorsSwapSpaceMonitor: struct {
- SwapAvailable float64 `json:"availableSwapSpace"`
- SwapTotal float64 `json:"totalSwapSpace"`
- MemoryAvailable float64 `json:"availablePhysicalMemory"`
- MemoryTotal float64 `json:"totalPhysicalMemory"`
- }{
+ HudsonNodeMonitorsSwapSpaceMonitor: &swapSpaceMonitor{
SwapAvailable: 212,
SwapTotal: 500,
MemoryAvailable: 101,
@@ -180,6 +221,15 @@ func TestGatherNodeData(t *testing.T) {
},
output: &testutil.Accumulator{
Metrics: []*testutil.Metric{
+ {
+ Tags: map[string]string{
+ "source": "127.0.0.1",
+ },
+ Fields: map[string]interface{}{
+ "busy_executors": 4,
+ "total_executors": 8,
+ },
+ },
{
Tags: map[string]string{
"node_name": "master",
@@ -187,6 +237,7 @@ func TestGatherNodeData(t *testing.T) {
"status": "online",
"disk_path": "/path/1",
"temp_path": "/path/2",
+ "source": "127.0.0.1",
},
Fields: map[string]interface{}{
"response_time": int64(10032),
@@ -201,41 +252,88 @@ func TestGatherNodeData(t *testing.T) {
},
},
},
+ {
+ name: "slave is offline",
+ input: mockHandler{
+ responseMap: map[string]interface{}{
+ "/api/json": struct{}{},
+ "/computer/api/json": nodeResponse{
+ BusyExecutors: 4,
+ TotalExecutors: 8,
+ Computers: []node{
+ {
+ DisplayName: "slave",
+ MonitorData: monitorData{},
+ NumExecutors: 1,
+ Offline: true,
+ },
+ },
+ },
+ },
+ },
+ output: &testutil.Accumulator{
+ Metrics: []*testutil.Metric{
+ {
+ Tags: map[string]string{
+ "source": "127.0.0.1",
+ },
+ Fields: map[string]interface{}{
+ "busy_executors": 4,
+ "total_executors": 8,
+ },
+ },
+ {
+ Tags: map[string]string{
+ "node_name": "slave",
+ "status": "offline",
+ },
+ Fields: map[string]interface{}{
+ "num_executors": 1,
+ },
+ },
+ },
+ },
+ },
}
for _, test := range tests {
- ts := httptest.NewServer(test.input)
- defer ts.Close()
- j := &Jenkins{
- URL: ts.URL,
- ResponseTimeout: internal.Duration{Duration: time.Microsecond},
- NodeExclude: []string{"ignore-1", "ignore-2"},
- }
- te := j.initialize(&http.Client{Transport: &http.Transport{}})
- acc := new(testutil.Accumulator)
- j.gatherNodesData(acc)
- if err := acc.FirstError(); err != nil {
- te = err
- }
+ t.Run(test.name, func(t *testing.T) {
+ ts := httptest.NewServer(test.input)
+ defer ts.Close()
+ j := &Jenkins{
+ Log: testutil.Logger{},
+ URL: ts.URL,
+ ResponseTimeout: internal.Duration{Duration: time.Microsecond},
+ NodeExclude: []string{"ignore-1", "ignore-2"},
+ }
+ te := j.initialize(&http.Client{Transport: &http.Transport{}})
+ acc := new(testutil.Accumulator)
+ j.gatherNodesData(acc)
+ if err := acc.FirstError(); err != nil {
+ te = err
+ }
- if !test.wantErr && te != nil {
- t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
- } else if test.wantErr && te == nil {
- t.Fatalf("%s: expected err, got nil", test.name)
- }
- if test.output == nil && len(acc.Metrics) > 0 {
- t.Fatalf("%s: collected extra data", test.name)
- } else if test.output != nil && len(test.output.Metrics) > 0 {
- for k, m := range test.output.Metrics[0].Tags {
- if acc.Metrics[0].Tags[k] != m {
- t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k])
- }
+ if !test.wantErr && te != nil {
+ t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
+ } else if test.wantErr && te == nil {
+ t.Fatalf("%s: expected err, got nil", test.name)
}
- for k, m := range test.output.Metrics[0].Fields {
- if acc.Metrics[0].Fields[k] != m {
- t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k])
+ if test.output == nil && len(acc.Metrics) > 0 {
+ t.Fatalf("%s: collected extra data %s", test.name, acc.Metrics)
+ } else if test.output != nil && len(test.output.Metrics) > 0 {
+ for i := 0; i < len(test.output.Metrics); i++ {
+ for k, m := range test.output.Metrics[i].Tags {
+ if acc.Metrics[i].Tags[k] != m {
+ t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k])
+ }
+ }
+ for k, m := range test.output.Metrics[i].Fields {
+ if acc.Metrics[i].Fields[k] != m {
+ t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k])
+ }
+ }
}
}
- }
+ })
}
}
@@ -258,6 +356,7 @@ func TestInitialize(t *testing.T) {
{
name: "bad jenkins config",
input: &Jenkins{
+ Log: testutil.Logger{},
URL: "http://a bad url",
ResponseTimeout: internal.Duration{Duration: time.Microsecond},
},
@@ -266,6 +365,7 @@ func TestInitialize(t *testing.T) {
{
name: "has filter",
input: &Jenkins{
+ Log: testutil.Logger{},
URL: ts.URL,
ResponseTimeout: internal.Duration{Duration: time.Microsecond},
JobExclude: []string{"job1", "job2"},
@@ -275,31 +375,34 @@ func TestInitialize(t *testing.T) {
{
name: "default config",
input: &Jenkins{
+ Log: testutil.Logger{},
URL: ts.URL,
ResponseTimeout: internal.Duration{Duration: time.Microsecond},
},
output: &Jenkins{
+ Log: testutil.Logger{},
MaxConnections: 5,
MaxSubJobPerLayer: 10,
},
},
}
for _, test := range tests {
- te := test.input.initialize(mockClient)
- if !test.wantErr && te != nil {
- t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
- } else if test.wantErr && te == nil {
- t.Fatalf("%s: expected err, got nil", test.name)
- }
- if test.output != nil {
- if test.input.client == nil {
- t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error())
+ t.Run(test.name, func(t *testing.T) {
+ te := test.input.initialize(mockClient)
+ if !test.wantErr && te != nil {
+ t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
+ } else if test.wantErr && te == nil {
+ t.Fatalf("%s: expected err, got nil", test.name)
}
- if test.input.MaxConnections != test.output.MaxConnections {
- t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections)
+ if test.output != nil {
+ if test.input.client == nil {
+ t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error())
+ }
+ if test.input.MaxConnections != test.output.MaxConnections {
+ t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections)
+ }
}
- }
-
+ })
}
}
@@ -462,6 +565,43 @@ func TestGatherJobs(t *testing.T) {
},
},
},
+ {
+ name: "gather metrics for jobs with space",
+ input: mockHandler{
+ responseMap: map[string]interface{}{
+ "/api/json": &jobResponse{
+ Jobs: []innerJob{
+ {Name: "job 1"},
+ },
+ },
+ "/job/job%201/api/json": &jobResponse{
+ LastBuild: jobBuild{
+ Number: 3,
+ },
+ },
+ "/job/job%201/3/api/json": &buildResponse{
+ Building: false,
+ Result: "SUCCESS",
+ Duration: 25558,
+ Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
+ },
+ },
+ },
+ output: &testutil.Accumulator{
+ Metrics: []*testutil.Metric{
+ {
+ Tags: map[string]string{
+ "name": "job 1",
+ "result": "SUCCESS",
+ },
+ Fields: map[string]interface{}{
+ "duration": int64(25558),
+ "result_code": 0,
+ },
+ },
+ },
+ },
+ },
{
name: "gather sub jobs, jobs filter",
input: mockHandler{
@@ -495,6 +635,8 @@ func TestGatherJobs(t *testing.T) {
{Name: "PR-100"},
{Name: "PR-101"},
{Name: "PR-ignore2"},
+ {Name: "PR 1"},
+ {Name: "PR ignore"},
},
},
"/job/apps/job/k8s-cloud/job/PR-100/api/json": &jobResponse{
@@ -507,6 +649,11 @@ func TestGatherJobs(t *testing.T) {
Number: 4,
},
},
+ "/job/apps/job/k8s-cloud/job/PR%201/api/json": &jobResponse{
+ LastBuild: jobBuild{
+ Number: 1,
+ },
+ },
"/job/apps/job/chronograf/1/api/json": &buildResponse{
Building: false,
Result: "FAILURE",
@@ -525,10 +672,27 @@ func TestGatherJobs(t *testing.T) {
Duration: 91558,
Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
},
+ "/job/apps/job/k8s-cloud/job/PR%201/1/api/json": &buildResponse{
+ Building: false,
+ Result: "SUCCESS",
+ Duration: 87832,
+ Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
+ },
},
},
output: &testutil.Accumulator{
Metrics: []*testutil.Metric{
+ {
+ Tags: map[string]string{
+ "name": "PR 1",
+ "parents": "apps/k8s-cloud",
+ "result": "SUCCESS",
+ },
+ Fields: map[string]interface{}{
+ "duration": int64(87832),
+ "result_code": 0,
+ },
+ },
{
Tags: map[string]string{
"name": "PR-100",
@@ -567,49 +731,52 @@ func TestGatherJobs(t *testing.T) {
},
}
for _, test := range tests {
- ts := httptest.NewServer(test.input)
- defer ts.Close()
- j := &Jenkins{
- URL: ts.URL,
- MaxBuildAge: internal.Duration{Duration: time.Hour},
- ResponseTimeout: internal.Duration{Duration: time.Microsecond},
- JobExclude: []string{
- "ignore-1",
- "apps/ignore-all/*",
- "apps/k8s-cloud/PR-ignore2",
- },
- }
- te := j.initialize(&http.Client{Transport: &http.Transport{}})
- acc := new(testutil.Accumulator)
- acc.SetDebug(true)
- j.gatherJobs(acc)
- if err := acc.FirstError(); err != nil {
- te = err
- }
- if !test.wantErr && te != nil {
- t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
- } else if test.wantErr && te == nil {
- t.Fatalf("%s: expected err, got nil", test.name)
- }
+ t.Run(test.name, func(t *testing.T) {
+ ts := httptest.NewServer(test.input)
+ defer ts.Close()
+ j := &Jenkins{
+ Log: testutil.Logger{},
+ URL: ts.URL,
+ MaxBuildAge: internal.Duration{Duration: time.Hour},
+ ResponseTimeout: internal.Duration{Duration: time.Microsecond},
+ JobExclude: []string{
+ "ignore-1",
+ "apps/ignore-all/*",
+ "apps/k8s-cloud/PR-ignore2",
+ "apps/k8s-cloud/PR ignore",
+ },
+ }
+ te := j.initialize(&http.Client{Transport: &http.Transport{}})
+ acc := new(testutil.Accumulator)
+ j.gatherJobs(acc)
+ if err := acc.FirstError(); err != nil {
+ te = err
+ }
+ if !test.wantErr && te != nil {
+ t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
+ } else if test.wantErr && te == nil {
+ t.Fatalf("%s: expected err, got nil", test.name)
+ }
- if test.output != nil && len(test.output.Metrics) > 0 {
- // sort metrics
- sort.Slice(acc.Metrics, func(i, j int) bool {
- return strings.Compare(acc.Metrics[i].Tags["name"], acc.Metrics[j].Tags["name"]) < 0
- })
- for i := range test.output.Metrics {
- for k, m := range test.output.Metrics[i].Tags {
- if acc.Metrics[i].Tags[k] != m {
- t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k])
+ if test.output != nil && len(test.output.Metrics) > 0 {
+ // sort metrics
+ sort.Slice(acc.Metrics, func(i, j int) bool {
+ return strings.Compare(acc.Metrics[i].Tags["name"], acc.Metrics[j].Tags["name"]) < 0
+ })
+ for i := range test.output.Metrics {
+ for k, m := range test.output.Metrics[i].Tags {
+ if acc.Metrics[i].Tags[k] != m {
+ t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k])
+ }
}
- }
- for k, m := range test.output.Metrics[i].Fields {
- if acc.Metrics[i].Fields[k] != m {
- t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[0].Fields[k])
+ for k, m := range test.output.Metrics[i].Fields {
+ if acc.Metrics[i].Fields[k] != m {
+ t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[0].Fields[k])
+ }
}
}
- }
- }
+ }
+ })
}
}
diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md
index 2622d83bc5668..96ee48701b464 100644
--- a/plugins/inputs/jolokia/README.md
+++ b/plugins/inputs/jolokia/README.md
@@ -1,4 +1,4 @@
-# Telegraf plugin: Jolokia
+# Jolokia Input Plugin
**Deprecated in version 1.5:** Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin.
diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go
index a6acd295339e8..a1ca60604cf00 100644
--- a/plugins/inputs/jolokia/jolokia_test.go
+++ b/plugins/inputs/jolokia/jolokia_test.go
@@ -160,7 +160,7 @@ func TestHttpJsonMultiValue(t *testing.T) {
var acc testutil.Accumulator
err := acc.GatherError(jolokia.Gather)
- assert.Nil(t, err)
+ assert.NoError(t, err)
assert.Equal(t, 1, len(acc.Metrics))
fields := map[string]interface{}{
@@ -184,7 +184,7 @@ func TestHttpJsonBulkResponse(t *testing.T) {
var acc testutil.Accumulator
err := jolokia.Gather(&acc)
- assert.Nil(t, err)
+ assert.NoError(t, err)
assert.Equal(t, 1, len(acc.Metrics))
fields := map[string]interface{}{
@@ -212,7 +212,7 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) {
var acc testutil.Accumulator
err := acc.GatherError(jolokia.Gather)
- assert.Nil(t, err)
+ assert.NoError(t, err)
assert.Equal(t, 1, len(acc.Metrics))
fields := map[string]interface{}{
diff --git a/plugins/inputs/jolokia2/README.md b/plugins/inputs/jolokia2/README.md
index 190e6627d272c..4a7b8f4200a42 100644
--- a/plugins/inputs/jolokia2/README.md
+++ b/plugins/inputs/jolokia2/README.md
@@ -1,4 +1,4 @@
-# Jolokia2 Input Plugins
+# Jolokia2 Input Plugin
The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html).
diff --git a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/client.go
index 9f5de15d832a2..efa6db400692d 100644
--- a/plugins/inputs/jolokia2/client.go
+++ b/plugins/inputs/jolokia2/client.go
@@ -10,7 +10,7 @@ import (
"path"
"time"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
)
type Client struct {
diff --git a/plugins/inputs/jolokia2/examples/cassandra.conf b/plugins/inputs/jolokia2/examples/cassandra.conf
index b8bb609800255..bc9c97ff1ae00 100644
--- a/plugins/inputs/jolokia2/examples/cassandra.conf
+++ b/plugins/inputs/jolokia2/examples/cassandra.conf
@@ -2,7 +2,7 @@
urls = ["http://localhost:8778/jolokia"]
name_prefix = "java_"
- [[inputs.jolokia2_agent.metrics]]
+ [[inputs.jolokia2_agent.metric]]
name = "Memory"
mbean = "java.lang:type=Memory"
diff --git a/plugins/inputs/jolokia2/examples/java.conf b/plugins/inputs/jolokia2/examples/java.conf
index 32a68195c65d2..aa9bc6852b650 100644
--- a/plugins/inputs/jolokia2/examples/java.conf
+++ b/plugins/inputs/jolokia2/examples/java.conf
@@ -23,17 +23,17 @@
mbean = "java.lang:name=G1 Young Generation,type=GarbageCollector"
paths = ["LastGcInfo/duration", "LastGcInfo/GcThreadCount", "LastGcInfo/memoryUsageAfterGc"]
- [[inputs.jolokia2_agent.metrics]]
+ [[inputs.jolokia2_agent.metric]]
name = "java_threading"
mbean = "java.lang:type=Threading"
paths = ["TotalStartedThreadCount", "ThreadCount", "DaemonThreadCount", "PeakThreadCount"]
- [[inputs.jolokia2_agent.metrics]]
+ [[inputs.jolokia2_agent.metric]]
name = "java_class_loading"
mbean = "java.lang:type=ClassLoading"
paths = ["LoadedClassCount", "UnloadedClassCount", "TotalLoadedClassCount"]
- [[inputs.jolokia2_agent.metrics]]
+ [[inputs.jolokia2_agent.metric]]
name = "java_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/gatherer.go
index 5005e8225b3b4..f24918998248e 100644
--- a/plugins/inputs/jolokia2/gatherer.go
+++ b/plugins/inputs/jolokia2/gatherer.go
@@ -43,7 +43,7 @@ func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error {
return nil
}
-// gatherReponses adds points to an accumulator from the ReadResponse objects
+// gatherResponses adds points to an accumulator from the ReadResponse objects
// returned by a Jolokia agent.
func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) {
series := make(map[string][]point, 0)
@@ -88,8 +88,8 @@ func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]po
case 404:
continue
default:
- errors = append(errors, fmt.Errorf("Unexpected status in response from target %s: %d",
- response.RequestTarget, response.Status))
+ errors = append(errors, fmt.Errorf("Unexpected status in response from target %s (%q): %d",
+ response.RequestTarget, response.RequestMbean, response.Status))
continue
}
@@ -144,7 +144,7 @@ func metricMatchesResponse(metric Metric, response ReadResponse) bool {
return false
}
-// compactPoints attepts to remove points by compacting points
+// compactPoints attempts to remove points by compacting points
// with matching tag sets. When a match is found, the fields from
// one point are moved to another, and the empty point is removed.
func compactPoints(points []point) []point {
diff --git a/plugins/inputs/jolokia2/jolokia_agent.go b/plugins/inputs/jolokia2/jolokia_agent.go
index fd2105d5fba5e..58b67ce5a1c9a 100644
--- a/plugins/inputs/jolokia2/jolokia_agent.go
+++ b/plugins/inputs/jolokia2/jolokia_agent.go
@@ -6,7 +6,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
)
type JolokiaAgent struct {
diff --git a/plugins/inputs/jolokia2/jolokia_proxy.go b/plugins/inputs/jolokia2/jolokia_proxy.go
index 7a921c083bce5..6428a88515aee 100644
--- a/plugins/inputs/jolokia2/jolokia_proxy.go
+++ b/plugins/inputs/jolokia2/jolokia_proxy.go
@@ -3,7 +3,7 @@ package jolokia2
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
)
type JolokiaProxy struct {
diff --git a/plugins/inputs/jti_openconfig_telemetry/README.md b/plugins/inputs/jti_openconfig_telemetry/README.md
index 7c30aaa8d3460..1a28b55aeb8d9 100644
--- a/plugins/inputs/jti_openconfig_telemetry/README.md
+++ b/plugins/inputs/jti_openconfig_telemetry/README.md
@@ -41,9 +41,13 @@ This plugin reads Juniper Networks implementation of OpenConfig telemetry data f
"/interfaces",
]
- ## x509 Certificate to use with TLS connection. If it is not provided, an insecure
- ## channel will be opened with server
- ssl_cert = "/etc/telegraf/cert.pem"
+ ## Optional TLS Config
+ # enable_tls = true
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
## Failed streams/calls will not be retried if 0 is provided
diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go
index a4cd76cc4361b..bc7c780458f99 100644
--- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go
+++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go
@@ -980,7 +980,7 @@ type OpenConfigTelemetryClient interface {
// The device should send telemetry data back on the same
// connection as the subscription request.
TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error)
- // Terminates and removes an exisiting telemetry subscription
+ // Terminates and removes an existing telemetry subscription
CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error)
// Get the list of current telemetry subscriptions from the
// target. This command returns a list of existing subscriptions
@@ -1076,7 +1076,7 @@ type OpenConfigTelemetryServer interface {
// The device should send telemetry data back on the same
// connection as the subscription request.
TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error
- // Terminates and removes an exisiting telemetry subscription
+ // Terminates and removes an existing telemetry subscription
CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error)
// Get the list of current telemetry subscriptions from the
// target. This command returns a list of existing subscriptions
diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto
index 38ce9b42233b8..cf4aa145e6911 100644
--- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto
+++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto
@@ -44,7 +44,7 @@ service OpenConfigTelemetry {
// connection as the subscription request.
rpc telemetrySubscribe(SubscriptionRequest) returns (stream OpenConfigData) {}
- // Terminates and removes an exisiting telemetry subscription
+ // Terminates and removes an existing telemetry subscription
rpc cancelTelemetrySubscription(CancelSubscriptionRequest) returns (CancelSubscriptionReply) {}
// Get the list of current telemetry subscriptions from the
diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go
index b721c4943777a..0c6fc9e052d43 100644
--- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go
+++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go
@@ -2,7 +2,6 @@ package jti_openconfig_telemetry
import (
"fmt"
- "log"
"net"
"regexp"
"strings"
@@ -11,6 +10,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
+ internaltls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth"
"github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc"
@@ -22,15 +22,18 @@ import (
)
type OpenConfigTelemetry struct {
- Servers []string
- Sensors []string
- Username string
- Password string
+ Servers []string `toml:"servers"`
+ Sensors []string `toml:"sensors"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
ClientID string `toml:"client_id"`
SampleFrequency internal.Duration `toml:"sample_frequency"`
- SSLCert string `toml:"ssl_cert"`
StrAsTags bool `toml:"str_as_tags"`
RetryDelay internal.Duration `toml:"retry_delay"`
+ EnableTLS bool `toml:"enable_tls"`
+ internaltls.ClientConfig
+
+ Log telegraf.Logger
sensorsConfig []sensorConfig
grpcClientConns []*grpc.ClientConn
@@ -44,8 +47,8 @@ var (
## List of device addresses to collect telemetry from
servers = ["localhost:1883"]
- ## Authentication details. Username and password are must if device expects
- ## authentication. Client ID must be unique when connecting from multiple instances
+ ## Authentication details. Username and password are must if device expects
+ ## authentication. Client ID must be unique when connecting from multiple instances
## of telegraf to the same device
username = "user"
password = "pass"
@@ -57,16 +60,16 @@ var (
## Sensors to subscribe for
## A identifier for each sensor can be provided in path by separating with space
## Else sensor path will be used as identifier
- ## When identifier is used, we can provide a list of space separated sensors.
- ## A single subscription will be created with all these sensors and data will
+ ## When identifier is used, we can provide a list of space separated sensors.
+ ## A single subscription will be created with all these sensors and data will
## be saved to measurement with this identifier name
sensors = [
"/interfaces/",
"collection /components/ /lldp",
]
- ## We allow specifying sensor group level reporting rate. To do this, specify the
- ## reporting rate in Duration at the beginning of sensor paths / collection
+ ## We allow specifying sensor group level reporting rate. To do this, specify the
+ ## reporting rate in Duration at the beginning of sensor paths / collection
## name. For entries without reporting rate, we use configured sample frequency
sensors = [
"1000ms customReporting /interfaces /lldp",
@@ -74,9 +77,13 @@ var (
"/interfaces",
]
- ## x509 Certificate to use with TLS connection. If it is not provided, an insecure
- ## channel will be opened with server
- ssl_cert = "/etc/telegraf/cert.pem"
+ ## Optional TLS Config
+ # enable_tls = true
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
## Failed streams/calls will not be retried if 0 is provided
@@ -237,7 +244,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int {
}
if len(spathSplit) == 0 {
- log.Printf("E! No sensors are specified")
+ m.Log.Error("No sensors are specified")
continue
}
@@ -251,7 +258,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int {
}
if len(spathSplit) == 0 {
- log.Printf("E! No valid sensors are specified")
+ m.Log.Error("No valid sensors are specified")
continue
}
@@ -288,13 +295,13 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context,
rpcStatus, _ := status.FromError(err)
// If service is currently unavailable and may come back later, retry
if rpcStatus.Code() != codes.Unavailable {
- acc.AddError(fmt.Errorf("E! Could not subscribe to %s: %v", grpcServer,
+ acc.AddError(fmt.Errorf("could not subscribe to %s: %v", grpcServer,
err))
return
} else {
// Retry with delay. If delay is not provided, use default
if m.RetryDelay.Duration > 0 {
- log.Printf("D! Retrying %s with timeout %v", grpcServer,
+ m.Log.Debugf("Retrying %s with timeout %v", grpcServer,
m.RetryDelay.Duration)
time.Sleep(m.RetryDelay.Duration)
continue
@@ -308,11 +315,11 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context,
if err != nil {
// If we encounter error in the stream, break so we can retry
// the connection
- acc.AddError(fmt.Errorf("E! Failed to read from %s: %v", err, grpcServer))
+ acc.AddError(fmt.Errorf("failed to read from %s: %s", grpcServer, err))
break
}
- log.Printf("D! Received from %s: %v", grpcServer, r)
+ m.Log.Debugf("Received from %s: %v", grpcServer, r)
// Create a point and add to batch
tags := make(map[string]string)
@@ -323,7 +330,7 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context,
dgroups := m.extractData(r, grpcServer)
// Print final data collection
- log.Printf("D! Available collection for %s is: %v", grpcServer, dgroups)
+ m.Log.Debugf("Available collection for %s is: %v", grpcServer, dgroups)
tnow := time.Now()
// Iterate through data groups and add them
@@ -345,19 +352,19 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context,
func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
// Build sensors config
if m.splitSensorConfig() == 0 {
- return fmt.Errorf("E! No valid sensor configuration available")
+ return fmt.Errorf("no valid sensor configuration available")
}
- // If SSL certificate is provided, use transport credentials
- var err error
- var transportCredentials credentials.TransportCredentials
- if m.SSLCert != "" {
- transportCredentials, err = credentials.NewClientTLSFromFile(m.SSLCert, "")
+ // Parse TLS config
+ var opts []grpc.DialOption
+ if m.EnableTLS {
+ tlscfg, err := m.ClientConfig.TLSConfig()
if err != nil {
- return fmt.Errorf("E! Failed to read certificate: %v", err)
+ return err
}
+ opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlscfg)))
} else {
- transportCredentials = nil
+ opts = append(opts, grpc.WithInsecure())
}
// Connect to given list of servers and start collecting data
@@ -369,20 +376,15 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
// Extract device address and port
grpcServer, grpcPort, err := net.SplitHostPort(server)
if err != nil {
- log.Printf("E! Invalid server address: %v", err)
+ m.Log.Errorf("Invalid server address: %s", err.Error())
continue
}
- // If a certificate is provided, open a secure channel. Else open insecure one
- if transportCredentials != nil {
- grpcClientConn, err = grpc.Dial(server, grpc.WithTransportCredentials(transportCredentials))
- } else {
- grpcClientConn, err = grpc.Dial(server, grpc.WithInsecure())
- }
+ grpcClientConn, err = grpc.Dial(server, opts...)
if err != nil {
- log.Printf("E! Failed to connect to %s: %v", server, err)
+ m.Log.Errorf("Failed to connect to %s: %s", server, err.Error())
} else {
- log.Printf("D! Opened a new gRPC session to %s on port %s", grpcServer, grpcPort)
+ m.Log.Debugf("Opened a new gRPC session to %s on port %s", grpcServer, grpcPort)
}
// Add to the list of client connections
@@ -394,13 +396,13 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
&authentication.LoginRequest{UserName: m.Username,
Password: m.Password, ClientId: m.ClientID})
if loginErr != nil {
- log.Printf("E! Could not initiate login check for %s: %v", server, loginErr)
+ m.Log.Errorf("Could not initiate login check for %s: %v", server, loginErr)
continue
}
// Check if the user is authenticated. Bail if auth error
if !loginReply.Result {
- log.Printf("E! Failed to authenticate the user for %s", server)
+ m.Log.Errorf("Failed to authenticate the user for %s", server)
continue
}
}
diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go
index 8b0abd88377d9..a3df62e1bb0c0 100644
--- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go
+++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go
@@ -17,6 +17,7 @@ import (
)
var cfg = &OpenConfigTelemetry{
+ Log: testutil.Logger{},
Servers: []string{"127.0.0.1:50051"},
SampleFrequency: internal.Duration{Duration: time.Second * 2},
}
diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md
index 56fc59245ad0e..dec39cc32871b 100644
--- a/plugins/inputs/kafka_consumer/README.md
+++ b/plugins/inputs/kafka_consumer/README.md
@@ -10,19 +10,20 @@ and use the old zookeeper connection method.
```toml
[[inputs.kafka_consumer]]
- ## kafka servers
+ ## Kafka brokers.
brokers = ["localhost:9092"]
- ## topic(s) to consume
+
+ ## Topics to consume.
topics = ["telegraf"]
- ## Add topic as tag if topic_tag is not empty
+
+ ## When set this tag will be added to all metrics with the topic as the value.
# topic_tag = ""
## Optional Client id
# client_id = "Telegraf"
## Set the minimal supported Kafka version. Setting this enables the use of new
- ## Kafka features and APIs. Of particular interest, lz4 compression
- ## requires at least version 0.10.0.0.
+ ## Kafka features and APIs. Must be 0.10.2.0 or greater.
## ex: version = "1.1.0"
# version = ""
@@ -33,14 +34,22 @@ and use the old zookeeper connection method.
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
- ## Optional SASL Config
+ ## SASL authentication credentials. These settings should typically be used
+ ## with TLS encryption enabled using the "enable_tls" option.
# sasl_username = "kafka"
# sasl_password = "secret"
- ## the name of the consumer group
- consumer_group = "telegraf_metrics_consumers"
- ## Offset (must be either "oldest" or "newest")
- offset = "oldest"
+ ## SASL protocol version. When connecting to Azure EventHub set to 0.
+ # sasl_version = 1
+
+ ## Name of the consumer group.
+ # consumer_group = "telegraf_metrics_consumers"
+
+ ## Initial offset position; one of "oldest" or "newest".
+ # offset = "oldest"
+
+ ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
+ # balance_strategy = "range"
## Maximum length of a message to consume, in bytes (default 0/unlimited);
## larger messages are dropped
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go
index 545e37f5a54de..0fd7d3693d48c 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer.go
@@ -6,89 +6,60 @@ import (
"log"
"strings"
"sync"
+ "time"
"github.com/Shopify/sarama"
- cluster "github.com/bsm/sarama-cluster"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/kafka"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
-const (
- defaultMaxUndeliveredMessages = 1000
-)
-
-type empty struct{}
-type semaphore chan empty
-
-type Consumer interface {
- Errors() <-chan error
- Messages() <-chan *sarama.ConsumerMessage
- MarkOffset(msg *sarama.ConsumerMessage, metadata string)
- Close() error
-}
-
-type Kafka struct {
- ConsumerGroup string `toml:"consumer_group"`
- ClientID string `toml:"client_id"`
- Topics []string `toml:"topics"`
- Brokers []string `toml:"brokers"`
- MaxMessageLen int `toml:"max_message_len"`
- Version string `toml:"version"`
- MaxUndeliveredMessages int `toml:"max_undelivered_messages"`
- Offset string `toml:"offset"`
- SASLUsername string `toml:"sasl_username"`
- SASLPassword string `toml:"sasl_password"`
- TopicTag string `toml:"topic_tag"`
-
- tls.ClientConfig
-
- cluster Consumer
- parser parsers.Parser
- wg *sync.WaitGroup
- cancel context.CancelFunc
-
- // Unconfirmed messages
- messages map[telegraf.TrackingID]*sarama.ConsumerMessage
-
- // doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
- // this is mostly for test purposes, but there may be a use-case for it later.
- doNotCommitMsgs bool
-}
-
-var sampleConfig = `
- ## kafka servers
+const sampleConfig = `
+ ## Kafka brokers.
brokers = ["localhost:9092"]
- ## topic(s) to consume
+
+ ## Topics to consume.
topics = ["telegraf"]
- ## Add topic as tag if topic_tag is not empty
+
+ ## When set this tag will be added to all metrics with the topic as the value.
# topic_tag = ""
## Optional Client id
# client_id = "Telegraf"
## Set the minimal supported Kafka version. Setting this enables the use of new
- ## Kafka features and APIs. Of particular interest, lz4 compression
- ## requires at least version 0.10.0.0.
+ ## Kafka features and APIs. Must be 0.10.2.0 or greater.
## ex: version = "1.1.0"
# version = ""
## Optional TLS Config
+ # enable_tls = true
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
- ## Optional SASL Config
+ ## SASL authentication credentials. These settings should typically be used
+ ## with TLS encryption enabled using the "enable_tls" option.
# sasl_username = "kafka"
# sasl_password = "secret"
- ## the name of the consumer group
- consumer_group = "telegraf_metrics_consumers"
- ## Offset (must be either "oldest" or "newest")
- offset = "oldest"
+ ## SASL protocol version. When connecting to Azure EventHub set to 0.
+ # sasl_version = 1
+
+ ## Name of the consumer group.
+ # consumer_group = "telegraf_metrics_consumers"
+
+ ## Initial offset position; one of "oldest" or "newest".
+ # offset = "oldest"
+
+ ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
+ # balance_strategy = "range"
+
## Maximum length of a message to consume, in bytes (default 0/unlimited);
## larger messages are dropped
max_message_len = 1000000
@@ -110,55 +81,132 @@ var sampleConfig = `
data_format = "influx"
`
-func (k *Kafka) SampleConfig() string {
+const (
+ defaultMaxUndeliveredMessages = 1000
+ defaultMaxMessageLen = 1000000
+ defaultConsumerGroup = "telegraf_metrics_consumers"
+ reconnectDelay = 5 * time.Second
+)
+
+type empty struct{}
+type semaphore chan empty
+
+type KafkaConsumer struct {
+ Brokers []string `toml:"brokers"`
+ ClientID string `toml:"client_id"`
+ ConsumerGroup string `toml:"consumer_group"`
+ MaxMessageLen int `toml:"max_message_len"`
+ MaxUndeliveredMessages int `toml:"max_undelivered_messages"`
+ Offset string `toml:"offset"`
+ BalanceStrategy string `toml:"balance_strategy"`
+ Topics []string `toml:"topics"`
+ TopicTag string `toml:"topic_tag"`
+ Version string `toml:"version"`
+ SASLPassword string `toml:"sasl_password"`
+ SASLUsername string `toml:"sasl_username"`
+ SASLVersion *int `toml:"sasl_version"`
+
+ EnableTLS *bool `toml:"enable_tls"`
+ tls.ClientConfig
+
+ Log telegraf.Logger `toml:"-"`
+
+ ConsumerCreator ConsumerGroupCreator `toml:"-"`
+ consumer ConsumerGroup
+ config *sarama.Config
+
+ parser parsers.Parser
+ wg sync.WaitGroup
+ cancel context.CancelFunc
+}
+
+type ConsumerGroup interface {
+ Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error
+ Errors() <-chan error
+ Close() error
+}
+
+type ConsumerGroupCreator interface {
+ Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error)
+}
+
+type SaramaCreator struct{}
+
+func (*SaramaCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) {
+ return sarama.NewConsumerGroup(brokers, group, config)
+}
+
+func (k *KafkaConsumer) SampleConfig() string {
return sampleConfig
}
-func (k *Kafka) Description() string {
- return "Read metrics from Kafka topic(s)"
+func (k *KafkaConsumer) Description() string {
+ return "Read metrics from Kafka topics"
}
-func (k *Kafka) SetParser(parser parsers.Parser) {
+func (k *KafkaConsumer) SetParser(parser parsers.Parser) {
k.parser = parser
}
-func (k *Kafka) Start(acc telegraf.Accumulator) error {
- var clusterErr error
+func (k *KafkaConsumer) Init() error {
+ if k.MaxUndeliveredMessages == 0 {
+ k.MaxUndeliveredMessages = defaultMaxUndeliveredMessages
+ }
+ if k.ConsumerGroup == "" {
+ k.ConsumerGroup = defaultConsumerGroup
+ }
+
+ config := sarama.NewConfig()
+ config.Consumer.Return.Errors = true
- config := cluster.NewConfig()
+ // Kafka version 0.10.2.0 is required for consumer groups.
+ config.Version = sarama.V0_10_2_0
if k.Version != "" {
version, err := sarama.ParseKafkaVersion(k.Version)
if err != nil {
return err
}
+
config.Version = version
}
- config.Consumer.Return.Errors = true
+ if k.EnableTLS != nil && *k.EnableTLS {
+ config.Net.TLS.Enable = true
+ }
tlsConfig, err := k.ClientConfig.TLSConfig()
if err != nil {
return err
}
- if k.ClientID != "" {
- config.ClientID = k.ClientID
- } else {
- config.ClientID = "Telegraf"
- }
-
if tlsConfig != nil {
- log.Printf("D! TLS Enabled")
config.Net.TLS.Config = tlsConfig
- config.Net.TLS.Enable = true
+
+ // To maintain backwards compatibility, if the enable_tls option is not
+ // set TLS is enabled if a non-default TLS config is used.
+ if k.EnableTLS == nil {
+ k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS")
+ config.Net.TLS.Enable = true
+ }
}
+
if k.SASLUsername != "" && k.SASLPassword != "" {
- log.Printf("D! Using SASL auth with username '%s',",
- k.SASLUsername)
config.Net.SASL.User = k.SASLUsername
config.Net.SASL.Password = k.SASLPassword
config.Net.SASL.Enable = true
+
+ version, err := kafka.SASLVersion(config.Version, k.SASLVersion)
+ if err != nil {
+ return err
+ }
+ config.Net.SASL.Version = version
+ }
+
+ if k.ClientID != "" {
+ config.ClientID = k.ClientID
+ } else {
+ config.ClientID = "Telegraf"
}
switch strings.ToLower(k.Offset) {
@@ -167,140 +215,239 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
case "newest":
config.Consumer.Offsets.Initial = sarama.OffsetNewest
default:
- log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'",
- k.Offset)
- config.Consumer.Offsets.Initial = sarama.OffsetOldest
+ return fmt.Errorf("invalid offset %q", k.Offset)
}
- if k.cluster == nil {
- k.cluster, clusterErr = cluster.NewConsumer(
- k.Brokers,
- k.ConsumerGroup,
- k.Topics,
- config,
- )
-
- if clusterErr != nil {
- log.Printf("E! Error when creating Kafka Consumer, brokers: %v, topics: %v",
- k.Brokers, k.Topics)
- return clusterErr
- }
+ switch strings.ToLower(k.BalanceStrategy) {
+ case "range", "":
+ config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
+ case "roundrobin":
+ config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
+ case "sticky":
+ config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
+ default:
+ return fmt.Errorf("invalid balance strategy %q", k.BalanceStrategy)
+ }
+
+ if k.ConsumerCreator == nil {
+ k.ConsumerCreator = &SaramaCreator{}
+ }
+
+ k.config = config
+ return nil
+}
+
+func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error {
+ var err error
+ k.consumer, err = k.ConsumerCreator.Create(
+ k.Brokers,
+ k.ConsumerGroup,
+ k.config,
+ )
+ if err != nil {
+ return err
}
ctx, cancel := context.WithCancel(context.Background())
k.cancel = cancel
// Start consumer goroutine
- k.wg = &sync.WaitGroup{}
k.wg.Add(1)
go func() {
defer k.wg.Done()
- k.receiver(ctx, acc)
+ for ctx.Err() == nil {
+ handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser)
+ handler.MaxMessageLen = k.MaxMessageLen
+ handler.TopicTag = k.TopicTag
+ err := k.consumer.Consume(ctx, k.Topics, handler)
+ if err != nil {
+ acc.AddError(err)
+ internal.SleepContext(ctx, reconnectDelay)
+ }
+ }
+ err = k.consumer.Close()
+ if err != nil {
+ acc.AddError(err)
+ }
}()
- log.Printf("I! Started the kafka consumer service, brokers: %v, topics: %v",
- k.Brokers, k.Topics)
+ k.wg.Add(1)
+ go func() {
+ defer k.wg.Done()
+ for err := range k.consumer.Errors() {
+ acc.AddError(err)
+ }
+ }()
+
+ return nil
+}
+
+func (k *KafkaConsumer) Gather(acc telegraf.Accumulator) error {
return nil
}
-// receiver() reads all incoming messages from the consumer, and parses them into
-// influxdb metric points.
-func (k *Kafka) receiver(ctx context.Context, ac telegraf.Accumulator) {
- k.messages = make(map[telegraf.TrackingID]*sarama.ConsumerMessage)
+func (k *KafkaConsumer) Stop() {
+ k.cancel()
+ k.wg.Wait()
+}
+
+// Message is an aggregate type binding the Kafka message and the session so
+// that offsets can be updated.
+type Message struct {
+ message *sarama.ConsumerMessage
+ session sarama.ConsumerGroupSession
+}
+
+func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser) *ConsumerGroupHandler {
+ handler := &ConsumerGroupHandler{
+ acc: acc.WithTracking(maxUndelivered),
+ sem: make(chan empty, maxUndelivered),
+ undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered),
+ parser: parser,
+ }
+ return handler
+}
+
+// ConsumerGroupHandler is a sarama.ConsumerGroupHandler implementation.
+type ConsumerGroupHandler struct {
+ MaxMessageLen int
+ TopicTag string
+
+ acc telegraf.TrackingAccumulator
+ sem semaphore
+ parser parsers.Parser
+ wg sync.WaitGroup
+ cancel context.CancelFunc
+
+ mu sync.Mutex
+ undelivered map[telegraf.TrackingID]Message
+}
+
+// Setup is called once when a new session is opened. It setups up the handler
+// and begins processing delivered messages.
+func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error {
+ h.undelivered = make(map[telegraf.TrackingID]Message)
- acc := ac.WithTracking(k.MaxUndeliveredMessages)
- sem := make(semaphore, k.MaxUndeliveredMessages)
+ ctx, cancel := context.WithCancel(context.Background())
+ h.cancel = cancel
+
+ h.wg.Add(1)
+ go func() {
+ defer h.wg.Done()
+ h.run(ctx)
+ }()
+ return nil
+}
+// Run processes any delivered metrics during the lifetime of the session.
+func (h *ConsumerGroupHandler) run(ctx context.Context) error {
for {
select {
case <-ctx.Done():
- return
- case track := <-acc.Delivered():
- <-sem
- k.onDelivery(track)
- case err := <-k.cluster.Errors():
- acc.AddError(err)
- case sem <- empty{}:
- select {
- case <-ctx.Done():
- return
- case track := <-acc.Delivered():
- // Once for the delivered message, once to leave the case
- <-sem
- <-sem
- k.onDelivery(track)
- case err := <-k.cluster.Errors():
- <-sem
- acc.AddError(err)
- case msg := <-k.cluster.Messages():
- err := k.onMessage(acc, msg)
- if err != nil {
- acc.AddError(err)
- <-sem
- }
- }
+ return nil
+ case track := <-h.acc.Delivered():
+ h.onDelivery(track)
}
}
}
-func (k *Kafka) markOffset(msg *sarama.ConsumerMessage) {
- if !k.doNotCommitMsgs {
- k.cluster.MarkOffset(msg, "")
+func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ msg, ok := h.undelivered[track.ID()]
+ if !ok {
+ log.Printf("E! [inputs.kafka_consumer] Could not mark message delivered: %d", track.ID())
+ return
}
+
+ if track.Delivered() {
+ msg.session.MarkMessage(msg.message, "")
+ }
+
+ delete(h.undelivered, track.ID())
+ <-h.sem
}
-func (k *Kafka) onMessage(acc telegraf.TrackingAccumulator, msg *sarama.ConsumerMessage) error {
- if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen {
- k.markOffset(msg)
- return fmt.Errorf("Message longer than max_message_len (%d > %d)",
- len(msg.Value), k.MaxMessageLen)
+// Reserve blocks until there is an available slot for a new message.
+func (h *ConsumerGroupHandler) Reserve(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case h.sem <- empty{}:
+ return nil
}
+}
+
+func (h *ConsumerGroupHandler) release() {
+ <-h.sem
+}
- metrics, err := k.parser.Parse(msg.Value)
+// Handle processes a message and if successful saves it to be acknowledged
+// after delivery.
+func (h *ConsumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error {
+ if h.MaxMessageLen != 0 && len(msg.Value) > h.MaxMessageLen {
+ session.MarkMessage(msg, "")
+ h.release()
+ return fmt.Errorf("message exceeds max_message_len (actual %d, max %d)",
+ len(msg.Value), h.MaxMessageLen)
+ }
+
+ metrics, err := h.parser.Parse(msg.Value)
if err != nil {
+ h.release()
return err
}
- if len(k.TopicTag) > 0 {
+
+ if len(h.TopicTag) > 0 {
for _, metric := range metrics {
- metric.AddTag(k.TopicTag, msg.Topic)
+ metric.AddTag(h.TopicTag, msg.Topic)
}
}
- id := acc.AddTrackingMetricGroup(metrics)
- k.messages[id] = msg
+ h.mu.Lock()
+ id := h.acc.AddTrackingMetricGroup(metrics)
+ h.undelivered[id] = Message{session: session, message: msg}
+ h.mu.Unlock()
return nil
}
-func (k *Kafka) onDelivery(track telegraf.DeliveryInfo) {
- msg, ok := k.messages[track.ID()]
- if !ok {
- log.Printf("E! [inputs.kafka_consumer] Could not mark message delivered: %d", track.ID())
- return
- }
-
- if track.Delivered() {
- k.markOffset(msg)
- }
- delete(k.messages, track.ID())
-}
+// ConsumeClaim is called once each claim in a goroutine and must be
+// thread-safe. Should run until the claim is closed.
+func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+ ctx := session.Context()
-func (k *Kafka) Stop() {
- k.cancel()
- k.wg.Wait()
+ for {
+ err := h.Reserve(ctx)
+ if err != nil {
+ return nil
+ }
- if err := k.cluster.Close(); err != nil {
- log.Printf("E! [inputs.kafka_consumer] Error closing consumer: %v", err)
+ select {
+ case <-ctx.Done():
+ return nil
+ case msg, ok := <-claim.Messages():
+ if !ok {
+ return nil
+ }
+ err := h.Handle(session, msg)
+ if err != nil {
+ h.acc.AddError(err)
+ }
+ }
}
}
-func (k *Kafka) Gather(acc telegraf.Accumulator) error {
+// Cleanup stops the internal goroutine and is called after all ConsumeClaim
+// functions have completed.
+func (h *ConsumerGroupHandler) Cleanup(sarama.ConsumerGroupSession) error {
+ h.cancel()
+ h.wg.Wait()
return nil
}
func init() {
inputs.Add("kafka_consumer", func() telegraf.Input {
- return &Kafka{
- MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
- }
+ return &KafkaConsumer{}
})
}
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go
deleted file mode 100644
index 23f9e0f920ac6..0000000000000
--- a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package kafka_consumer
-
-import (
- "fmt"
- "testing"
- "time"
-
- "github.com/Shopify/sarama"
- "github.com/influxdata/telegraf/testutil"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/influxdata/telegraf/plugins/parsers"
-)
-
-func TestReadsMetricsFromKafka(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping integration test in short mode")
- }
-
- brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
- testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())
-
- // Send a Kafka message to the kafka host
- msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n"
- producer, err := sarama.NewSyncProducer(brokerPeers, nil)
- require.NoError(t, err)
- _, _, err = producer.SendMessage(
- &sarama.ProducerMessage{
- Topic: testTopic,
- Value: sarama.StringEncoder(msg),
- })
- require.NoError(t, err)
- defer producer.Close()
-
- // Start the Kafka Consumer
- k := &Kafka{
- ConsumerGroup: "telegraf_test_consumers",
- Topics: []string{testTopic},
- Brokers: brokerPeers,
- Offset: "oldest",
- }
- p, _ := parsers.NewInfluxParser()
- k.SetParser(p)
-
- // Verify that we can now gather the sent message
- var acc testutil.Accumulator
-
- // Sanity check
- assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
- if err := k.Start(&acc); err != nil {
- t.Fatal(err.Error())
- } else {
- defer k.Stop()
- }
-
- waitForPoint(&acc, t)
-
- // Gather points
- err = acc.GatherError(k.Gather)
- require.NoError(t, err)
- if len(acc.Metrics) == 1 {
- point := acc.Metrics[0]
- assert.Equal(t, "cpu_load_short", point.Measurement)
- assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
- assert.Equal(t, map[string]string{
- "host": "server01",
- "direction": "in",
- "region": "us-west",
- }, point.Tags)
- assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
- } else {
- t.Errorf("No points found in accumulator, expected 1")
- }
-}
-
-// Waits for the metric that was sent to the kafka broker to arrive at the kafka
-// consumer
-func waitForPoint(acc *testutil.Accumulator, t *testing.T) {
- // Give the kafka container up to 2 seconds to get the point to the consumer
- ticker := time.NewTicker(5 * time.Millisecond)
- counter := 0
- for {
- select {
- case <-ticker.C:
- counter++
- if counter > 1000 {
- t.Fatal("Waited for 5s, point never arrived to consumer")
- } else if acc.NFields() == 1 {
- return
- }
- }
- }
-}
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go
index a4d06efe6fba8..01146e180a8c8 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go
@@ -2,219 +2,395 @@ package kafka_consumer
import (
"context"
- "strings"
"testing"
+ "time"
"github.com/Shopify/sarama"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/parsers/value"
"github.com/influxdata/telegraf/testutil"
- "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
-const (
- testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n"
- testMsgGraphite = "cpu.load.short.graphite 23422 1454780029"
- testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n"
- invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n"
-)
+type FakeConsumerGroup struct {
+ brokers []string
+ group string
+ config *sarama.Config
-type TestConsumer struct {
- errors chan error
- messages chan *sarama.ConsumerMessage
+ handler sarama.ConsumerGroupHandler
+ errors chan error
}
-func (c *TestConsumer) Errors() <-chan error {
- return c.errors
+func (g *FakeConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error {
+ g.handler = handler
+ g.handler.Setup(nil)
+ return nil
}
-func (c *TestConsumer) Messages() <-chan *sarama.ConsumerMessage {
- return c.messages
+func (g *FakeConsumerGroup) Errors() <-chan error {
+ return g.errors
}
-func (c *TestConsumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
+func (g *FakeConsumerGroup) Close() error {
+ close(g.errors)
+ return nil
}
-func (c *TestConsumer) Close() error {
- return nil
+type FakeCreator struct {
+ ConsumerGroup *FakeConsumerGroup
}
-func (c *TestConsumer) Inject(msg *sarama.ConsumerMessage) {
- c.messages <- msg
+func (c *FakeCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) {
+ c.ConsumerGroup.brokers = brokers
+ c.ConsumerGroup.group = group
+ c.ConsumerGroup.config = config
+ return c.ConsumerGroup, nil
}
-func newTestKafka() (*Kafka, *TestConsumer) {
- consumer := &TestConsumer{
- errors: make(chan error),
- messages: make(chan *sarama.ConsumerMessage, 1000),
+func TestInit(t *testing.T) {
+ tests := []struct {
+ name string
+ plugin *KafkaConsumer
+ initError bool
+ check func(t *testing.T, plugin *KafkaConsumer)
+ }{
+ {
+ name: "default config",
+ plugin: &KafkaConsumer{},
+ check: func(t *testing.T, plugin *KafkaConsumer) {
+ require.Equal(t, plugin.ConsumerGroup, defaultConsumerGroup)
+ require.Equal(t, plugin.MaxUndeliveredMessages, defaultMaxUndeliveredMessages)
+ require.Equal(t, plugin.config.ClientID, "Telegraf")
+ require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetOldest)
+ },
+ },
+ {
+ name: "parses valid version string",
+ plugin: &KafkaConsumer{
+ Version: "1.0.0",
+ Log: testutil.Logger{},
+ },
+ check: func(t *testing.T, plugin *KafkaConsumer) {
+ require.Equal(t, plugin.config.Version, sarama.V1_0_0_0)
+ },
+ },
+ {
+ name: "invalid version string",
+ plugin: &KafkaConsumer{
+ Version: "100",
+ Log: testutil.Logger{},
+ },
+ initError: true,
+ },
+ {
+ name: "custom client_id",
+ plugin: &KafkaConsumer{
+ ClientID: "custom",
+ Log: testutil.Logger{},
+ },
+ check: func(t *testing.T, plugin *KafkaConsumer) {
+ require.Equal(t, plugin.config.ClientID, "custom")
+ },
+ },
+ {
+ name: "custom offset",
+ plugin: &KafkaConsumer{
+ Offset: "newest",
+ Log: testutil.Logger{},
+ },
+ check: func(t *testing.T, plugin *KafkaConsumer) {
+ require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetNewest)
+ },
+ },
+ {
+ name: "invalid offset",
+ plugin: &KafkaConsumer{
+ Offset: "middle",
+ Log: testutil.Logger{},
+ },
+ initError: true,
+ },
+ {
+ name: "default tls without tls config",
+ plugin: &KafkaConsumer{
+ Log: testutil.Logger{},
+ },
+ check: func(t *testing.T, plugin *KafkaConsumer) {
+ require.False(t, plugin.config.Net.TLS.Enable)
+ },
+ },
+ {
+ name: "default tls with a tls config",
+ plugin: &KafkaConsumer{
+ ClientConfig: tls.ClientConfig{
+ InsecureSkipVerify: true,
+ },
+ Log: testutil.Logger{},
+ },
+ check: func(t *testing.T, plugin *KafkaConsumer) {
+ require.True(t, plugin.config.Net.TLS.Enable)
+ },
+ },
+ {
+ name: "disable tls",
+ plugin: &KafkaConsumer{
+ EnableTLS: func() *bool { v := false; return &v }(),
+ ClientConfig: tls.ClientConfig{
+ InsecureSkipVerify: true,
+ },
+ Log: testutil.Logger{},
+ },
+ check: func(t *testing.T, plugin *KafkaConsumer) {
+ require.False(t, plugin.config.Net.TLS.Enable)
+ },
+ },
+ {
+ name: "enable tls",
+ plugin: &KafkaConsumer{
+ EnableTLS: func() *bool { v := true; return &v }(),
+ Log: testutil.Logger{},
+ },
+ check: func(t *testing.T, plugin *KafkaConsumer) {
+ require.True(t, plugin.config.Net.TLS.Enable)
+ },
+ },
}
- k := Kafka{
- cluster: consumer,
- ConsumerGroup: "test",
- Topics: []string{"telegraf"},
- Brokers: []string{"localhost:9092"},
- Offset: "oldest",
- MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
- doNotCommitMsgs: true,
- messages: make(map[telegraf.TrackingID]*sarama.ConsumerMessage),
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cg := &FakeConsumerGroup{}
+ tt.plugin.ConsumerCreator = &FakeCreator{ConsumerGroup: cg}
+ err := tt.plugin.Init()
+ if tt.initError {
+ require.Error(t, err)
+ return
+ }
+
+ tt.check(t, tt.plugin)
+ })
}
- return &k, consumer
}
-func newTestKafkaWithTopicTag() (*Kafka, *TestConsumer) {
- consumer := &TestConsumer{
- errors: make(chan error),
- messages: make(chan *sarama.ConsumerMessage, 1000),
- }
- k := Kafka{
- cluster: consumer,
- ConsumerGroup: "test",
- Topics: []string{"telegraf"},
- Brokers: []string{"localhost:9092"},
- Offset: "oldest",
- MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
- doNotCommitMsgs: true,
- messages: make(map[telegraf.TrackingID]*sarama.ConsumerMessage),
- TopicTag: "topic",
+func TestStartStop(t *testing.T) {
+ cg := &FakeConsumerGroup{errors: make(chan error)}
+ plugin := &KafkaConsumer{
+ ConsumerCreator: &FakeCreator{ConsumerGroup: cg},
+ Log: testutil.Logger{},
}
- return &k, consumer
-}
+ err := plugin.Init()
+ require.NoError(t, err)
-// Test that the parser parses kafka messages into points
-func TestRunParser(t *testing.T) {
- k, consumer := newTestKafka()
- acc := testutil.Accumulator{}
- ctx := context.Background()
+ var acc testutil.Accumulator
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
- k.parser, _ = parsers.NewInfluxParser()
- go k.receiver(ctx, &acc)
- consumer.Inject(saramaMsg(testMsg))
- acc.Wait(1)
+ plugin.Stop()
+}
- assert.Equal(t, acc.NFields(), 1)
+type FakeConsumerGroupSession struct {
+ ctx context.Context
}
-// Test that the parser parses kafka messages into points
-// and adds the topic tag
-func TestRunParserWithTopic(t *testing.T) {
- k, consumer := newTestKafkaWithTopicTag()
- acc := testutil.Accumulator{}
- ctx := context.Background()
+func (s *FakeConsumerGroupSession) Claims() map[string][]int32 {
+ panic("not implemented")
+}
- k.parser, _ = parsers.NewInfluxParser()
- go k.receiver(ctx, &acc)
- consumer.Inject(saramaMsgWithTopic(testMsg, "test_topic"))
- acc.Wait(1)
+func (s *FakeConsumerGroupSession) MemberID() string {
+ panic("not implemented")
+}
- assert.Equal(t, acc.NFields(), 1)
- assert.True(t, acc.HasTag("cpu_load_short", "topic"))
+func (s *FakeConsumerGroupSession) GenerationID() int32 {
+ panic("not implemented")
}
-// Test that the parser ignores invalid messages
-func TestRunParserInvalidMsg(t *testing.T) {
- k, consumer := newTestKafka()
- acc := testutil.Accumulator{}
- ctx := context.Background()
+func (s *FakeConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+ panic("not implemented")
+}
- k.parser, _ = parsers.NewInfluxParser()
- go k.receiver(ctx, &acc)
- consumer.Inject(saramaMsg(invalidMsg))
- acc.WaitError(1)
+func (s *FakeConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+ panic("not implemented")
+}
- assert.Equal(t, acc.NFields(), 0)
+func (s *FakeConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) {
}
-// Test that overlong messages are dropped
-func TestDropOverlongMsg(t *testing.T) {
- const maxMessageLen = 64 * 1024
- k, consumer := newTestKafka()
- k.MaxMessageLen = maxMessageLen
- acc := testutil.Accumulator{}
- ctx := context.Background()
- overlongMsg := strings.Repeat("v", maxMessageLen+1)
+func (s *FakeConsumerGroupSession) Context() context.Context {
+ return s.ctx
+}
- go k.receiver(ctx, &acc)
- consumer.Inject(saramaMsg(overlongMsg))
- acc.WaitError(1)
+type FakeConsumerGroupClaim struct {
+ messages chan *sarama.ConsumerMessage
+}
- assert.Equal(t, acc.NFields(), 0)
+func (c *FakeConsumerGroupClaim) Topic() string {
+ panic("not implemented")
}
-// Test that the parser parses kafka messages into points
-func TestRunParserAndGather(t *testing.T) {
- k, consumer := newTestKafka()
- acc := testutil.Accumulator{}
- ctx := context.Background()
+func (c *FakeConsumerGroupClaim) Partition() int32 {
+ panic("not implemented")
+}
- k.parser, _ = parsers.NewInfluxParser()
- go k.receiver(ctx, &acc)
- consumer.Inject(saramaMsg(testMsg))
- acc.Wait(1)
+func (c *FakeConsumerGroupClaim) InitialOffset() int64 {
+ panic("not implemented")
+}
- acc.GatherError(k.Gather)
+func (c *FakeConsumerGroupClaim) HighWaterMarkOffset() int64 {
+ panic("not implemented")
+}
- assert.Equal(t, acc.NFields(), 1)
- acc.AssertContainsFields(t, "cpu_load_short",
- map[string]interface{}{"value": float64(23422)})
+func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage {
+ return c.messages
}
-// Test that the parser parses kafka messages into points
-func TestRunParserAndGatherGraphite(t *testing.T) {
- k, consumer := newTestKafka()
- acc := testutil.Accumulator{}
- ctx := context.Background()
+func TestConsumerGroupHandler_Lifecycle(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ parser := &value.ValueParser{MetricName: "cpu", DataType: "int"}
+ cg := NewConsumerGroupHandler(acc, 1, parser)
- k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
- go k.receiver(ctx, &acc)
- consumer.Inject(saramaMsg(testMsgGraphite))
- acc.Wait(1)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ session := &FakeConsumerGroupSession{
+ ctx: ctx,
+ }
+ var claim FakeConsumerGroupClaim
+ var err error
- acc.GatherError(k.Gather)
+ err = cg.Setup(session)
+ require.NoError(t, err)
- assert.Equal(t, acc.NFields(), 1)
- acc.AssertContainsFields(t, "cpu_load_short_graphite",
- map[string]interface{}{"value": float64(23422)})
+ cancel()
+ err = cg.ConsumeClaim(session, &claim)
+ require.NoError(t, err)
+
+ err = cg.Cleanup(session)
+ require.NoError(t, err)
}
-// Test that the parser parses kafka messages into points
-func TestRunParserAndGatherJSON(t *testing.T) {
- k, consumer := newTestKafka()
- acc := testutil.Accumulator{}
- ctx := context.Background()
+func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ parser := &value.ValueParser{MetricName: "cpu", DataType: "int"}
+ cg := NewConsumerGroupHandler(acc, 1, parser)
- k.parser, _ = parsers.NewParser(&parsers.Config{
- DataFormat: "json",
- MetricName: "kafka_json_test",
- })
- go k.receiver(ctx, &acc)
- consumer.Inject(saramaMsg(testMsgJSON))
- acc.Wait(1)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
- acc.GatherError(k.Gather)
+ session := &FakeConsumerGroupSession{ctx: ctx}
+ claim := &FakeConsumerGroupClaim{
+ messages: make(chan *sarama.ConsumerMessage, 1),
+ }
- assert.Equal(t, acc.NFields(), 2)
- acc.AssertContainsFields(t, "kafka_json_test",
- map[string]interface{}{
- "a": float64(5),
- "b_c": float64(6),
- })
-}
+ err := cg.Setup(session)
+ require.NoError(t, err)
-func saramaMsg(val string) *sarama.ConsumerMessage {
- return &sarama.ConsumerMessage{
- Key: nil,
- Value: []byte(val),
- Offset: 0,
- Partition: 0,
+ claim.messages <- &sarama.ConsumerMessage{
+ Topic: "telegraf",
+ Value: []byte("42"),
}
+
+ go func() {
+ err := cg.ConsumeClaim(session, claim)
+ require.NoError(t, err)
+ }()
+
+ acc.Wait(1)
+ cancel()
+
+ err = cg.Cleanup(session)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Now(),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
-func saramaMsgWithTopic(val string, topic string) *sarama.ConsumerMessage {
- return &sarama.ConsumerMessage{
- Key: nil,
- Value: []byte(val),
- Offset: 0,
- Partition: 0,
- Topic: topic,
+func TestConsumerGroupHandler_Handle(t *testing.T) {
+ tests := []struct {
+ name string
+ maxMessageLen int
+ topicTag string
+ msg *sarama.ConsumerMessage
+ expected []telegraf.Metric
+ }{
+ {
+ name: "happy path",
+ msg: &sarama.ConsumerMessage{
+ Topic: "telegraf",
+ Value: []byte("42"),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Now(),
+ ),
+ },
+ },
+ {
+ name: "message to long",
+ maxMessageLen: 4,
+ msg: &sarama.ConsumerMessage{
+ Topic: "telegraf",
+ Value: []byte("12345"),
+ },
+ expected: []telegraf.Metric{},
+ },
+ {
+ name: "parse error",
+ msg: &sarama.ConsumerMessage{
+ Topic: "telegraf",
+ Value: []byte("not an integer"),
+ },
+ expected: []telegraf.Metric{},
+ },
+ {
+ name: "add topic tag",
+ topicTag: "topic",
+ msg: &sarama.ConsumerMessage{
+ Topic: "telegraf",
+ Value: []byte("42"),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "topic": "telegraf",
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Now(),
+ ),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ parser := &value.ValueParser{MetricName: "cpu", DataType: "int"}
+ cg := NewConsumerGroupHandler(acc, 1, parser)
+ cg.MaxMessageLen = tt.maxMessageLen
+ cg.TopicTag = tt.topicTag
+
+ ctx := context.Background()
+ session := &FakeConsumerGroupSession{ctx: ctx}
+
+ cg.Reserve(ctx)
+ cg.Handle(session, tt.msg)
+
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+ })
}
}
diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md
index 31976788bc75f..2f0c219ea8647 100644
--- a/plugins/inputs/kafka_consumer_legacy/README.md
+++ b/plugins/inputs/kafka_consumer_legacy/README.md
@@ -1,4 +1,4 @@
-# Kafka Consumer Input Plugin
+# Kafka Consumer Legacy Input Plugin
The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka
topic and adds messages to InfluxDB. The plugin assumes messages follow the
@@ -13,12 +13,16 @@ from the same topic in parallel.
[[inputs.kafka_consumer]]
## topic(s) to consume
topics = ["telegraf"]
+
## an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
+
## Zookeeper Chroot
zookeeper_chroot = ""
+
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
+
## Offset (must be either "oldest" or "newest")
offset = "oldest"
diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go
index d9558d5bd080a..939fc8850ef5f 100644
--- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go
+++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go
@@ -2,7 +2,6 @@ package kafka_consumer_legacy
import (
"fmt"
- "log"
"strings"
"sync"
@@ -30,6 +29,8 @@ type Kafka struct {
Offset string
parser parsers.Parser
+ Log telegraf.Logger
+
sync.Mutex
// channel for all incoming kafka messages
@@ -49,12 +50,16 @@ type Kafka struct {
var sampleConfig = `
## topic(s) to consume
topics = ["telegraf"]
+
## an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
+
## Zookeeper Chroot
zookeeper_chroot = ""
+
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
+
## Offset (must be either "oldest" or "newest")
offset = "oldest"
@@ -96,7 +101,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
case "newest":
config.Offsets.Initial = sarama.OffsetNewest
default:
- log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
+ k.Log.Infof("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
k.Offset)
config.Offsets.Initial = sarama.OffsetOldest
}
@@ -121,7 +126,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
// Start the kafka message reader
go k.receiver()
- log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
+ k.Log.Infof("Started the kafka consumer service, peers: %v, topics: %v\n",
k.ZookeeperPeers, k.Topics)
return nil
}
diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go
index 60404cfac13fc..31bea2210b741 100644
--- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go
+++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go
@@ -37,6 +37,7 @@ func TestReadsMetricsFromKafka(t *testing.T) {
// Start the Kafka Consumer
k := &Kafka{
+ Log: testutil.Logger{},
ConsumerGroup: "telegraf_test_consumers",
Topics: []string{testTopic},
ZookeeperPeers: zkPeers,
diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go
index 38bc48290a37e..8037f49a053b5 100644
--- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go
+++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go
@@ -21,6 +21,7 @@ const (
func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
in := make(chan *sarama.ConsumerMessage, 1000)
k := Kafka{
+ Log: testutil.Logger{},
ConsumerGroup: "test",
Topics: []string{"telegraf"},
ZookeeperPeers: []string{"localhost:2181"},
diff --git a/plugins/inputs/kapacitor/README.md b/plugins/inputs/kapacitor/README.md
index 2ff4eab88af57..6a70387ee587b 100644
--- a/plugins/inputs/kapacitor/README.md
+++ b/plugins/inputs/kapacitor/README.md
@@ -1,6 +1,6 @@
-# Kapacitor Plugin
+# Kapacitor Input Plugin
-The Kapacitor plugin will collect metrics from the given Kapacitor instances.
+The Kapacitor plugin collects metrics from the given Kapacitor instances.
### Configuration:
@@ -23,70 +23,290 @@ The Kapacitor plugin will collect metrics from the given Kapacitor instances.
# insecure_skip_verify = false
```
-### Measurements & Fields
-
-- kapacitor
- - num_enabled_tasks, integer
- - num_subscriptions, integer
- - num_tasks, integer
-- kapacitor_edges
- - collected, integer
- - emitted, integer
-- kapacitor_ingress
- - points_received, integer
-- kapacitor_memstats
- - alloc_bytes, integer
- - buck_hash_sys_bytes, integer
- - frees, integer
- - gcc_pu_fraction, float
- - gc_sys_bytes, integer
- - heap_alloc_bytes, integer
- - heap_idle_bytes, integer
- - heap_inuse_bytes, integer
- - heap_objects, integer
- - heap_released_bytes, integer
- - heap_sys_bytes, integer
- - last_gc_ns, integer
- - lookups, integer
- - mallocs, integer
- - mcache_in_use_bytes, integer
- - mcache_sys_bytes, integer
- - mspan_in_use_bytes, integer
- - mspan_sys_bytes, integer
- - next_gc_ns, integer
- - num_gc, integer
- - other_sys_bytes, integer
- - pause_total_ns, integer
- - stack_in_use_bytes, integer
- - stack_sys_bytes, integer
- - sys_bytes, integer
- - total_alloc_bytes, integer
-- kapacitor_nodes
- - alerts_triggered, integer
- - avg_exec_time_ns, integer
- - batches_queried, integer
- - crits_triggered, integer
- - eval_errors, integer
- - fields_defaulted, integer
- - infos_triggered, integer
- - oks_triggered, integer
- - points_queried, integer
- - points_written, integer
- - query_errors, integer
- - tags_defaulted, integer
- - warns_triggered, integer
- - write_errors, integer
+### Measurements and fields
+
+- [kapacitor](#kapacitor)
+ - [num_enabled_tasks](#num_enabled_tasks) _(integer)_
+ - [num_subscriptions](#num_subscriptions) _(integer)_
+ - [num_tasks](#num_tasks) _(integer)_
+- [kapacitor_alert](#kapacitor_alert)
+ - [notification_dropped](#notification_dropped) _(integer)_
+ - [primary-handle-count](#primary-handle-count) _(integer)_
+ - [secondary-handle-count](#secondary-handle-count) _(integer)_
+- (Kapacitor Enterprise only) [kapacitor_cluster](#kapacitor_cluster) _(integer)_
+ - [dropped_member_events](#dropped_member_events) _(integer)_
+ - [dropped_user_events](#dropped_user_events) _(integer)_
+ - [query_handler_errors](#query_handler_errors) _(integer)_
+- [kapacitor_edges](#kapacitor_edges)
+ - [collected](#collected) _(integer)_
+ - [emitted](#emitted) _(integer)_
+- [kapacitor_ingress](#kapacitor_ingress)
+ - [points_received](#points_received) _(integer)_
+- [kapacitor_load](#kapacitor_load)
+ - [errors](#errors) _(integer)_
+- [kapacitor_memstats](#kapacitor_memstats)
+ - [alloc_bytes](#alloc_bytes) _(integer)_
+ - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_
+ - [frees](#frees) _(integer)_
+ - [gc_sys_bytes](#gc_sys_bytes) _(integer)_
+ - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_
+ - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_
+ - [heap_idle_bytes](#heap_idle_bytes) _(integer)_
+ - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_
+ - [heap_objects](#heap_objects) _(integer)_
+ - [heap_released_bytes](#heap_released_bytes) _(integer)_
+ - [heap_sys_bytes](#heap_sys_bytes) _(integer)_
+ - [last_gc_ns](#last_gc_ns) _(integer)_
+ - [lookups](#lookups) _(integer)_
+ - [mallocs](#mallocs) _(integer)_
+ - [mcache_in_use_bytes](#mcache_in_use_bytes) _(integer)_
+ - [mcache_sys_bytes](#mcache_sys_bytes) _(integer)_
+ - [mspan_in_use_bytes](#mspan_in_use_bytes) _(integer)_
+ - [mspan_sys_bytes](#mspan_sys_bytes) _(integer)_
+ - [next_gc_ns](#next_gc_ns) _(integer)_
+ - [num_gc](#num_gc) _(integer)_
+ - [other_sys_bytes](#other_sys_bytes) _(integer)_
+ - [pause_total_ns](#pause_total_ns) _(integer)_
+ - [stack_in_use_bytes](#stack_in_use_bytes) _(integer)_
+ - [stack_sys_bytes](#stack_sys_bytes) _(integer)_
+ - [sys_bytes](#sys_bytes) _(integer)_
+ - [total_alloc_bytes](#total_alloc_bytes) _(integer)_
+- [kapacitor_nodes](#kapacitor_nodes)
+ - [alerts_inhibited](#alerts_inhibited) _(integer)_
+ - [alerts_triggered](#alerts_triggered) _(integer)_
+ - [avg_exec_time_ns](#avg_exec_time_ns) _(integer)_
+ - [crits_triggered](#crits_triggered) _(integer)_
+ - [errors](#errors) _(integer)_
+ - [infos_triggered](#infos_triggered) _(integer)_
+ - [oks_triggered](#oks_triggered) _(integer)_
+ - [points_written](#points_written) _(integer)_
+ - [warns_triggered](#warns_triggered) _(integer)_
+ - [write_errors](#write_errors) _(integer)_
+- [kapacitor_topics](#kapacitor_topics)
+ - [collected](#collected) _(integer)_
+
+
+---
+
+### kapacitor
+The `kapacitor` measurement stores fields with information related to
+[Kapacitor tasks](https://docs.influxdata.com/kapacitor/latest/introduction/getting-started/#kapacitor-tasks)
+and [subscriptions](https://docs.influxdata.com/kapacitor/latest/administration/subscription-management/).
+
+#### num_enabled_tasks
+The number of enabled Kapacitor tasks.
+
+#### num_subscriptions
+The number of Kapacitor/InfluxDB subscriptions.
+
+#### num_tasks
+The total number of Kapacitor tasks.
+
+---
+
+### kapacitor_alert
+The `kapacitor_alert` measurement stores fields with information related to
+[Kapacitor alerts](https://docs.influxdata.com/kapacitor/v1.5/working/alerts/).
+
+#### notification-dropped
+The number of internal notifications dropped because they arrive too late from another Kapacitor node.
+If this count is increasing, Kapacitor Enterprise nodes aren't able to communicate fast enough
+to keep up with the volume of alerts.
+
+#### primary-handle-count
+The number of times this node handled an alert as the primary. This count should increase under normal conditions.
+
+#### secondary-handle-count
+The number of times this node handled an alert as the secondary. An increase in this counter indicates that the primary is failing to handle alerts in a timely manner.
+
+---
+
+### kapacitor_cluster
+The `kapacitor_cluster` measurement reflects the ability of [Kapacitor nodes to communicate](https://docs.influxdata.com/enterprise_kapacitor/v1.5/administration/configuration/#cluster-communications) with one another. Specifically, these metrics track the gossip communication between the Kapacitor nodes.
+
+#### dropped_member_events
+The number of gossip member events that were dropped.
+
+#### dropped_user_events
+The number of gossip user events that were dropped.
+
+---
+
+### kapacitor_edges
+The `kapacitor_edges` measurement stores fields with information related to
+[edges](https://docs.influxdata.com/kapacitor/latest/tick/introduction/#pipelines)
+in Kapacitor TICKscripts.
+
+#### collected
+The number of messages collected by TICKscript edges.
+
+#### emitted
+The number of messages emitted by TICKscript edges.
+
+---
+
+### kapacitor_ingress
+The `kapacitor_ingress` measurement stores fields with information related to data
+coming into Kapacitor.
+
+#### points_received
+The number of points received by Kapacitor.
+
+---
+
+### kapacitor_load
+The `kapacitor_load` measurement stores fields with information related to the
+[Kapacitor Load Directory service](https://docs.influxdata.com/kapacitor/latest/guides/load_directory/).
+
+#### errors
+The number of errors reported from the load directory service.
+
+---
+
+### kapacitor_memstats
+The `kapacitor_memstats` measurement stores fields related to Kapacitor memory usage.
+
+#### alloc_bytes
+The number of bytes of memory allocated by Kapacitor that are still in use.
+
+#### buck_hash_sys_bytes
+The number of bytes of memory used by the profiling bucket hash table.
+
+#### frees
+The number of heap objects freed.
+
+#### gc_sys_bytes
+The number of bytes of memory used for garbage collection system metadata.
+
+#### gc_cpu_fraction
+The fraction of Kapacitor's available CPU time used by garbage collection since
+Kapacitor started.
+
+#### heap_alloc_bytes
+The number of reachable and unreachable heap objects garbage collection has
+not freed.
+
+#### heap_idle_bytes
+The number of heap bytes waiting to be used.
+
+#### heap_in_use_bytes
+The number of heap bytes in use.
+
+#### heap_objects
+The number of allocated objects.
+
+#### heap_released_bytes
+The number of heap bytes released to the operating system.
+
+#### heap_sys_bytes
+The number of heap bytes obtained from `system`.
+
+#### last_gc_ns
+The nanosecond epoch time of the last garbage collection.
+
+#### lookups
+The total number of pointer lookups.
+
+#### mallocs
+The total number of mallocs.
+
+#### mcache_in_use_bytes
+The number of bytes in use by mcache structures.
+
+#### mcache_sys_bytes
+The number of bytes used for mcache structures obtained from `system`.
+
+#### mspan_in_use_bytes
+The number of bytes in use by mspan structures.
+
+#### mspan_sys_bytes
+The number of bytes used for mspan structures obtained from `system`.
+
+#### next_gc_ns
+The nanosecond epoch time of the next garbage collection.
+
+#### num_gc
+The number of completed garbage collection cycles.
+
+#### other_sys_bytes
+The number of bytes used for other system allocations.
+
+#### pause_total_ns
+The total number of nanoseconds spent in garbage collection "stop-the-world"
+pauses since Kapacitor started.
+
+#### stack_in_use_bytes
+The number of bytes in use by the stack allocator.
+
+#### stack_sys_bytes
+The number of bytes obtained from `system` for stack allocator.
+
+#### sys_bytes
+The number of bytes of memory obtained from `system`.
+
+#### total_alloc_bytes
+The total number of bytes allocated, even if freed.
+
+---
+
+### kapacitor_nodes
+The `kapacitor_nodes` measurement stores fields related to events that occur in
+[TICKscript nodes](https://docs.influxdata.com/kapacitor/latest/nodes/).
+
+#### alerts_inhibited
+The total number of alerts inhibited by TICKscripts.
+
+#### alerts_triggered
+The total number of alerts triggered by TICKscripts.
+
+#### avg_exec_time_ns
+The average execution time of TICKscripts in nanoseconds.
+
+#### crits_triggered
+The number of critical (`crit`) alerts triggered by TICKscripts.
+
+#### errors
+The number of errors caused caused by TICKscripts.
+
+#### infos_triggered
+The number of info (`info`) alerts triggered by TICKscripts.
+
+#### oks_triggered
+The number of ok (`ok`) alerts triggered by TICKscripts.
+
+#### points_written
+The number of points written to InfluxDB or back to Kapacitor.
+
+#### warns_triggered
+The number of warning (`warn`) alerts triggered by TICKscripts.
+
+#### working_cardinality
+The total number of unique series processed.
+
+#### write_errors
+The number of errors that occurred when writing to InfluxDB or other write endpoints.
+
+---
+
+### kapacitor_topics
+The `kapacitor_topics` measurement stores fields related to
+Kapacitor topics](https://docs.influxdata.com/kapacitor/latest/working/using_alert_topics/).
+
+#### collected
+The number of events collected by Kapacitor topics.
+
+---
*Note:* The Kapacitor variables `host`, `cluster_id`, and `server_id`
are currently not recorded due to the potential high cardinality of
these values.
-### Example Output:
+## Example Output
```
$ telegraf --config /etc/telegraf.conf --input-filter kapacitor --test
* Plugin: inputs.kapacitor, Collection 1
-> kapacitor_memstats,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars alloc_bytes=6974808i,buck_hash_sys_bytes=1452609i,frees=207281i,gc_sys_bytes=802816i,gcc_pu_fraction=0.00004693548939673313,heap_alloc_bytes=6974808i,heap_idle_bytes=6742016i,heap_in_use_bytes=9183232i,heap_objects=23216i,heap_released_bytes=0i,heap_sys_bytes=15925248i,last_gc_ns=1478791460012676997i,lookups=88i,mallocs=230497i,mcache_in_use_bytes=9600i,mcache_sys_bytes=16384i,mspan_in_use_bytes=98560i,mspan_sys_bytes=131072i,next_gc_ns=11467528i,num_gc=8i,other_sys_bytes=2236087i,pause_total_ns=2994110i,stack_in_use_bytes=1900544i,stack_sys_bytes=1900544i,sys_bytes=22464760i,total_alloc_bytes=35023600i 1478791462000000000
+> kapacitor_memstats,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars alloc_bytes=6974808i,buck_hash_sys_bytes=1452609i,frees=207281i,gc_sys_bytes=802816i,gc_cpu_fraction=0.00004693548939673313,heap_alloc_bytes=6974808i,heap_idle_bytes=6742016i,heap_in_use_bytes=9183232i,heap_objects=23216i,heap_released_bytes=0i,heap_sys_bytes=15925248i,last_gc_ns=1478791460012676997i,lookups=88i,mallocs=230497i,mcache_in_use_bytes=9600i,mcache_sys_bytes=16384i,mspan_in_use_bytes=98560i,mspan_sys_bytes=131072i,next_gc_ns=11467528i,num_gc=8i,other_sys_bytes=2236087i,pause_total_ns=2994110i,stack_in_use_bytes=1900544i,stack_sys_bytes=1900544i,sys_bytes=22464760i,total_alloc_bytes=35023600i 1478791462000000000
> kapacitor,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars num_enabled_tasks=5i,num_subscriptions=5i,num_tasks=5i 1478791462000000000
> kapacitor_edges,child=stream0,host=hostname.local,parent=stream,task=deadman-test,type=stream collected=0,emitted=0 1478791462000000000
> kapacitor_ingress,database=_internal,host=hostname.local,measurement=shard,retention_policy=monitor,task_master=main points_received=120 1478791462000000000
diff --git a/plugins/inputs/kapacitor/kapacitor.go b/plugins/inputs/kapacitor/kapacitor.go
index f20b98774aebd..dd3303a7419d3 100644
--- a/plugins/inputs/kapacitor/kapacitor.go
+++ b/plugins/inputs/kapacitor/kapacitor.go
@@ -9,7 +9,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -171,7 +171,7 @@ func (k *Kapacitor) gatherURL(
"alloc_bytes": s.MemStats.Alloc,
"buck_hash_sys_bytes": s.MemStats.BuckHashSys,
"frees": s.MemStats.Frees,
- "gcc_pu_fraction": s.MemStats.GCCPUFraction,
+ "gc_cpu_fraction": s.MemStats.GCCPUFraction,
"gc_sys_bytes": s.MemStats.GCSys,
"heap_alloc_bytes": s.MemStats.HeapAlloc,
"heap_idle_bytes": s.MemStats.HeapIdle,
diff --git a/plugins/inputs/kapacitor/kapacitor_test.go b/plugins/inputs/kapacitor/kapacitor_test.go
index b32aeec24b383..cae1f9ce30e77 100644
--- a/plugins/inputs/kapacitor/kapacitor_test.go
+++ b/plugins/inputs/kapacitor/kapacitor_test.go
@@ -33,7 +33,7 @@ func TestKapacitor(t *testing.T) {
"alloc_bytes": int64(6950624),
"buck_hash_sys_bytes": int64(1446737),
"frees": int64(129656),
- "gcc_pu_fraction": float64(0.006757149597237818),
+ "gc_cpu_fraction": float64(0.006757149597237818),
"gc_sys_bytes": int64(575488),
"heap_alloc_bytes": int64(6950624),
"heap_idle_bytes": int64(499712),
diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md
index 7d885aed102a8..73bf4a2981d63 100644
--- a/plugins/inputs/kibana/README.md
+++ b/plugins/inputs/kibana/README.md
@@ -1,15 +1,17 @@
-# Kibana input plugin
+# Kibana Input Plugin
-The [kibana](https://www.elastic.co/) plugin queries Kibana status API to
-obtain the health status of Kibana and some useful metrics.
+The `kibana` plugin queries the [Kibana][] API to obtain the service status.
-This plugin has been tested and works on Kibana 6.x versions.
+- Telegraf minimum version: 1.8
+- Kibana minimum tested version: 6.0
+
+[Kibana]: https://www.elastic.co/
### Configuration
```toml
[[inputs.kibana]]
- ## specify a list of one or more Kibana servers
+ ## Specify a list of one or more Kibana servers
servers = ["http://localhost:5601"]
## Timeout for HTTP requests
@@ -27,37 +29,27 @@ This plugin has been tested and works on Kibana 6.x versions.
# insecure_skip_verify = false
```
-### Status mappings
-
-When reporting health (green/yellow/red), additional field `status_code`
-is reported. Field contains mapping from status:string to status_code:int
-with following rules:
-
-- `green` - 1
-- `yellow` - 2
-- `red` - 3
-- `unknown` - 0
-
-### Measurements & Fields
+### Metrics
- kibana
- - status_code: integer (1, 2, 3, 0)
- - heap_max_bytes: integer
- - heap_used_bytes: integer
- - uptime_ms: integer
- - response_time_avg_ms: float
- - response_time_max_ms: integer
- - concurrent_connections: integer
- - requests_per_sec: float
-
-### Tags
-
-- status (Kibana health: green, yellow, red)
-- name (Kibana reported name)
-- uuid (Kibana reported UUID)
-- version (Kibana version)
-- source (Kibana server hostname or IP)
+ - tags:
+ - name (Kibana reported name)
+ - source (Kibana server hostname or IP)
+ - status (Kibana health: green, yellow, red)
+ - version (Kibana version)
+ - fields:
+ - status_code (integer, green=1 yellow=2 red=3 unknown=0)
+ - heap_total_bytes (integer)
+ - heap_max_bytes (integer; deprecated in 1.13.3: use `heap_total_bytes` field)
+ - heap_used_bytes (integer)
+ - uptime_ms (integer)
+ - response_time_avg_ms (float)
+ - response_time_max_ms (integer)
+ - concurrent_connections (integer)
+ - requests_per_sec (float)
### Example Output
-kibana,host=myhost,name=my-kibana,source=localhost:5601,version=6.3.2 concurrent_connections=0i,heap_max_bytes=136478720i,heap_used_bytes=119231088i,response_time_avg_ms=0i,response_time_max_ms=0i,status="green",status_code=1i,uptime_ms=2187428019i 1534864502000000000
+```
+kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000
+```
diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go
index 0e21ad800d408..7a2f7ae3d12fa 100644
--- a/plugins/inputs/kibana/kibana.go
+++ b/plugins/inputs/kibana/kibana.go
@@ -3,14 +3,17 @@ package kibana
import (
"encoding/json"
"fmt"
+ "io"
+ "io/ioutil"
"net/http"
+ "strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -40,7 +43,7 @@ type overallStatus struct {
}
type metrics struct {
- UptimeInMillis int64 `json:"uptime_in_millis"`
+ UptimeInMillis float64 `json:"uptime_in_millis"`
ConcurrentConnections int64 `json:"concurrent_connections"`
CollectionIntervalInMilles int64 `json:"collection_interval_in_millis"`
ResponseTimes responseTimes `json:"response_times"`
@@ -54,7 +57,9 @@ type responseTimes struct {
}
type process struct {
- Mem mem `json:"mem"`
+ Mem mem `json:"mem"`
+ Memory memory `json:"memory"`
+ UptimeInMillis float64 `json:"uptime_in_millis"`
}
type requests struct {
@@ -66,8 +71,17 @@ type mem struct {
HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
}
+type memory struct {
+ Heap heap `json:"heap"`
+}
+
+type heap struct {
+ TotalInBytes int64 `json:"total_in_bytes"`
+ UsedInBytes int64 `json:"used_in_bytes"`
+}
+
const sampleConfig = `
- ## specify a list of one or more Kibana servers
+ ## Specify a list of one or more Kibana servers
servers = ["http://localhost:5601"]
## Timeout for HTTP requests
@@ -187,15 +201,37 @@ func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) er
tags["status"] = kibanaStatus.Status.Overall.State
fields["status_code"] = mapHealthStatusToCode(kibanaStatus.Status.Overall.State)
-
- fields["uptime_ms"] = kibanaStatus.Metrics.UptimeInMillis
fields["concurrent_connections"] = kibanaStatus.Metrics.ConcurrentConnections
- fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes
- fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes
fields["response_time_avg_ms"] = kibanaStatus.Metrics.ResponseTimes.AvgInMillis
fields["response_time_max_ms"] = kibanaStatus.Metrics.ResponseTimes.MaxInMillis
fields["requests_per_sec"] = float64(kibanaStatus.Metrics.Requests.Total) / float64(kibanaStatus.Metrics.CollectionIntervalInMilles) * 1000
+ versionArray := strings.Split(kibanaStatus.Version.Number, ".")
+ arrayElement := 1
+
+ if len(versionArray) > 1 {
+ arrayElement = 2
+ }
+ versionNumber, err := strconv.ParseFloat(strings.Join(versionArray[:arrayElement], "."), 64)
+ if err != nil {
+ return err
+ }
+
+ // Same value will be assigned to both the metrics [heap_max_bytes and heap_total_bytes ]
+ // Which keeps the code backward compatible
+ if versionNumber >= 6.4 {
+ fields["uptime_ms"] = int64(kibanaStatus.Metrics.Process.UptimeInMillis)
+ fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes
+ fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes
+ fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.UsedInBytes
+ } else {
+ fields["uptime_ms"] = int64(kibanaStatus.Metrics.UptimeInMillis)
+ fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes
+ fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes
+ fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes
+
+ }
+
acc.AddFields("kibana", fields, tags)
return nil
@@ -216,6 +252,12 @@ func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err err
defer response.Body.Close()
+ if response.StatusCode != http.StatusOK {
+ // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+ body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200))
+ return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body)
+ }
+
if err = json.NewDecoder(response.Body).Decode(v); err != nil {
return request.Host, err
}
diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go
index ad5e32d290c34..3dfed9edfa9a2 100644
--- a/plugins/inputs/kibana/kibana_test.go
+++ b/plugins/inputs/kibana/kibana_test.go
@@ -9,7 +9,7 @@ import (
"github.com/influxdata/telegraf/testutil"
)
-func defaultTags() map[string]string {
+func defaultTags6_3() map[string]string {
return map[string]string{
"name": "my-kibana",
"source": "example.com:5601",
@@ -18,6 +18,15 @@ func defaultTags() map[string]string {
}
}
+func defaultTags6_5() map[string]string {
+ return map[string]string{
+ "name": "my-kibana",
+ "source": "example.com:5601",
+ "version": "6.5.4",
+ "status": "green",
+ }
+}
+
type transportMock struct {
statusCode int
body string
@@ -41,22 +50,34 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
return res, nil
}
-func checkKibanaStatusResult(t *testing.T, acc *testutil.Accumulator) {
- tags := defaultTags()
- acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected, tags)
+func checkKibanaStatusResult(version string, t *testing.T, acc *testutil.Accumulator) {
+ if version == "6.3.2" {
+ tags := defaultTags6_3()
+ acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected6_3, tags)
+ } else {
+ tags := defaultTags6_5()
+ acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected6_5, tags)
+ }
}
func TestGather(t *testing.T) {
ks := newKibanahWithClient()
ks.Servers = []string{"http://example.com:5601"}
- ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse)
-
- var acc testutil.Accumulator
- if err := acc.GatherError(ks.Gather); err != nil {
+ // Unit test for Kibana version < 6.4
+ ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_3)
+ var acc1 testutil.Accumulator
+ if err := acc1.GatherError(ks.Gather); err != nil {
t.Fatal(err)
}
+ checkKibanaStatusResult(defaultTags6_3()["version"], t, &acc1)
- checkKibanaStatusResult(t, &acc)
+ //Unit test for Kibana version >= 6.4
+ ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_5)
+ var acc2 testutil.Accumulator
+ if err := acc2.GatherError(ks.Gather); err != nil {
+ t.Fatal(err)
+ }
+ checkKibanaStatusResult(defaultTags6_5()["version"], t, &acc2)
}
func newKibanahWithClient() *Kibana {
diff --git a/plugins/inputs/kibana/testdata_test.go b/plugins/inputs/kibana/testdata_test6_3.go
similarity index 96%
rename from plugins/inputs/kibana/testdata_test.go
rename to plugins/inputs/kibana/testdata_test6_3.go
index ec393bb197ae9..bda52927303af 100644
--- a/plugins/inputs/kibana/testdata_test.go
+++ b/plugins/inputs/kibana/testdata_test6_3.go
@@ -1,6 +1,6 @@
package kibana
-const kibanaStatusResponse = `
+const kibanaStatusResponse6_3 = `
{
"name": "my-kibana",
"uuid": "00000000-0000-0000-0000-000000000000",
@@ -187,8 +187,9 @@ const kibanaStatusResponse = `
}
`
-var kibanaStatusExpected = map[string]interface{}{
+var kibanaStatusExpected6_3 = map[string]interface{}{
"status_code": 1,
+ "heap_total_bytes": int64(149954560),
"heap_max_bytes": int64(149954560),
"heap_used_bytes": int64(126274392),
"uptime_ms": int64(2173595336),
diff --git a/plugins/inputs/kibana/testdata_test6_5.go b/plugins/inputs/kibana/testdata_test6_5.go
new file mode 100644
index 0000000000000..a000229c14f73
--- /dev/null
+++ b/plugins/inputs/kibana/testdata_test6_5.go
@@ -0,0 +1,227 @@
+package kibana
+
+const kibanaStatusResponse6_5 = `
+{
+ "name": "my-kibana",
+ "uuid": "00000000-0000-0000-0000-000000000000",
+ "version": {
+ "number": "6.5.4",
+ "build_hash": "53d0c6758ac3fb38a3a1df198c1d4c87765e63f7",
+ "build_number": 17307,
+ "build_snapshot": false
+ },
+ "status": {
+ "overall": {
+ "state": "green",
+ "title": "Green",
+ "nickname": "Looking good",
+ "icon": "success",
+ "since": "2018-07-27T07:37:42.567Z"
+ },
+ "statuses": [{
+ "id": "plugin:kibana@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.567Z"
+ },
+ {
+ "id": "plugin:elasticsearch@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-28T10:07:04.920Z"
+ },
+ {
+ "id": "plugin:xpack_main@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-28T10:07:02.393Z"
+ },
+ {
+ "id": "plugin:searchprofiler@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-28T10:07:02.395Z"
+ },
+ {
+ "id": "plugin:tilemap@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-28T10:07:02.396Z"
+ },
+ {
+ "id": "plugin:watcher@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-28T10:07:02.397Z"
+ },
+ {
+ "id": "plugin:license_management@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.668Z"
+ },
+ {
+ "id": "plugin:index_management@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-28T10:07:02.399Z"
+ },
+ {
+ "id": "plugin:timelion@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.912Z"
+ },
+ {
+ "id": "plugin:logtrail@0.1.29",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.919Z"
+ },
+ {
+ "id": "plugin:monitoring@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.922Z"
+ },
+ {
+ "id": "plugin:grokdebugger@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-28T10:07:02.400Z"
+ },
+ {
+ "id": "plugin:dashboard_mode@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.928Z"
+ },
+ {
+ "id": "plugin:logstash@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-28T10:07:02.401Z"
+ },
+ {
+ "id": "plugin:apm@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.950Z"
+ },
+ {
+ "id": "plugin:console@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.958Z"
+ },
+ {
+ "id": "plugin:console_extensions@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.961Z"
+ },
+ {
+ "id": "plugin:metrics@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-27T07:37:42.965Z"
+ },
+ {
+ "id": "plugin:reporting@6.5.4",
+ "state": "green",
+ "icon": "success",
+ "message": "Ready",
+ "since": "2018-07-28T10:07:02.402Z"
+ }]
+ },
+ "metrics": {
+ "last_updated": "2020-01-15T09:40:17.733Z",
+ "collection_interval_in_millis": 5000,
+ "process": {
+ "memory": {
+ "heap": {
+ "total_in_bytes": 149954560,
+ "used_in_bytes": 126274392,
+ "size_limit": 1501560832
+ },
+ "resident_set_size_in_bytes": 286650368
+ },
+ "event_loop_delay": 0.5314235687255859,
+ "pid": 6,
+ "uptime_in_millis": 2173595336.9999999998
+ },
+ "os": {
+ "load": {
+ "1m": 2.66015625,
+ "5m": 2.8173828125,
+ "15m": 2.51025390625
+ },
+ "memory": {
+ "total_in_bytes": 404355756032,
+ "free_in_bytes": 294494244864,
+ "used_in_bytes": 109861511168
+ },
+ "uptime_in_millis": 8220745000,
+ "cgroup": {
+ "cpuacct": {
+ "control_group": "/",
+ "usage_nanos": 1086527218898
+ },
+ "cpu": {
+ "control_group": "/",
+ "cfs_period_micros": 100000,
+ "cfs_quota_micros": -1,
+ "stat": {
+ "number_of_elapsed_periods": 0,
+ "number_of_times_throttled": 0,
+ "time_throttled_nanos": 0
+ }
+ }
+ }
+ },
+ "response_times": {
+ "avg_in_millis": 12.5,
+ "max_in_millis": 123
+ },
+ "requests": {
+ "total": 2,
+ "disconnects": 0,
+ "status_codes": {
+ "200": 1,
+ "304": 1
+ }
+ },
+ "concurrent_connections": 10
+ }
+}
+`
+
+var kibanaStatusExpected6_5 = map[string]interface{}{
+ "status_code": 1,
+ "heap_total_bytes": int64(149954560),
+ "heap_max_bytes": int64(149954560),
+ "heap_used_bytes": int64(126274392),
+ "uptime_ms": int64(2173595337),
+ "response_time_avg_ms": float64(12.5),
+ "response_time_max_ms": int64(123),
+ "concurrent_connections": int64(10),
+ "requests_per_sec": float64(0.4),
+}
diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md
index d6f3a707b7552..7896557ac6cf5 100644
--- a/plugins/inputs/kinesis_consumer/README.md
+++ b/plugins/inputs/kinesis_consumer/README.md
@@ -78,7 +78,7 @@ DynamoDB:
#### DynamoDB Checkpoint
The DynamoDB checkpoint stores the last processed record in a DynamoDB. To leverage
-this functionality, create a table with the folowing string type keys:
+this functionality, create a table with the following string type keys:
```
Partition key: namespace
diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go
index b9b98243b3100..6a3b1c8301a48 100644
--- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go
+++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go
@@ -3,7 +3,6 @@ package kinesis_consumer
import (
"context"
"fmt"
- "log"
"math/big"
"strings"
"sync"
@@ -15,7 +14,7 @@ import (
"github.com/harlow/kinesis-consumer/checkpoint/ddb"
"github.com/influxdata/telegraf"
- internalaws "github.com/influxdata/telegraf/internal/config/aws"
+ internalaws "github.com/influxdata/telegraf/config/aws"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -40,6 +39,8 @@ type (
DynamoDB *DynamoDB `toml:"checkpoint_dynamodb"`
MaxUndeliveredMessages int `toml:"max_undelivered_messages"`
+ Log telegraf.Logger
+
cons *consumer.Consumer
parser parsers.Parser
cancel context.CancelFunc
@@ -220,7 +221,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error {
})
if err != nil {
k.cancel()
- log.Printf("E! [inputs.kinesis_consumer] Scan encounterred an error - %s", err.Error())
+ k.Log.Errorf("Scan encountered an error: %s", err.Error())
k.cons = nil
}
}()
@@ -285,7 +286,7 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) {
k.lastSeqNum = strToBint(sequenceNum)
k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum)
} else {
- log.Println("D! [inputs.kinesis_consumer] Metric group failed to process")
+ k.Log.Debug("Metric group failed to process")
}
}
}
diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md
index 7bcb63d1411c3..dbed6d6f01edb 100644
--- a/plugins/inputs/kube_inventory/README.md
+++ b/plugins/inputs/kube_inventory/README.md
@@ -1,17 +1,28 @@
-# Kube_Inventory Plugin
+# Kubernetes Inventory Input Plugin
+
This plugin generates metrics derived from the state of the following Kubernetes resources:
- - daemonsets
- - deployments
- - nodes
- - persistentvolumes
- - persistentvolumeclaims
- - pods (containers)
- - statefulsets
+
+- daemonsets
+- deployments
+- endpoints
+- ingress
+- nodes
+- persistentvolumes
+- persistentvolumeclaims
+- pods (containers)
+- services
+- statefulsets
+
+Kubernetes is a fast moving project, with a new minor release every 3 months. As
+such, we will aim to maintain support only for versions that are supported by
+the major cloud providers; this is roughly 4 release / 2 years.
+
+**This plugin supports Kubernetes 1.11 and later.**
#### Series Cardinality Warning
This plugin may produce a high number of series which, when not controlled
-for, will cause high load on your database. Use the following techniques to
+for, will cause high load on your database. Use the following techniques to
avoid cardinality issues:
- Use [metric filtering][] options to exclude unneeded measurements and tags.
@@ -33,6 +44,8 @@ avoid cardinality issues:
# namespace = "default"
## Use bearer token for authorization. ('bearer_token' takes priority)
+ ## If both of these are empty, we'll use the default serviceaccount:
+ ## at: /run/secrets/kubernetes.io/serviceaccount/token
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
@@ -42,25 +55,35 @@ avoid cardinality issues:
## Optional Resources to exclude from gathering
## Leave them with blank with try to gather everything available.
- ## Values can be - "daemonsets", deployments", "nodes", "persistentvolumes",
- ## "persistentvolumeclaims", "pods", "statefulsets"
+ ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes",
+ ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
# resource_exclude = [ "deployments", "nodes", "statefulsets" ]
## Optional Resources to include when gathering
## Overrides resource_exclude if both set.
# resource_include = [ "deployments", "nodes", "statefulsets" ]
+ ## selectors to include and exclude as tags. Globs accepted.
+ ## Note that an empty array for both will include all selectors as tags
+ ## selector_exclude overrides selector_include if both set.
+ selector_include = []
+ selector_exclude = ["*"]
+
## Optional TLS Config
# tls_ca = "/path/to/cafile"
# tls_cert = "/path/to/certfile"
# tls_key = "/path/to/keyfile"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
+
+ ## Uncomment to remove deprecated metrics.
+ # fielddrop = ["terminated_reason"]
```
#### Kubernetes Permissions
If using [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), you will need to create a cluster role to list "persistentvolumes" and "nodes". You will then need to make an [aggregated ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) that will eventually be bound to a user or group.
+
```yaml
---
kind: ClusterRole
@@ -70,9 +93,9 @@ metadata:
labels:
rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
rules:
-- apiGroups: [""]
- resources: ["persistentvolumes","nodes"]
- verbs: ["get","list"]
+ - apiGroups: [""]
+ resources: ["persistentvolumes", "nodes"]
+ verbs: ["get", "list"]
---
kind: ClusterRole
@@ -81,14 +104,15 @@ metadata:
name: influx:telegraf
aggregationRule:
clusterRoleSelectors:
- - matchLabels:
- rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
- - matchLabels:
- rbac.authorization.k8s.io/aggregate-to-view: "true"
+ - matchLabels:
+ rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
+ - matchLabels:
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
rules: [] # Rules are automatically filled in by the controller manager.
```
Bind the newly created aggregated ClusterRole with the following config file, updating the subjects as needed.
+
```yaml
---
apiVersion: rbac.authorization.k8s.io/v1
@@ -100,18 +124,18 @@ roleRef:
kind: ClusterRole
name: influx:telegraf
subjects:
-- kind: ServiceAccount
- name: telegraf
- namespace: default
+ - kind: ServiceAccount
+ name: telegraf
+ namespace: default
```
-
### Metrics:
-+ kubernetes_daemonset
+- kubernetes_daemonset
- tags:
- daemonset_name
- namespace
+ - selector (\*varies)
- fields:
- generation
- current_number_scheduled
@@ -122,16 +146,47 @@ subjects:
- number_unavailable
- updated_number_scheduled
-- kubernetes_deployment
+* kubernetes_deployment
- tags:
- deployment_name
- namespace
+ - selector (\*varies)
- fields:
- replicas_available
- replicas_unavailable
- created
-+ kubernetes_node
+- kubernetes_endpoints
+ - tags:
+ - endpoint_name
+ - namespace
+ - hostname
+ - node_name
+ - port_name
+ - port_protocol
+ - kind (\*varies)
+ - fields:
+ - created
+ - generation
+ - ready
+ - port
+
+* kubernetes_ingress
+ - tags:
+ - ingress_name
+ - namespace
+ - hostname
+ - ip
+ - backend_service_name
+ - path
+ - host
+ - fields:
+ - created
+ - generation
+ - backend_service_port
+ - tls
+
+- kubernetes_node
- tags:
- node_name
- fields:
@@ -142,7 +197,7 @@ subjects:
- allocatable_memory_bytes
- allocatable_pods
-- kubernetes_persistentvolume
+* kubernetes_persistentvolume
- tags:
- pv_name
- phase
@@ -150,34 +205,55 @@ subjects:
- fields:
- phase_type (int, [see below](#pv-phase_type))
-+ kubernetes_persistentvolumeclaim
+- kubernetes_persistentvolumeclaim
- tags:
- pvc_name
- namespace
- phase
- storageclass
+ - selector (\*varies)
- fields:
- phase_type (int, [see below](#pvc-phase_type))
-- kubernetes_pod_container
+* kubernetes_pod_container
- tags:
- container_name
- namespace
- node_name
- pod_name
+ - node_selector (\*varies)
+ - state
+ - readiness
- fields:
- restarts_total
- - state
- - terminated_reason
+ - state_code
+ - state_reason
+ - terminated_reason (string, deprecated in 1.15: use `state_reason` instead)
- resource_requests_cpu_units
- resource_requests_memory_bytes
- resource_limits_cpu_units
- resource_limits_memory_bytes
-+ kubernetes_statefulset
+- kubernetes_service
+ - tags:
+ - service_name
+ - namespace
+ - port_name
+ - port_protocol
+ - external_name
+ - cluster_ip
+ - selector (\*varies)
+ - fields
+ - created
+ - generation
+ - port
+ - target_port
+
+* kubernetes_statefulset
- tags:
- statefulset_name
- namespace
+ - selector (\*varies)
- fields:
- created
- generation
@@ -192,42 +268,41 @@ subjects:
The persistentvolume "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value.
-|Tag value |Corresponding field value|
------------|-------------------------|
-|bound | 0 |
-|failed | 1 |
-|pending | 2 |
-|released | 3 |
-|available | 4 |
-|unknown | 5 |
+| Tag value | Corresponding field value |
+| --------- | ------------------------- |
+| bound | 0 |
+| failed | 1 |
+| pending | 2 |
+| released | 3 |
+| available | 4 |
+| unknown | 5 |
#### pvc `phase_type`
The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value.
-|Tag value |Corresponding field value|
------------|-------------------------|
-|bound | 0 |
-|lost | 1 |
-|pending | 2 |
-|unknown | 3 |
-
+| Tag value | Corresponding field value |
+| --------- | ------------------------- |
+| bound | 0 |
+| lost | 1 |
+| pending | 2 |
+| unknown | 3 |
### Example Output:
```
kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000
-kubernetes_daemonset,daemonset_name=telegraf,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000
-kubernetes_deployment,deployment_name=deployd,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000
+kubernetes_daemonset,daemonset_name=telegraf,selector_select1=s1,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000
+kubernetes_deployment,deployment_name=deployd,selector_select1=s1,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000
kubernetes_node,node_name=ip-172-17-0-2.internal allocatable_pods=110i,capacity_memory_bytes=128837533696,capacity_pods=110i,capacity_cpu_cores=16i,allocatable_cpu_cores=16i,allocatable_memory_bytes=128732676096 1547597616000000000
kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-222222222222,storageclass=ebs-1-retain phase_type=3i 1547597616000000000
-kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,storageclass=ebs-1-retain phase_type=0i 1547597615000000000
+kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,selector_select1=s1,storageclass=ebs-1-retain phase_type=0i 1547597615000000000
kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1 last_transition_time=1547578322000000000i,ready="false" 1547597616000000000
-kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1,state=running resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,terminated_reason="",resource_requests_memory_bytes=524288000 1547597616000000000
-kubernetes_statefulset,namespace=default,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000
+kubernetes_service,cluster_ip=172.29.61.80,namespace=redis-cache-0001,port_name=redis,port_protocol=TCP,selector_app=myapp,selector_io.kompose.service=redis,selector_role=slave,service_name=redis-slave created=1588690034000000000i,generation=0i,port=6379i,target_port=0i 1547597616000000000
+kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",resource_requests_memory_bytes=524288000 1547597616000000000
+kubernetes_statefulset,namespace=default,selector_select1=s1,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000
```
-
[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000
@@ -236,4 +311,3 @@ kubernetes_statefulset,namespace=default,statefulset_name=etcd replicas_updated=
[series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
[influx-docs]: https://docs.influxdata.com/influxdb/latest/
[k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/
-[tick-charts]: https://github.com/influxdata/tick-charts
diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go
index bf207b0ad46d6..d9b24ba5c0a95 100644
--- a/plugins/inputs/kube_inventory/client.go
+++ b/plugins/inputs/kube_inventory/client.go
@@ -5,11 +5,11 @@ import (
"time"
"github.com/ericchiang/k8s"
- "github.com/ericchiang/k8s/apis/apps/v1beta1"
- "github.com/ericchiang/k8s/apis/apps/v1beta2"
- "github.com/ericchiang/k8s/apis/core/v1"
+ v1APPS "github.com/ericchiang/k8s/apis/apps/v1"
+ v1 "github.com/ericchiang/k8s/apis/core/v1"
+ v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
)
type client struct {
@@ -47,15 +47,29 @@ func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tl
}, nil
}
-func (c *client) getDaemonSets(ctx context.Context) (*v1beta2.DaemonSetList, error) {
- list := new(v1beta2.DaemonSetList)
+func (c *client) getDaemonSets(ctx context.Context) (*v1APPS.DaemonSetList, error) {
+ list := new(v1APPS.DaemonSetList)
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return list, c.List(ctx, c.namespace, list)
}
-func (c *client) getDeployments(ctx context.Context) (*v1beta1.DeploymentList, error) {
- list := &v1beta1.DeploymentList{}
+func (c *client) getDeployments(ctx context.Context) (*v1APPS.DeploymentList, error) {
+ list := &v1APPS.DeploymentList{}
+ ctx, cancel := context.WithTimeout(ctx, c.timeout)
+ defer cancel()
+ return list, c.List(ctx, c.namespace, list)
+}
+
+func (c *client) getEndpoints(ctx context.Context) (*v1.EndpointsList, error) {
+ list := new(v1.EndpointsList)
+ ctx, cancel := context.WithTimeout(ctx, c.timeout)
+ defer cancel()
+ return list, c.List(ctx, c.namespace, list)
+}
+
+func (c *client) getIngress(ctx context.Context) (*v1beta1EXT.IngressList, error) {
+ list := new(v1beta1EXT.IngressList)
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return list, c.List(ctx, c.namespace, list)
@@ -89,8 +103,15 @@ func (c *client) getPods(ctx context.Context) (*v1.PodList, error) {
return list, c.List(ctx, c.namespace, list)
}
-func (c *client) getStatefulSets(ctx context.Context) (*v1beta1.StatefulSetList, error) {
- list := new(v1beta1.StatefulSetList)
+func (c *client) getServices(ctx context.Context) (*v1.ServiceList, error) {
+ list := new(v1.ServiceList)
+ ctx, cancel := context.WithTimeout(ctx, c.timeout)
+ defer cancel()
+ return list, c.List(ctx, c.namespace, list)
+}
+
+func (c *client) getStatefulSets(ctx context.Context) (*v1APPS.StatefulSetList, error) {
+ list := new(v1APPS.StatefulSetList)
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return list, c.List(ctx, c.namespace, list)
diff --git a/plugins/inputs/kube_inventory/client_test.go b/plugins/inputs/kube_inventory/client_test.go
index 4f54755b02362..88411ea367ccf 100644
--- a/plugins/inputs/kube_inventory/client_test.go
+++ b/plugins/inputs/kube_inventory/client_test.go
@@ -4,7 +4,8 @@ import (
"testing"
"time"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/ericchiang/k8s/util/intstr"
+ "github.com/influxdata/telegraf/plugins/common/tls"
)
type mockHandler struct {
@@ -27,6 +28,13 @@ func toBoolPtr(b bool) *bool {
return &b
}
+func toIntStrPtrS(s string) *intstr.IntOrString {
+ return &intstr.IntOrString{StrVal: &s}
+}
+
+func toIntStrPtrI(i int32) *intstr.IntOrString {
+ return &intstr.IntOrString{IntVal: &i}
+}
func TestNewClient(t *testing.T) {
_, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{})
if err != nil {
diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go
index 92c7bc195763e..db612a5e33b2a 100644
--- a/plugins/inputs/kube_inventory/daemonset.go
+++ b/plugins/inputs/kube_inventory/daemonset.go
@@ -4,7 +4,7 @@ import (
"context"
"time"
- "github.com/ericchiang/k8s/apis/apps/v1beta2"
+ "github.com/ericchiang/k8s/apis/apps/v1"
"github.com/influxdata/telegraf"
)
@@ -23,7 +23,7 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern
}
}
-func (ki *KubernetesInventory) gatherDaemonSet(d v1beta2.DaemonSet, acc telegraf.Accumulator) error {
+func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error {
fields := map[string]interface{}{
"generation": d.Metadata.GetGeneration(),
"current_number_scheduled": d.Status.GetCurrentNumberScheduled(),
@@ -38,6 +38,11 @@ func (ki *KubernetesInventory) gatherDaemonSet(d v1beta2.DaemonSet, acc telegraf
"daemonset_name": d.Metadata.GetName(),
"namespace": d.Metadata.GetNamespace(),
}
+ for key, val := range d.GetSpec().GetSelector().GetMatchLabels() {
+ if ki.selectorFilter.Match(key) {
+ tags["selector_"+key] = val
+ }
+ }
if d.Metadata.CreationTimestamp.GetSeconds() != 0 {
fields["created"] = time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano()
diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go
index 3f11df1ca108d..0a13f1e42cb3d 100644
--- a/plugins/inputs/kube_inventory/daemonset_test.go
+++ b/plugins/inputs/kube_inventory/daemonset_test.go
@@ -1,10 +1,12 @@
package kube_inventory
import (
+ "reflect"
+ "strings"
"testing"
"time"
- "github.com/ericchiang/k8s/apis/apps/v1beta2"
+ "github.com/ericchiang/k8s/apis/apps/v1"
metav1 "github.com/ericchiang/k8s/apis/meta/v1"
"github.com/influxdata/telegraf/testutil"
@@ -12,6 +14,8 @@ import (
func TestDaemonSet(t *testing.T) {
cli := &client{}
+ selectInclude := []string{}
+ selectExclude := []string{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
@@ -24,7 +28,7 @@ func TestDaemonSet(t *testing.T) {
name: "no daemon set",
handler: &mockHandler{
responseMap: map[string]interface{}{
- "/daemonsets/": &v1beta2.DaemonSetList{},
+ "/daemonsets/": &v1.DaemonSetList{},
},
},
hasError: false,
@@ -33,10 +37,10 @@ func TestDaemonSet(t *testing.T) {
name: "collect daemonsets",
handler: &mockHandler{
responseMap: map[string]interface{}{
- "/daemonsets/": &v1beta2.DaemonSetList{
- Items: []*v1beta2.DaemonSet{
+ "/daemonsets/": &v1.DaemonSetList{
+ Items: []*v1.DaemonSet{
{
- Status: &v1beta2.DaemonSetStatus{
+ Status: &v1.DaemonSetStatus{
CurrentNumberScheduled: toInt32Ptr(3),
DesiredNumberScheduled: toInt32Ptr(5),
NumberAvailable: toInt32Ptr(2),
@@ -55,6 +59,14 @@ func TestDaemonSet(t *testing.T) {
},
CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
},
+ Spec: &v1.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
+ },
},
},
},
@@ -75,8 +87,10 @@ func TestDaemonSet(t *testing.T) {
"created": now.UnixNano(),
},
Tags: map[string]string{
- "daemonset_name": "daemon1",
- "namespace": "ns1",
+ "daemonset_name": "daemon1",
+ "namespace": "ns1",
+ "selector_select1": "s1",
+ "selector_select2": "s2",
},
},
},
@@ -87,10 +101,13 @@ func TestDaemonSet(t *testing.T) {
for _, v := range tests {
ks := &KubernetesInventory{
- client: cli,
+ client: cli,
+ SelectorInclude: selectInclude,
+ SelectorExclude: selectExclude,
}
+ ks.createSelectorFilters()
acc := new(testutil.Accumulator)
- for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1beta2.DaemonSetList)).Items {
+ for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
err := ks.gatherDaemonSet(*dset, acc)
if err != nil {
t.Errorf("Failed to gather daemonset - %s", err.Error())
@@ -121,3 +138,170 @@ func TestDaemonSet(t *testing.T) {
}
}
}
+
+func TestDaemonSetSelectorFilter(t *testing.T) {
+ cli := &client{}
+ now := time.Now()
+ now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
+
+ responseMap := map[string]interface{}{
+ "/daemonsets/": &v1.DaemonSetList{
+ Items: []*v1.DaemonSet{
+ {
+ Status: &v1.DaemonSetStatus{
+ CurrentNumberScheduled: toInt32Ptr(3),
+ DesiredNumberScheduled: toInt32Ptr(5),
+ NumberAvailable: toInt32Ptr(2),
+ NumberMisscheduled: toInt32Ptr(2),
+ NumberReady: toInt32Ptr(1),
+ NumberUnavailable: toInt32Ptr(1),
+ UpdatedNumberScheduled: toInt32Ptr(2),
+ },
+ Metadata: &metav1.ObjectMeta{
+ Generation: toInt64Ptr(11221),
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("daemon1"),
+ Labels: map[string]string{
+ "lab1": "v1",
+ "lab2": "v2",
+ },
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
+ },
+ Spec: &v1.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ tests := []struct {
+ name string
+ handler *mockHandler
+ hasError bool
+ include []string
+ exclude []string
+ expected map[string]string
+ }{
+ {
+ name: "nil filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: nil,
+ exclude: nil,
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "empty filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "include filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"select1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude filter equals only non-excluded selectors (overrides include filter)",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"select2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "include glob filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"*1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ }
+ for _, v := range tests {
+ ks := &KubernetesInventory{
+ client: cli,
+ }
+ ks.SelectorInclude = v.include
+ ks.SelectorExclude = v.exclude
+ ks.createSelectorFilters()
+ acc := new(testutil.Accumulator)
+ for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
+ err := ks.gatherDaemonSet(*dset, acc)
+ if err != nil {
+ t.Errorf("Failed to gather daemonset - %s", err.Error())
+ }
+ }
+
+ // Grab selector tags
+ actual := map[string]string{}
+ for _, metric := range acc.Metrics {
+ for key, val := range metric.Tags {
+ if strings.Contains(key, "selector_") {
+ actual[key] = val
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v.expected, actual) {
+ t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
+ }
+ }
+}
diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go
index 2d72e8d03a4f0..b91216765e9a6 100644
--- a/plugins/inputs/kube_inventory/deployment.go
+++ b/plugins/inputs/kube_inventory/deployment.go
@@ -4,8 +4,7 @@ import (
"context"
"time"
- "github.com/ericchiang/k8s/apis/apps/v1beta1"
-
+ v1 "github.com/ericchiang/k8s/apis/apps/v1"
"github.com/influxdata/telegraf"
)
@@ -23,7 +22,7 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber
}
}
-func (ki *KubernetesInventory) gatherDeployment(d v1beta1.Deployment, acc telegraf.Accumulator) error {
+func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error {
fields := map[string]interface{}{
"replicas_available": d.Status.GetAvailableReplicas(),
"replicas_unavailable": d.Status.GetUnavailableReplicas(),
@@ -33,6 +32,11 @@ func (ki *KubernetesInventory) gatherDeployment(d v1beta1.Deployment, acc telegr
"deployment_name": d.Metadata.GetName(),
"namespace": d.Metadata.GetNamespace(),
}
+ for key, val := range d.GetSpec().GetSelector().GetMatchLabels() {
+ if ki.selectorFilter.Match(key) {
+ tags["selector_"+key] = val
+ }
+ }
acc.AddFields(deploymentMeasurement, fields, tags)
diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go
index 0429b84fa1d87..9407c84d91322 100644
--- a/plugins/inputs/kube_inventory/deployment_test.go
+++ b/plugins/inputs/kube_inventory/deployment_test.go
@@ -1,10 +1,12 @@
package kube_inventory
import (
+ "reflect"
+ "strings"
"testing"
"time"
- "github.com/ericchiang/k8s/apis/apps/v1beta1"
+ "github.com/ericchiang/k8s/apis/apps/v1"
metav1 "github.com/ericchiang/k8s/apis/meta/v1"
"github.com/ericchiang/k8s/util/intstr"
"github.com/influxdata/telegraf/testutil"
@@ -12,7 +14,8 @@ import (
func TestDeployment(t *testing.T) {
cli := &client{}
-
+ selectInclude := []string{}
+ selectExclude := []string{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
outputMetric := &testutil.Metric{
@@ -22,8 +25,10 @@ func TestDeployment(t *testing.T) {
"created": now.UnixNano(),
},
Tags: map[string]string{
- "namespace": "ns1",
- "deployment_name": "deploy1",
+ "namespace": "ns1",
+ "deployment_name": "deploy1",
+ "selector_select1": "s1",
+ "selector_select2": "s2",
},
}
@@ -37,7 +42,7 @@ func TestDeployment(t *testing.T) {
name: "no deployments",
handler: &mockHandler{
responseMap: map[string]interface{}{
- "/deployments/": &v1beta1.DeploymentList{},
+ "/deployments/": &v1.DeploymentList{},
},
},
hasError: false,
@@ -46,19 +51,19 @@ func TestDeployment(t *testing.T) {
name: "collect deployments",
handler: &mockHandler{
responseMap: map[string]interface{}{
- "/deployments/": &v1beta1.DeploymentList{
- Items: []*v1beta1.Deployment{
+ "/deployments/": &v1.DeploymentList{
+ Items: []*v1.Deployment{
{
- Status: &v1beta1.DeploymentStatus{
+ Status: &v1.DeploymentStatus{
Replicas: toInt32Ptr(3),
AvailableReplicas: toInt32Ptr(1),
UnavailableReplicas: toInt32Ptr(4),
UpdatedReplicas: toInt32Ptr(2),
ObservedGeneration: toInt64Ptr(9121),
},
- Spec: &v1beta1.DeploymentSpec{
- Strategy: &v1beta1.DeploymentStrategy{
- RollingUpdate: &v1beta1.RollingUpdateDeployment{
+ Spec: &v1.DeploymentSpec{
+ Strategy: &v1.DeploymentStrategy{
+ RollingUpdate: &v1.RollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{
IntVal: toInt32Ptr(30),
},
@@ -68,6 +73,12 @@ func TestDeployment(t *testing.T) {
},
},
Replicas: toInt32Ptr(4),
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
},
Metadata: &metav1.ObjectMeta{
Generation: toInt64Ptr(11221),
@@ -95,10 +106,13 @@ func TestDeployment(t *testing.T) {
for _, v := range tests {
ks := &KubernetesInventory{
- client: cli,
+ client: cli,
+ SelectorInclude: selectInclude,
+ SelectorExclude: selectExclude,
}
+ ks.createSelectorFilters()
acc := new(testutil.Accumulator)
- for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1beta1.DeploymentList)).Items {
+ for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items {
err := ks.gatherDeployment(*deployment, acc)
if err != nil {
t.Errorf("Failed to gather deployment - %s", err.Error())
@@ -129,3 +143,179 @@ func TestDeployment(t *testing.T) {
}
}
}
+
+func TestDeploymentSelectorFilter(t *testing.T) {
+ cli := &client{}
+ now := time.Now()
+ now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
+
+ responseMap := map[string]interface{}{
+ "/deployments/": &v1.DeploymentList{
+ Items: []*v1.Deployment{
+ {
+ Status: &v1.DeploymentStatus{
+ Replicas: toInt32Ptr(3),
+ AvailableReplicas: toInt32Ptr(1),
+ UnavailableReplicas: toInt32Ptr(4),
+ UpdatedReplicas: toInt32Ptr(2),
+ ObservedGeneration: toInt64Ptr(9121),
+ },
+ Spec: &v1.DeploymentSpec{
+ Strategy: &v1.DeploymentStrategy{
+ RollingUpdate: &v1.RollingUpdateDeployment{
+ MaxUnavailable: &intstr.IntOrString{
+ IntVal: toInt32Ptr(30),
+ },
+ MaxSurge: &intstr.IntOrString{
+ IntVal: toInt32Ptr(20),
+ },
+ },
+ },
+ Replicas: toInt32Ptr(4),
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
+ },
+ Metadata: &metav1.ObjectMeta{
+ Generation: toInt64Ptr(11221),
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("deploy1"),
+ Labels: map[string]string{
+ "lab1": "v1",
+ "lab2": "v2",
+ },
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
+ },
+ },
+ },
+ },
+ }
+
+ tests := []struct {
+ name string
+ handler *mockHandler
+ hasError bool
+ include []string
+ exclude []string
+ expected map[string]string
+ }{
+ {
+ name: "nil filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: nil,
+ exclude: nil,
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "empty filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "include filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"select1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude filter equals only non-excluded selectors (overrides include filter)",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"select2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "include glob filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"*1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ }
+ for _, v := range tests {
+ ks := &KubernetesInventory{
+ client: cli,
+ }
+ ks.SelectorInclude = v.include
+ ks.SelectorExclude = v.exclude
+ ks.createSelectorFilters()
+ acc := new(testutil.Accumulator)
+ for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items {
+ err := ks.gatherDeployment(*deployment, acc)
+ if err != nil {
+ t.Errorf("Failed to gather deployment - %s", err.Error())
+ }
+ }
+
+ // Grab selector tags
+ actual := map[string]string{}
+ for _, metric := range acc.Metrics {
+ for key, val := range metric.Tags {
+ if strings.Contains(key, "selector_") {
+ actual[key] = val
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v.expected, actual) {
+ t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
+ }
+ }
+}
diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go
new file mode 100644
index 0000000000000..7298789da8e08
--- /dev/null
+++ b/plugins/inputs/kube_inventory/endpoint.go
@@ -0,0 +1,82 @@
+package kube_inventory
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "github.com/ericchiang/k8s/apis/core/v1"
+
+ "github.com/influxdata/telegraf"
+)
+
+func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
+ list, err := ki.client.getEndpoints(ctx)
+ if err != nil {
+ acc.AddError(err)
+ return
+ }
+ for _, i := range list.Items {
+ if err = ki.gatherEndpoint(*i, acc); err != nil {
+ acc.AddError(err)
+ return
+ }
+ }
+}
+
+func (ki *KubernetesInventory) gatherEndpoint(e v1.Endpoints, acc telegraf.Accumulator) error {
+ if e.Metadata.CreationTimestamp.GetSeconds() == 0 && e.Metadata.CreationTimestamp.GetNanos() == 0 {
+ return nil
+ }
+
+ fields := map[string]interface{}{
+ "created": time.Unix(e.Metadata.CreationTimestamp.GetSeconds(), int64(e.Metadata.CreationTimestamp.GetNanos())).UnixNano(),
+ "generation": e.Metadata.GetGeneration(),
+ }
+
+ tags := map[string]string{
+ "endpoint_name": e.Metadata.GetName(),
+ "namespace": e.Metadata.GetNamespace(),
+ }
+
+ for _, endpoint := range e.GetSubsets() {
+ for _, readyAddr := range endpoint.GetAddresses() {
+ fields["ready"] = true
+
+ tags["hostname"] = readyAddr.GetHostname()
+ tags["node_name"] = readyAddr.GetNodeName()
+ if readyAddr.TargetRef != nil {
+ tags[strings.ToLower(readyAddr.GetTargetRef().GetKind())] = readyAddr.GetTargetRef().GetName()
+ }
+
+ for _, port := range endpoint.GetPorts() {
+ fields["port"] = port.GetPort()
+
+ tags["port_name"] = port.GetName()
+ tags["port_protocol"] = port.GetProtocol()
+
+ acc.AddFields(endpointMeasurement, fields, tags)
+ }
+ }
+ for _, notReadyAddr := range endpoint.GetNotReadyAddresses() {
+ fields["ready"] = false
+
+ tags["hostname"] = notReadyAddr.GetHostname()
+ tags["node_name"] = notReadyAddr.GetNodeName()
+ if notReadyAddr.TargetRef != nil {
+ tags[strings.ToLower(notReadyAddr.GetTargetRef().GetKind())] = notReadyAddr.GetTargetRef().GetName()
+ }
+
+ for _, port := range endpoint.GetPorts() {
+ fields["port"] = port.GetPort()
+
+ tags["port_name"] = port.GetName()
+ tags["port_protocol"] = port.GetProtocol()
+
+ acc.AddFields(endpointMeasurement, fields, tags)
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go
new file mode 100644
index 0000000000000..b88c388162bd2
--- /dev/null
+++ b/plugins/inputs/kube_inventory/endpoint_test.go
@@ -0,0 +1,194 @@
+package kube_inventory
+
+import (
+ "testing"
+ "time"
+
+ "github.com/ericchiang/k8s/apis/core/v1"
+ metav1 "github.com/ericchiang/k8s/apis/meta/v1"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func TestEndpoint(t *testing.T) {
+ cli := &client{}
+
+ now := time.Now()
+ now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
+
+ tests := []struct {
+ name string
+ handler *mockHandler
+ output *testutil.Accumulator
+ hasError bool
+ }{
+ {
+ name: "no endpoints",
+ handler: &mockHandler{
+ responseMap: map[string]interface{}{
+ "/endpoints/": &v1.EndpointsList{},
+ },
+ },
+ hasError: false,
+ },
+ {
+ name: "collect ready endpoints",
+ handler: &mockHandler{
+ responseMap: map[string]interface{}{
+ "/endpoints/": &v1.EndpointsList{
+ Items: []*v1.Endpoints{
+ {
+ Subsets: []*v1.EndpointSubset{
+ {
+ Addresses: []*v1.EndpointAddress{
+ {
+ Hostname: toStrPtr("storage-6"),
+ NodeName: toStrPtr("b.storage.internal"),
+ TargetRef: &v1.ObjectReference{
+ Kind: toStrPtr("pod"),
+ Name: toStrPtr("storage-6"),
+ },
+ },
+ },
+ Ports: []*v1.EndpointPort{
+ {
+ Name: toStrPtr("server"),
+ Protocol: toStrPtr("TCP"),
+ Port: toInt32Ptr(8080),
+ },
+ },
+ },
+ },
+ Metadata: &metav1.ObjectMeta{
+ Generation: toInt64Ptr(12),
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("storage"),
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
+ },
+ },
+ },
+ },
+ },
+ },
+ output: &testutil.Accumulator{
+ Metrics: []*testutil.Metric{
+ {
+ Fields: map[string]interface{}{
+ "ready": true,
+ "port": int32(8080),
+ "generation": int64(12),
+ "created": now.UnixNano(),
+ },
+ Tags: map[string]string{
+ "endpoint_name": "storage",
+ "namespace": "ns1",
+ "hostname": "storage-6",
+ "node_name": "b.storage.internal",
+ "port_name": "server",
+ "port_protocol": "TCP",
+ "pod": "storage-6",
+ },
+ },
+ },
+ },
+ hasError: false,
+ },
+ {
+ name: "collect notready endpoints",
+ handler: &mockHandler{
+ responseMap: map[string]interface{}{
+ "/endpoints/": &v1.EndpointsList{
+ Items: []*v1.Endpoints{
+ {
+ Subsets: []*v1.EndpointSubset{
+ {
+ NotReadyAddresses: []*v1.EndpointAddress{
+ {
+ Hostname: toStrPtr("storage-6"),
+ NodeName: toStrPtr("b.storage.internal"),
+ TargetRef: &v1.ObjectReference{
+ Kind: toStrPtr("pod"),
+ Name: toStrPtr("storage-6"),
+ },
+ },
+ },
+ Ports: []*v1.EndpointPort{
+ {
+ Name: toStrPtr("server"),
+ Protocol: toStrPtr("TCP"),
+ Port: toInt32Ptr(8080),
+ },
+ },
+ },
+ },
+ Metadata: &metav1.ObjectMeta{
+ Generation: toInt64Ptr(12),
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("storage"),
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
+ },
+ },
+ },
+ },
+ },
+ },
+ output: &testutil.Accumulator{
+ Metrics: []*testutil.Metric{
+ {
+ Fields: map[string]interface{}{
+ "ready": false,
+ "port": int32(8080),
+ "generation": int64(12),
+ "created": now.UnixNano(),
+ },
+ Tags: map[string]string{
+ "endpoint_name": "storage",
+ "namespace": "ns1",
+ "hostname": "storage-6",
+ "node_name": "b.storage.internal",
+ "port_name": "server",
+ "port_protocol": "TCP",
+ "pod": "storage-6",
+ },
+ },
+ },
+ },
+ hasError: false,
+ },
+ }
+
+ for _, v := range tests {
+ ks := &KubernetesInventory{
+ client: cli,
+ }
+ acc := new(testutil.Accumulator)
+ for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*v1.EndpointsList)).Items {
+ err := ks.gatherEndpoint(*endpoint, acc)
+ if err != nil {
+ t.Errorf("Failed to gather endpoint - %s", err.Error())
+ }
+ }
+
+ err := acc.FirstError()
+ if err == nil && v.hasError {
+ t.Fatalf("%s failed, should have error", v.name)
+ } else if err != nil && !v.hasError {
+ t.Fatalf("%s failed, err: %v", v.name, err)
+ }
+ if v.output == nil && len(acc.Metrics) > 0 {
+ t.Fatalf("%s: collected extra data", v.name)
+ } else if v.output != nil && len(v.output.Metrics) > 0 {
+ for i := range v.output.Metrics {
+ for k, m := range v.output.Metrics[i].Tags {
+ if acc.Metrics[i].Tags[k] != m {
+ t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k])
+ }
+ }
+ for k, m := range v.output.Metrics[i].Fields {
+ if acc.Metrics[i].Fields[k] != m {
+ t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k])
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go
new file mode 100644
index 0000000000000..6d5c8019927cf
--- /dev/null
+++ b/plugins/inputs/kube_inventory/ingress.go
@@ -0,0 +1,60 @@
+package kube_inventory
+
+import (
+ "context"
+ "time"
+
+ v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1"
+
+ "github.com/influxdata/telegraf"
+)
+
+func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
+ list, err := ki.client.getIngress(ctx)
+ if err != nil {
+ acc.AddError(err)
+ return
+ }
+ for _, i := range list.Items {
+ if err = ki.gatherIngress(*i, acc); err != nil {
+ acc.AddError(err)
+ return
+ }
+ }
+}
+
+func (ki *KubernetesInventory) gatherIngress(i v1beta1EXT.Ingress, acc telegraf.Accumulator) error {
+ if i.Metadata.CreationTimestamp.GetSeconds() == 0 && i.Metadata.CreationTimestamp.GetNanos() == 0 {
+ return nil
+ }
+
+ fields := map[string]interface{}{
+ "created": time.Unix(i.Metadata.CreationTimestamp.GetSeconds(), int64(i.Metadata.CreationTimestamp.GetNanos())).UnixNano(),
+ "generation": i.Metadata.GetGeneration(),
+ }
+
+ tags := map[string]string{
+ "ingress_name": i.Metadata.GetName(),
+ "namespace": i.Metadata.GetNamespace(),
+ }
+
+ for _, ingress := range i.GetStatus().GetLoadBalancer().GetIngress() {
+ tags["hostname"] = ingress.GetHostname()
+ tags["ip"] = ingress.GetIp()
+
+ for _, rule := range i.GetSpec().GetRules() {
+ for _, path := range rule.GetIngressRuleValue().GetHttp().GetPaths() {
+ fields["backend_service_port"] = path.GetBackend().GetServicePort().GetIntVal()
+ fields["tls"] = i.GetSpec().GetTls() != nil
+
+ tags["backend_service_name"] = path.GetBackend().GetServiceName()
+ tags["path"] = path.GetPath()
+ tags["host"] = rule.GetHost()
+
+ acc.AddFields(ingressMeasurement, fields, tags)
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go
new file mode 100644
index 0000000000000..2d111801a96f3
--- /dev/null
+++ b/plugins/inputs/kube_inventory/ingress_test.go
@@ -0,0 +1,142 @@
+package kube_inventory
+
+import (
+ "testing"
+ "time"
+
+ v1 "github.com/ericchiang/k8s/apis/core/v1"
+ v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1"
+ metav1 "github.com/ericchiang/k8s/apis/meta/v1"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func TestIngress(t *testing.T) {
+ cli := &client{}
+
+ now := time.Now()
+ now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
+
+ tests := []struct {
+ name string
+ handler *mockHandler
+ output *testutil.Accumulator
+ hasError bool
+ }{
+ {
+ name: "no ingress",
+ handler: &mockHandler{
+ responseMap: map[string]interface{}{
+ "/ingress/": &v1beta1EXT.IngressList{},
+ },
+ },
+ hasError: false,
+ },
+ {
+ name: "collect ingress",
+ handler: &mockHandler{
+ responseMap: map[string]interface{}{
+ "/ingress/": &v1beta1EXT.IngressList{
+ Items: []*v1beta1EXT.Ingress{
+ {
+ Status: &v1beta1EXT.IngressStatus{
+ LoadBalancer: &v1.LoadBalancerStatus{
+ Ingress: []*v1.LoadBalancerIngress{
+ {
+ Hostname: toStrPtr("chron-1"),
+ Ip: toStrPtr("1.0.0.127"),
+ },
+ },
+ },
+ },
+ Spec: &v1beta1EXT.IngressSpec{
+ Rules: []*v1beta1EXT.IngressRule{
+ {
+ Host: toStrPtr("ui.internal"),
+ IngressRuleValue: &v1beta1EXT.IngressRuleValue{
+ Http: &v1beta1EXT.HTTPIngressRuleValue{
+ Paths: []*v1beta1EXT.HTTPIngressPath{
+ {
+ Path: toStrPtr("/"),
+ Backend: &v1beta1EXT.IngressBackend{
+ ServiceName: toStrPtr("chronografd"),
+ ServicePort: toIntStrPtrI(8080),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Metadata: &metav1.ObjectMeta{
+ Generation: toInt64Ptr(12),
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("ui-lb"),
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
+ },
+ },
+ },
+ },
+ },
+ },
+ output: &testutil.Accumulator{
+ Metrics: []*testutil.Metric{
+ {
+ Fields: map[string]interface{}{
+ "tls": false,
+ "backend_service_port": int32(8080),
+ "generation": int64(12),
+ "created": now.UnixNano(),
+ },
+ Tags: map[string]string{
+ "ingress_name": "ui-lb",
+ "namespace": "ns1",
+ "ip": "1.0.0.127",
+ "hostname": "chron-1",
+ "backend_service_name": "chronografd",
+ "host": "ui.internal",
+ "path": "/",
+ },
+ },
+ },
+ },
+ hasError: false,
+ },
+ }
+
+ for _, v := range tests {
+ ks := &KubernetesInventory{
+ client: cli,
+ }
+ acc := new(testutil.Accumulator)
+ for _, ingress := range ((v.handler.responseMap["/ingress/"]).(*v1beta1EXT.IngressList)).Items {
+ err := ks.gatherIngress(*ingress, acc)
+ if err != nil {
+ t.Errorf("Failed to gather ingress - %s", err.Error())
+ }
+ }
+
+ err := acc.FirstError()
+ if err == nil && v.hasError {
+ t.Fatalf("%s failed, should have error", v.name)
+ } else if err != nil && !v.hasError {
+ t.Fatalf("%s failed, err: %v", v.name, err)
+ }
+ if v.output == nil && len(acc.Metrics) > 0 {
+ t.Fatalf("%s: collected extra data", v.name)
+ } else if v.output != nil && len(v.output.Metrics) > 0 {
+ for i := range v.output.Metrics {
+ for k, m := range v.output.Metrics[i].Tags {
+ if acc.Metrics[i].Tags[k] != m {
+ t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k])
+ }
+ }
+ for k, m := range v.output.Metrics[i].Fields {
+ if acc.Metrics[i].Fields[k] != m {
+ t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k])
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go
index 57d31908d2bf1..0a2a882974e67 100644
--- a/plugins/inputs/kube_inventory/kube_state.go
+++ b/plugins/inputs/kube_inventory/kube_state.go
@@ -15,10 +15,14 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
+const (
+ defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token"
+)
+
// KubernetesInventory represents the config object for the plugin.
type KubernetesInventory struct {
URL string `toml:"url"`
@@ -30,8 +34,13 @@ type KubernetesInventory struct {
ResourceInclude []string `toml:"resource_include"`
MaxConfigMapAge internal.Duration `toml:"max_config_map_age"`
+ SelectorInclude []string `toml:"selector_include"`
+ SelectorExclude []string `toml:"selector_exclude"`
+
tls.ClientConfig
client *client
+
+ selectorFilter filter.Filter
}
var sampleConfig = `
@@ -42,6 +51,8 @@ var sampleConfig = `
# namespace = "default"
## Use bearer token for authorization. ('bearer_token' takes priority)
+ ## If both of these are empty, we'll use the default serviceaccount:
+ ## at: /run/secrets/kubernetes.io/serviceaccount/token
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
@@ -51,14 +62,20 @@ var sampleConfig = `
## Optional Resources to exclude from gathering
## Leave them with blank with try to gather everything available.
- ## Values can be - "daemonsets", deployments", "nodes", "persistentvolumes",
- ## "persistentvolumeclaims", "pods", "statefulsets"
+ ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes",
+ ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
# resource_exclude = [ "deployments", "nodes", "statefulsets" ]
## Optional Resources to include when gathering
## Overrides resource_exclude if both set.
# resource_include = [ "deployments", "nodes", "statefulsets" ]
+ ## selectors to include and exclude as tags. Globs accepted.
+ ## Note that an empty array for both will include all selectors as tags
+ ## selector_exclude overrides selector_include if both set.
+ # selector_include = []
+ # selector_exclude = ["*"]
+
## Optional TLS Config
# tls_ca = "/path/to/cafile"
# tls_cert = "/path/to/certfile"
@@ -77,19 +94,42 @@ func (ki *KubernetesInventory) Description() string {
return "Read metrics from the Kubernetes api"
}
-// Gather collects kubernetes metrics from a given URL.
-func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) {
- if ki.client == nil {
- if ki.client, err = ki.initClient(); err != nil {
+func (ki *KubernetesInventory) Init() error {
+ // If neither are provided, use the default service account.
+ if ki.BearerToken == "" && ki.BearerTokenString == "" {
+ ki.BearerToken = defaultServiceAccountPath
+ }
+
+ if ki.BearerToken != "" {
+ token, err := ioutil.ReadFile(ki.BearerToken)
+ if err != nil {
return err
}
+ ki.BearerTokenString = strings.TrimSpace(string(token))
+ }
+
+ var err error
+ ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerTokenString, ki.ResponseTimeout.Duration, ki.ClientConfig)
+
+ if err != nil {
+ return err
}
+ return nil
+}
+
+// Gather collects kubernetes metrics from a given URL.
+func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) {
resourceFilter, err := filter.NewIncludeExcludeFilter(ki.ResourceInclude, ki.ResourceExclude)
if err != nil {
return err
}
+ ki.selectorFilter, err = filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude)
+ if err != nil {
+ return err
+ }
+
wg := sync.WaitGroup{}
ctx := context.Background()
@@ -111,23 +151,14 @@ func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) {
var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory){
"daemonsets": collectDaemonSets,
"deployments": collectDeployments,
+ "endpoints": collectEndpoints,
+ "ingress": collectIngress,
"nodes": collectNodes,
- "persistentvolumes": collectPersistentVolumes,
- "persistentvolumeclaims": collectPersistentVolumeClaims,
"pods": collectPods,
+ "services": collectServices,
"statefulsets": collectStatefulSets,
-}
-
-func (ki *KubernetesInventory) initClient() (*client, error) {
- if ki.BearerToken != "" {
- token, err := ioutil.ReadFile(ki.BearerToken)
- if err != nil {
- return nil, err
- }
- ki.BearerTokenString = strings.TrimSpace(string(token))
- }
-
- return newClient(ki.URL, ki.Namespace, ki.BearerTokenString, ki.ResponseTimeout.Duration, ki.ClientConfig)
+ "persistentvolumes": collectPersistentVolumes,
+ "persistentvolumeclaims": collectPersistentVolumeClaims,
}
func atoi(s string) int64 {
@@ -141,12 +172,12 @@ func atoi(s string) int64 {
func convertQuantity(s string, m float64) int64 {
q, err := resource.ParseQuantity(s)
if err != nil {
- log.Printf("E! Failed to parse quantity - %v", err)
+ log.Printf("D! [inputs.kube_inventory] failed to parse quantity: %s", err.Error())
return 0
}
f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64)
if err != nil {
- log.Printf("E! Failed to parse float - %v", err)
+ log.Printf("D! [inputs.kube_inventory] failed to parse float: %s", err.Error())
return 0
}
if m < 1 {
@@ -155,13 +186,25 @@ func convertQuantity(s string, m float64) int64 {
return int64(f * m)
}
+func (ki *KubernetesInventory) createSelectorFilters() error {
+ filter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude)
+ if err != nil {
+ return err
+ }
+ ki.selectorFilter = filter
+ return nil
+}
+
var (
daemonSetMeasurement = "kubernetes_daemonset"
deploymentMeasurement = "kubernetes_deployment"
+ endpointMeasurement = "kubernetes_endpoint"
+ ingressMeasurement = "kubernetes_ingress"
nodeMeasurement = "kubernetes_node"
persistentVolumeMeasurement = "kubernetes_persistentvolume"
persistentVolumeClaimMeasurement = "kubernetes_persistentvolumeclaim"
podContainerMeasurement = "kubernetes_pod_container"
+ serviceMeasurement = "kubernetes_service"
statefulSetMeasurement = "kubernetes_statefulset"
)
@@ -170,6 +213,8 @@ func init() {
return &KubernetesInventory{
ResponseTimeout: internal.Duration{Duration: time.Second * 5},
Namespace: "default",
+ SelectorInclude: []string{},
+ SelectorExclude: []string{"*"},
}
})
}
diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim.go b/plugins/inputs/kube_inventory/persistentvolumeclaim.go
index 0663462ae992d..ac8c9f85a931c 100644
--- a/plugins/inputs/kube_inventory/persistentvolumeclaim.go
+++ b/plugins/inputs/kube_inventory/persistentvolumeclaim.go
@@ -42,6 +42,11 @@ func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolu
"phase": pvc.Status.GetPhase(),
"storageclass": pvc.Spec.GetStorageClassName(),
}
+ for key, val := range pvc.GetSpec().GetSelector().GetMatchLabels() {
+ if ki.selectorFilter.Match(key) {
+ tags["selector_"+key] = val
+ }
+ }
acc.AddFields(persistentVolumeClaimMeasurement, fields, tags)
diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go
index 8a50c0f2eb914..5155a5d3ba698 100644
--- a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go
+++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go
@@ -1,6 +1,8 @@
package kube_inventory
import (
+ "reflect"
+ "strings"
"testing"
"time"
@@ -12,6 +14,8 @@ import (
func TestPersistentVolumeClaim(t *testing.T) {
cli := &client{}
+ selectInclude := []string{}
+ selectExclude := []string{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
@@ -43,6 +47,12 @@ func TestPersistentVolumeClaim(t *testing.T) {
Spec: &v1.PersistentVolumeClaimSpec{
VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"),
StorageClassName: toStrPtr("ebs-1"),
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
},
Metadata: &metav1.ObjectMeta{
Namespace: toStrPtr("ns1"),
@@ -65,10 +75,12 @@ func TestPersistentVolumeClaim(t *testing.T) {
"phase_type": 0,
},
Tags: map[string]string{
- "pvc_name": "pc1",
- "namespace": "ns1",
- "storageclass": "ebs-1",
- "phase": "bound",
+ "pvc_name": "pc1",
+ "namespace": "ns1",
+ "storageclass": "ebs-1",
+ "phase": "bound",
+ "selector_select1": "s1",
+ "selector_select2": "s2",
},
},
},
@@ -79,8 +91,11 @@ func TestPersistentVolumeClaim(t *testing.T) {
for _, v := range tests {
ks := &KubernetesInventory{
- client: cli,
+ client: cli,
+ SelectorInclude: selectInclude,
+ SelectorExclude: selectExclude,
}
+ ks.createSelectorFilters()
acc := new(testutil.Accumulator)
for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items {
err := ks.gatherPersistentVolumeClaim(*pvc, acc)
@@ -113,3 +128,165 @@ func TestPersistentVolumeClaim(t *testing.T) {
}
}
}
+
+func TestPersistentVolumeClaimSelectorFilter(t *testing.T) {
+ cli := &client{}
+ now := time.Now()
+ now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
+
+ responseMap := map[string]interface{}{
+ "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{
+ Items: []*v1.PersistentVolumeClaim{
+ {
+ Status: &v1.PersistentVolumeClaimStatus{
+ Phase: toStrPtr("bound"),
+ },
+ Spec: &v1.PersistentVolumeClaimSpec{
+ VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"),
+ StorageClassName: toStrPtr("ebs-1"),
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
+ },
+ Metadata: &metav1.ObjectMeta{
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("pc1"),
+ Labels: map[string]string{
+ "lab1": "v1",
+ "lab2": "v2",
+ },
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
+ },
+ },
+ },
+ },
+ }
+
+ tests := []struct {
+ name string
+ handler *mockHandler
+ hasError bool
+ include []string
+ exclude []string
+ expected map[string]string
+ }{
+ {
+ name: "nil filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: nil,
+ exclude: nil,
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "empty filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "include filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"select1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude filter equals only non-excluded selectors (overrides include filter)",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"select2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "include glob filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"*1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ }
+ for _, v := range tests {
+ ks := &KubernetesInventory{
+ client: cli,
+ }
+ ks.SelectorInclude = v.include
+ ks.SelectorExclude = v.exclude
+ ks.createSelectorFilters()
+ acc := new(testutil.Accumulator)
+ for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items {
+ err := ks.gatherPersistentVolumeClaim(*pvc, acc)
+ if err != nil {
+ t.Errorf("Failed to gather pvc - %s", err.Error())
+ }
+ }
+
+ // Grab selector tags
+ actual := map[string]string{}
+ for _, metric := range acc.Metrics {
+ for key, val := range metric.Tags {
+ if strings.Contains(key, "selector_") {
+ actual[key] = val
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v.expected, actual) {
+ t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
+ }
+ }
+}
diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go
index 7b5207616d412..2f17f690d08c5 100644
--- a/plugins/inputs/kube_inventory/pod.go
+++ b/plugins/inputs/kube_inventory/pod.go
@@ -3,7 +3,7 @@ package kube_inventory
import (
"context"
- "github.com/ericchiang/k8s/apis/core/v1"
+ v1 "github.com/ericchiang/k8s/apis/core/v1"
"github.com/influxdata/telegraf"
)
@@ -29,15 +29,17 @@ func (ki *KubernetesInventory) gatherPod(p v1.Pod, acc telegraf.Accumulator) err
for i, cs := range p.Status.ContainerStatuses {
c := p.Spec.Containers[i]
- gatherPodContainer(*p.Spec.NodeName, p, *cs, *c, acc)
+ gatherPodContainer(*p.Spec.NodeName, ki, p, *cs, *c, acc)
}
return nil
}
-func gatherPodContainer(nodeName string, p v1.Pod, cs v1.ContainerStatus, c v1.Container, acc telegraf.Accumulator) {
+func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v1.ContainerStatus, c v1.Container, acc telegraf.Accumulator) {
stateCode := 3
+ stateReason := ""
state := "unknown"
+
switch {
case cs.State.Running != nil:
stateCode = 0
@@ -45,9 +47,16 @@ func gatherPodContainer(nodeName string, p v1.Pod, cs v1.ContainerStatus, c v1.C
case cs.State.Terminated != nil:
stateCode = 1
state = "terminated"
+ stateReason = cs.State.Terminated.GetReason()
case cs.State.Waiting != nil:
stateCode = 2
state = "waiting"
+ stateReason = cs.State.Waiting.GetReason()
+ }
+
+ readiness := "unready"
+ if cs.GetReady() {
+ readiness = "ready"
}
fields := map[string]interface{}{
@@ -55,12 +64,23 @@ func gatherPodContainer(nodeName string, p v1.Pod, cs v1.ContainerStatus, c v1.C
"state_code": stateCode,
"terminated_reason": cs.State.Terminated.GetReason(),
}
+
+ if stateReason != "" {
+ fields["state_reason"] = stateReason
+ }
+
tags := map[string]string{
"container_name": *c.Name,
"namespace": *p.Metadata.Namespace,
"node_name": *p.Spec.NodeName,
"pod_name": *p.Metadata.Name,
"state": state,
+ "readiness": readiness,
+ }
+ for key, val := range p.GetSpec().GetNodeSelector() {
+ if ki.selectorFilter.Match(key) {
+ tags["node_selector_"+key] = val
+ }
}
req := c.Resources.Requests
diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go
index 50b093880dbf1..d9b3221655027 100644
--- a/plugins/inputs/kube_inventory/pod_test.go
+++ b/plugins/inputs/kube_inventory/pod_test.go
@@ -1,10 +1,12 @@
package kube_inventory
import (
+ "reflect"
+ "strings"
"testing"
"time"
- "github.com/ericchiang/k8s/apis/core/v1"
+ v1 "github.com/ericchiang/k8s/apis/core/v1"
metav1 "github.com/ericchiang/k8s/apis/meta/v1"
"github.com/ericchiang/k8s/apis/resource"
"github.com/influxdata/telegraf/testutil"
@@ -12,6 +14,8 @@ import (
func TestPod(t *testing.T) {
cli := &client{}
+ selectInclude := []string{}
+ selectExclude := []string{}
now := time.Now()
started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location())
created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location())
@@ -44,7 +48,43 @@ func TestPod(t *testing.T) {
NodeName: toStrPtr("node1"),
Containers: []*v1.Container{
{
- Name: toStrPtr("forwarder"),
+ Name: toStrPtr("running"),
+ Image: toStrPtr("image1"),
+ Ports: []*v1.ContainerPort{
+ {
+ ContainerPort: toInt32Ptr(8080),
+ Protocol: toStrPtr("TCP"),
+ },
+ },
+ Resources: &v1.ResourceRequirements{
+ Limits: map[string]*resource.Quantity{
+ "cpu": {String_: toStrPtr("100m")},
+ },
+ Requests: map[string]*resource.Quantity{
+ "cpu": {String_: toStrPtr("100m")},
+ },
+ },
+ },
+ {
+ Name: toStrPtr("completed"),
+ Image: toStrPtr("image1"),
+ Ports: []*v1.ContainerPort{
+ {
+ ContainerPort: toInt32Ptr(8080),
+ Protocol: toStrPtr("TCP"),
+ },
+ },
+ Resources: &v1.ResourceRequirements{
+ Limits: map[string]*resource.Quantity{
+ "cpu": {String_: toStrPtr("100m")},
+ },
+ Requests: map[string]*resource.Quantity{
+ "cpu": {String_: toStrPtr("100m")},
+ },
+ },
+ },
+ {
+ Name: toStrPtr("waiting"),
Image: toStrPtr("image1"),
Ports: []*v1.ContainerPort{
{
@@ -76,6 +116,10 @@ func TestPod(t *testing.T) {
Name: toStrPtr("vol2"),
},
},
+ NodeSelector: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
},
Status: &v1.PodStatus{
Phase: toStrPtr("Running"),
@@ -101,7 +145,7 @@ func TestPod(t *testing.T) {
},
ContainerStatuses: []*v1.ContainerStatus{
{
- Name: toStrPtr("forwarder"),
+ Name: toStrPtr("running"),
State: &v1.ContainerState{
Running: &v1.ContainerStateRunning{
StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())},
@@ -113,6 +157,34 @@ func TestPod(t *testing.T) {
ImageID: toStrPtr("image_id1"),
ContainerID: toStrPtr("docker://54abe32d0094479d3d"),
},
+ {
+ Name: toStrPtr("completed"),
+ State: &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{
+ StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())},
+ ExitCode: toInt32Ptr(0),
+ Reason: toStrPtr("Completed"),
+ },
+ },
+ Ready: toBoolPtr(false),
+ RestartCount: toInt32Ptr(3),
+ Image: toStrPtr("image1"),
+ ImageID: toStrPtr("image_id1"),
+ ContainerID: toStrPtr("docker://54abe32d0094479d3d"),
+ },
+ {
+ Name: toStrPtr("waiting"),
+ State: &v1.ContainerState{
+ Waiting: &v1.ContainerStateWaiting{
+ Reason: toStrPtr("PodUninitialized"),
+ },
+ },
+ Ready: toBoolPtr(false),
+ RestartCount: toInt32Ptr(3),
+ Image: toStrPtr("image1"),
+ ImageID: toStrPtr("image_id1"),
+ ContainerID: toStrPtr("docker://54abe32d0094479d3d"),
+ },
},
},
Metadata: &metav1.ObjectMeta{
@@ -148,12 +220,51 @@ func TestPod(t *testing.T) {
"resource_requests_millicpu_units": int64(100),
"resource_limits_millicpu_units": int64(100),
},
+ Tags: map[string]string{
+ "namespace": "ns1",
+ "container_name": "running",
+ "node_name": "node1",
+ "pod_name": "pod1",
+ "state": "running",
+ "readiness": "ready",
+ "node_selector_select1": "s1",
+ "node_selector_select2": "s2",
+ },
+ },
+ {
+ Measurement: podContainerMeasurement,
+ Fields: map[string]interface{}{
+ "restarts_total": int32(3),
+ "state_code": 1,
+ "state_reason": "Completed",
+ "resource_requests_millicpu_units": int64(100),
+ "resource_limits_millicpu_units": int64(100),
+ },
+ Tags: map[string]string{
+ "namespace": "ns1",
+ "container_name": "completed",
+ "node_name": "node1",
+ "pod_name": "pod1",
+ "state": "terminated",
+ "readiness": "unready",
+ },
+ },
+ {
+ Measurement: podContainerMeasurement,
+ Fields: map[string]interface{}{
+ "restarts_total": int32(3),
+ "state_code": 2,
+ "state_reason": "PodUninitialized",
+ "resource_requests_millicpu_units": int64(100),
+ "resource_limits_millicpu_units": int64(100),
+ },
Tags: map[string]string{
"namespace": "ns1",
- "container_name": "forwarder",
+ "container_name": "waiting",
"node_name": "node1",
"pod_name": "pod1",
- "state": "running",
+ "state": "waiting",
+ "readiness": "unready",
},
},
},
@@ -163,8 +274,11 @@ func TestPod(t *testing.T) {
}
for _, v := range tests {
ks := &KubernetesInventory{
- client: cli,
+ client: cli,
+ SelectorInclude: selectInclude,
+ SelectorExclude: selectExclude,
}
+ ks.createSelectorFilters()
acc := new(testutil.Accumulator)
for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items {
err := ks.gatherPod(*pod, acc)
@@ -197,3 +311,243 @@ func TestPod(t *testing.T) {
}
}
}
+
+func TestPodSelectorFilter(t *testing.T) {
+ cli := &client{}
+ now := time.Now()
+ started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location())
+ created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location())
+ cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location())
+ cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location())
+
+ responseMap := map[string]interface{}{
+ "/pods/": &v1.PodList{
+ Items: []*v1.Pod{
+ {
+ Spec: &v1.PodSpec{
+ NodeName: toStrPtr("node1"),
+ Containers: []*v1.Container{
+ {
+ Name: toStrPtr("forwarder"),
+ Image: toStrPtr("image1"),
+ Ports: []*v1.ContainerPort{
+ {
+ ContainerPort: toInt32Ptr(8080),
+ Protocol: toStrPtr("TCP"),
+ },
+ },
+ Resources: &v1.ResourceRequirements{
+ Limits: map[string]*resource.Quantity{
+ "cpu": {String_: toStrPtr("100m")},
+ },
+ Requests: map[string]*resource.Quantity{
+ "cpu": {String_: toStrPtr("100m")},
+ },
+ },
+ },
+ },
+ Volumes: []*v1.Volume{
+ {
+ Name: toStrPtr("vol1"),
+ VolumeSource: &v1.VolumeSource{
+ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+ ClaimName: toStrPtr("pc1"),
+ ReadOnly: toBoolPtr(true),
+ },
+ },
+ },
+ {
+ Name: toStrPtr("vol2"),
+ },
+ },
+ NodeSelector: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
+ Status: &v1.PodStatus{
+ Phase: toStrPtr("Running"),
+ HostIP: toStrPtr("180.12.10.18"),
+ PodIP: toStrPtr("10.244.2.15"),
+ StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())},
+ Conditions: []*v1.PodCondition{
+ {
+ Type: toStrPtr("Initialized"),
+ Status: toStrPtr("True"),
+ LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())},
+ },
+ {
+ Type: toStrPtr("Ready"),
+ Status: toStrPtr("True"),
+ LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())},
+ },
+ {
+ Type: toStrPtr("Scheduled"),
+ Status: toStrPtr("True"),
+ LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())},
+ },
+ },
+ ContainerStatuses: []*v1.ContainerStatus{
+ {
+ Name: toStrPtr("forwarder"),
+ State: &v1.ContainerState{
+ Running: &v1.ContainerStateRunning{
+ StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())},
+ },
+ },
+ Ready: toBoolPtr(true),
+ RestartCount: toInt32Ptr(3),
+ Image: toStrPtr("image1"),
+ ImageID: toStrPtr("image_id1"),
+ ContainerID: toStrPtr("docker://54abe32d0094479d3d"),
+ },
+ },
+ },
+ Metadata: &metav1.ObjectMeta{
+ OwnerReferences: []*metav1.OwnerReference{
+ {
+ ApiVersion: toStrPtr("apps/v1"),
+ Kind: toStrPtr("DaemonSet"),
+ Name: toStrPtr("forwarder"),
+ Controller: toBoolPtr(true),
+ },
+ },
+ Generation: toInt64Ptr(11232),
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("pod1"),
+ Labels: map[string]string{
+ "lab1": "v1",
+ "lab2": "v2",
+ },
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())},
+ },
+ },
+ },
+ },
+ }
+
+ tests := []struct {
+ name string
+ handler *mockHandler
+ hasError bool
+ include []string
+ exclude []string
+ expected map[string]string
+ }{
+ {
+ name: "nil filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: nil,
+ exclude: nil,
+ expected: map[string]string{
+ "node_selector_select1": "s1",
+ "node_selector_select2": "s2",
+ },
+ },
+ {
+ name: "empty filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{},
+ expected: map[string]string{
+ "node_selector_select1": "s1",
+ "node_selector_select2": "s2",
+ },
+ },
+ {
+ name: "include filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"select1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "node_selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude filter equals only non-excluded selectors (overrides include filter)",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"select2"},
+ expected: map[string]string{
+ "node_selector_select1": "s1",
+ },
+ },
+ {
+ name: "include glob filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"*1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "node_selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "node_selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "node_selector_select1": "s1",
+ },
+ },
+ }
+ for _, v := range tests {
+ ks := &KubernetesInventory{
+ client: cli,
+ }
+ ks.SelectorInclude = v.include
+ ks.SelectorExclude = v.exclude
+ ks.createSelectorFilters()
+ acc := new(testutil.Accumulator)
+ for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items {
+ err := ks.gatherPod(*pod, acc)
+ if err != nil {
+ t.Errorf("Failed to gather pod - %s", err.Error())
+ }
+ }
+
+ // Grab selector tags
+ actual := map[string]string{}
+ for _, metric := range acc.Metrics {
+ for key, val := range metric.Tags {
+ if strings.Contains(key, "node_selector_") {
+ actual[key] = val
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v.expected, actual) {
+ t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
+ }
+ }
+}
diff --git a/plugins/inputs/kube_inventory/service.go b/plugins/inputs/kube_inventory/service.go
new file mode 100644
index 0000000000000..0c749ea8ac3fc
--- /dev/null
+++ b/plugins/inputs/kube_inventory/service.go
@@ -0,0 +1,76 @@
+package kube_inventory
+
+import (
+ "context"
+ "time"
+
+ "github.com/ericchiang/k8s/apis/core/v1"
+
+ "github.com/influxdata/telegraf"
+)
+
+func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
+ list, err := ki.client.getServices(ctx)
+ if err != nil {
+ acc.AddError(err)
+ return
+ }
+ for _, i := range list.Items {
+ if err = ki.gatherService(*i, acc); err != nil {
+ acc.AddError(err)
+ return
+ }
+ }
+}
+
+func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumulator) error {
+ if s.Metadata.CreationTimestamp.GetSeconds() == 0 && s.Metadata.CreationTimestamp.GetNanos() == 0 {
+ return nil
+ }
+
+ fields := map[string]interface{}{
+ "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(),
+ "generation": s.Metadata.GetGeneration(),
+ }
+
+ tags := map[string]string{
+ "service_name": s.Metadata.GetName(),
+ "namespace": s.Metadata.GetNamespace(),
+ }
+
+ for key, val := range s.GetSpec().GetSelector() {
+ if ki.selectorFilter.Match(key) {
+ tags["selector_"+key] = val
+ }
+ }
+
+ var getPorts = func() {
+ for _, port := range s.GetSpec().GetPorts() {
+ fields["port"] = port.GetPort()
+ fields["target_port"] = port.GetTargetPort().GetIntVal()
+
+ tags["port_name"] = port.GetName()
+ tags["port_protocol"] = port.GetProtocol()
+
+ if s.GetSpec().GetType() == "ExternalName" {
+ tags["external_name"] = s.GetSpec().GetExternalName()
+ } else {
+ tags["cluster_ip"] = s.GetSpec().GetClusterIP()
+ }
+
+ acc.AddFields(serviceMeasurement, fields, tags)
+ }
+ }
+
+ if externIPs := s.GetSpec().GetExternalIPs(); externIPs != nil {
+ for _, ip := range externIPs {
+ tags["ip"] = ip
+
+ getPorts()
+ }
+ } else {
+ getPorts()
+ }
+
+ return nil
+}
diff --git a/plugins/inputs/kube_inventory/service_test.go b/plugins/inputs/kube_inventory/service_test.go
new file mode 100644
index 0000000000000..3b1089130fbf7
--- /dev/null
+++ b/plugins/inputs/kube_inventory/service_test.go
@@ -0,0 +1,299 @@
+package kube_inventory
+
+import (
+ "reflect"
+
+ "testing"
+ "time"
+
+ "github.com/ericchiang/k8s/apis/core/v1"
+ metav1 "github.com/ericchiang/k8s/apis/meta/v1"
+ "github.com/influxdata/telegraf/testutil"
+
+ "strings"
+)
+
+func TestService(t *testing.T) {
+ cli := &client{}
+ now := time.Now()
+ now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
+
+ tests := []struct {
+ name string
+ handler *mockHandler
+ output *testutil.Accumulator
+ hasError bool
+ include []string
+ exclude []string
+ }{
+ {
+ name: "no service",
+ handler: &mockHandler{
+ responseMap: map[string]interface{}{
+ "/service/": &v1.ServiceList{},
+ },
+ },
+ hasError: false,
+ },
+ {
+ name: "collect service",
+ handler: &mockHandler{
+ responseMap: map[string]interface{}{
+ "/service/": &v1.ServiceList{
+ Items: []*v1.Service{
+ {
+ Spec: &v1.ServiceSpec{
+ Ports: []*v1.ServicePort{
+ {
+ Port: toInt32Ptr(8080),
+ TargetPort: toIntStrPtrI(1234),
+ Name: toStrPtr("diagnostic"),
+ Protocol: toStrPtr("TCP"),
+ },
+ },
+ ExternalIPs: []string{"1.0.0.127"},
+ ClusterIP: toStrPtr("127.0.0.1"),
+ Selector: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
+ Metadata: &metav1.ObjectMeta{
+ Generation: toInt64Ptr(12),
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("checker"),
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
+ },
+ },
+ },
+ },
+ },
+ },
+
+ output: &testutil.Accumulator{
+ Metrics: []*testutil.Metric{
+ {
+ Fields: map[string]interface{}{
+ "port": int32(8080),
+ "target_port": int32(1234),
+ "generation": int64(12),
+ "created": now.UnixNano(),
+ },
+ Tags: map[string]string{
+ "service_name": "checker",
+ "namespace": "ns1",
+ "port_name": "diagnostic",
+ "port_protocol": "TCP",
+ "cluster_ip": "127.0.0.1",
+ "ip": "1.0.0.127",
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ },
+ },
+ hasError: false,
+ },
+ }
+
+ for _, v := range tests {
+ ks := &KubernetesInventory{
+ client: cli,
+ }
+ ks.SelectorInclude = v.include
+ ks.SelectorExclude = v.exclude
+ ks.createSelectorFilters()
+ acc := new(testutil.Accumulator)
+ for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items {
+ err := ks.gatherService(*service, acc)
+ if err != nil {
+ t.Errorf("Failed to gather service - %s", err.Error())
+ }
+ }
+
+ err := acc.FirstError()
+ if err == nil && v.hasError {
+ t.Fatalf("%s failed, should have error", v.name)
+ } else if err != nil && !v.hasError {
+ t.Fatalf("%s failed, err: %v", v.name, err)
+ }
+ if v.output == nil && len(acc.Metrics) > 0 {
+ t.Fatalf("%s: collected extra data", v.name)
+ } else if v.output != nil && len(v.output.Metrics) > 0 {
+ for i := range v.output.Metrics {
+ for k, m := range v.output.Metrics[i].Tags {
+ if acc.Metrics[i].Tags[k] != m {
+ t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k])
+ }
+ }
+ for k, m := range v.output.Metrics[i].Fields {
+ if acc.Metrics[i].Fields[k] != m {
+ t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k])
+ }
+ }
+ }
+ }
+ }
+}
+
+func TestServiceSelectorFilter(t *testing.T) {
+ cli := &client{}
+ now := time.Now()
+ now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
+
+ responseMap := map[string]interface{}{
+ "/service/": &v1.ServiceList{
+ Items: []*v1.Service{
+ {
+ Spec: &v1.ServiceSpec{
+ Ports: []*v1.ServicePort{
+ {
+ Port: toInt32Ptr(8080),
+ TargetPort: toIntStrPtrI(1234),
+ Name: toStrPtr("diagnostic"),
+ Protocol: toStrPtr("TCP"),
+ },
+ },
+ ExternalIPs: []string{"1.0.0.127"},
+ ClusterIP: toStrPtr("127.0.0.1"),
+ Selector: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
+ Metadata: &metav1.ObjectMeta{
+ Generation: toInt64Ptr(12),
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("checker"),
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
+ },
+ },
+ },
+ },
+ }
+
+ tests := []struct {
+ name string
+ handler *mockHandler
+ hasError bool
+ include []string
+ exclude []string
+ expected map[string]string
+ }{
+ {
+ name: "nil filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: nil,
+ exclude: nil,
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "empty filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "include filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"select1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude filter equals only non-excluded selectors (overrides include filter)",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"select2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "include glob filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"*1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ }
+ for _, v := range tests {
+ ks := &KubernetesInventory{
+ client: cli,
+ }
+ ks.SelectorInclude = v.include
+ ks.SelectorExclude = v.exclude
+ ks.createSelectorFilters()
+ acc := new(testutil.Accumulator)
+ for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items {
+ err := ks.gatherService(*service, acc)
+ if err != nil {
+ t.Errorf("Failed to gather service - %s", err.Error())
+ }
+ }
+
+ // Grab selector tags
+ actual := map[string]string{}
+ for _, metric := range acc.Metrics {
+ for key, val := range metric.Tags {
+ if strings.Contains(key, "selector_") {
+ actual[key] = val
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v.expected, actual) {
+ t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
+ }
+ }
+}
diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go
index 407aaac2fce08..fe25f19f08440 100644
--- a/plugins/inputs/kube_inventory/statefulset.go
+++ b/plugins/inputs/kube_inventory/statefulset.go
@@ -4,7 +4,7 @@ import (
"context"
"time"
- "github.com/ericchiang/k8s/apis/apps/v1beta1"
+ "github.com/ericchiang/k8s/apis/apps/v1"
"github.com/influxdata/telegraf"
)
@@ -23,7 +23,7 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube
}
}
-func (ki *KubernetesInventory) gatherStatefulSet(s v1beta1.StatefulSet, acc telegraf.Accumulator) error {
+func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) error {
status := s.Status
fields := map[string]interface{}{
"created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(),
@@ -39,6 +39,11 @@ func (ki *KubernetesInventory) gatherStatefulSet(s v1beta1.StatefulSet, acc tele
"statefulset_name": *s.Metadata.Name,
"namespace": *s.Metadata.Namespace,
}
+ for key, val := range s.GetSpec().GetSelector().GetMatchLabels() {
+ if ki.selectorFilter.Match(key) {
+ tags["selector_"+key] = val
+ }
+ }
acc.AddFields(statefulSetMeasurement, fields, tags)
diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go
index 6e94ad150ce0f..689cbadbc4b8d 100644
--- a/plugins/inputs/kube_inventory/statefulset_test.go
+++ b/plugins/inputs/kube_inventory/statefulset_test.go
@@ -1,10 +1,12 @@
package kube_inventory
import (
+ "reflect"
+ "strings"
"testing"
"time"
- "github.com/ericchiang/k8s/apis/apps/v1beta1"
+ "github.com/ericchiang/k8s/apis/apps/v1"
metav1 "github.com/ericchiang/k8s/apis/meta/v1"
"github.com/influxdata/telegraf/testutil"
@@ -12,6 +14,8 @@ import (
func TestStatefulSet(t *testing.T) {
cli := &client{}
+ selectInclude := []string{}
+ selectExclude := []string{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
@@ -24,7 +28,7 @@ func TestStatefulSet(t *testing.T) {
name: "no statefulsets",
handler: &mockHandler{
responseMap: map[string]interface{}{
- "/statefulsets/": &v1beta1.StatefulSetList{},
+ "/statefulsets/": &v1.StatefulSetList{},
},
},
hasError: false,
@@ -33,18 +37,24 @@ func TestStatefulSet(t *testing.T) {
name: "collect statefulsets",
handler: &mockHandler{
responseMap: map[string]interface{}{
- "/statefulsets/": &v1beta1.StatefulSetList{
- Items: []*v1beta1.StatefulSet{
+ "/statefulsets/": &v1.StatefulSetList{
+ Items: []*v1.StatefulSet{
{
- Status: &v1beta1.StatefulSetStatus{
+ Status: &v1.StatefulSetStatus{
Replicas: toInt32Ptr(2),
CurrentReplicas: toInt32Ptr(4),
ReadyReplicas: toInt32Ptr(1),
UpdatedReplicas: toInt32Ptr(3),
ObservedGeneration: toInt64Ptr(119),
},
- Spec: &v1beta1.StatefulSetSpec{
+ Spec: &v1.StatefulSetSpec{
Replicas: toInt32Ptr(3),
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
},
Metadata: &metav1.ObjectMeta{
Generation: toInt64Ptr(332),
@@ -77,6 +87,8 @@ func TestStatefulSet(t *testing.T) {
Tags: map[string]string{
"namespace": "ns1",
"statefulset_name": "sts1",
+ "selector_select1": "s1",
+ "selector_select2": "s2",
},
},
},
@@ -87,10 +99,13 @@ func TestStatefulSet(t *testing.T) {
for _, v := range tests {
ks := &KubernetesInventory{
- client: cli,
+ client: cli,
+ SelectorInclude: selectInclude,
+ SelectorExclude: selectExclude,
}
+ ks.createSelectorFilters()
acc := new(testutil.Accumulator)
- for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1beta1.StatefulSetList)).Items {
+ for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items {
err := ks.gatherStatefulSet(*ss, acc)
if err != nil {
t.Errorf("Failed to gather ss - %s", err.Error())
@@ -121,3 +136,169 @@ func TestStatefulSet(t *testing.T) {
}
}
}
+
+func TestStatefulSetSelectorFilter(t *testing.T) {
+ cli := &client{}
+ now := time.Now()
+ now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
+
+ responseMap := map[string]interface{}{
+ "/statefulsets/": &v1.StatefulSetList{
+ Items: []*v1.StatefulSet{
+ {
+ Status: &v1.StatefulSetStatus{
+ Replicas: toInt32Ptr(2),
+ CurrentReplicas: toInt32Ptr(4),
+ ReadyReplicas: toInt32Ptr(1),
+ UpdatedReplicas: toInt32Ptr(3),
+ ObservedGeneration: toInt64Ptr(119),
+ },
+ Spec: &v1.StatefulSetSpec{
+ Replicas: toInt32Ptr(3),
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "select1": "s1",
+ "select2": "s2",
+ },
+ },
+ },
+ Metadata: &metav1.ObjectMeta{
+ Generation: toInt64Ptr(332),
+ Namespace: toStrPtr("ns1"),
+ Name: toStrPtr("sts1"),
+ Labels: map[string]string{
+ "lab1": "v1",
+ "lab2": "v2",
+ },
+ CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
+ },
+ },
+ },
+ },
+ }
+
+ tests := []struct {
+ name string
+ handler *mockHandler
+ hasError bool
+ include []string
+ exclude []string
+ expected map[string]string
+ }{
+ {
+ name: "nil filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: nil,
+ exclude: nil,
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "empty filters equals all selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ "selector_select2": "s2",
+ },
+ },
+ {
+ name: "include filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"select1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude filter equals only non-excluded selectors (overrides include filter)",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"select2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "include glob filter equals only include-matched selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{"*1"},
+ exclude: []string{},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ {
+ name: "exclude glob filter equals only non-excluded selectors",
+ handler: &mockHandler{
+ responseMap: responseMap,
+ },
+ hasError: false,
+ include: []string{},
+ exclude: []string{"*2"},
+ expected: map[string]string{
+ "selector_select1": "s1",
+ },
+ },
+ }
+ for _, v := range tests {
+ ks := &KubernetesInventory{
+ client: cli,
+ }
+ ks.SelectorInclude = v.include
+ ks.SelectorExclude = v.exclude
+ ks.createSelectorFilters()
+ acc := new(testutil.Accumulator)
+ for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items {
+ err := ks.gatherStatefulSet(*ss, acc)
+ if err != nil {
+ t.Errorf("Failed to gather ss - %s", err.Error())
+ }
+ }
+
+ // Grab selector tags
+ actual := map[string]string{}
+ for _, metric := range acc.Metrics {
+ for key, val := range metric.Tags {
+ if strings.Contains(key, "selector_") {
+ actual[key] = val
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v.expected, actual) {
+ t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
+ }
+ }
+}
diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md
index 33cca8590e46b..a574bed06ffe4 100644
--- a/plugins/inputs/kubernetes/README.md
+++ b/plugins/inputs/kubernetes/README.md
@@ -1,17 +1,29 @@
# Kubernetes Input Plugin
-This input plugin talks to the kubelet api using the `/stats/summary` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet.
+The Kubernetes plugin talks to the Kubelet API and gathers metrics about the
+running pods and containers for a single host. It is assumed that this plugin
+is running as part of a `daemonset` within a kubernetes installation. This
+means that telegraf is running on every node within the cluster. Therefore, you
+should configure this plugin to talk to its locally running kubelet.
To find the ip address of the host you are running on you can issue a command like the following:
+
```
$ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP'
```
+
In this case we used the downward API to pass in the `$POD_NAMESPACE` and `$HOSTNAME` is the hostname of the pod which is set by the kubernetes API.
+Kubernetes is a fast moving project, with a new minor release every 3 months. As
+such, we will aim to maintain support only for versions that are supported by
+the major cloud providers; this is roughly 4 release / 2 years.
+
+**This plugin supports Kubernetes 1.11 and later.**
+
#### Series Cardinality Warning
This plugin may produce a high number of series which, when not controlled
-for, will cause high load on your database. Use the following techniques to
+for, will cause high load on your database. Use the following techniques to
avoid cardinality issues:
- Use [metric filtering][] options to exclude unneeded measurements and tags.
@@ -30,10 +42,17 @@ avoid cardinality issues:
url = "http://127.0.0.1:10255"
## Use bearer token for authorization. ('bearer_token' takes priority)
+ ## If both of these are empty, we'll use the default serviceaccount:
+ ## at: /run/secrets/kubernetes.io/serviceaccount/token
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
+ ## Pod labels to be added as tags. An empty array for both include and
+ ## exclude will include all labels.
+ # label_include = []
+ # label_exclude = ["*"]
+
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
@@ -48,7 +67,12 @@ avoid cardinality issues:
### DaemonSet
For recommendations on running Telegraf as a DaemonSet see [Monitoring Kubernetes
-Architecture][k8s-telegraf] or view the [Helm charts][tick-charts].
+Architecture][k8s-telegraf] or view the Helm charts:
+
+- [Telegraf][]
+- [InfluxDB][]
+- [Chronograf][]
+- [Kapacitor][]
### Metrics
@@ -75,7 +99,7 @@ Architecture][k8s-telegraf] or view the [Helm charts][tick-charts].
- runtime_image_fs_capacity_bytes
- runtime_image_fs_used_bytes
-+ kubernetes_pod_container
+* kubernetes_pod_container
- tags:
- container_name
- namespace
@@ -92,7 +116,7 @@ Architecture][k8s-telegraf] or view the [Helm charts][tick-charts].
- rootfs_available_bytes
- rootfs_capacity_bytes
- rootfs_used_bytes
- - logsfs_avaialble_bytes
+ - logsfs_available_bytes
- logsfs_capacity_bytes
- logsfs_used_bytes
@@ -107,7 +131,7 @@ Architecture][k8s-telegraf] or view the [Helm charts][tick-charts].
- capacity_bytes
- used_bytes
-+ kubernetes_pod_network
+* kubernetes_pod_network
- tags:
- namespace
- node_name
@@ -122,7 +146,7 @@ Architecture][k8s-telegraf] or view the [Helm charts][tick-charts].
```
kubernetes_node
-kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000
+kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_available_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000
kubernetes_pod_network,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000
kubernetes_pod_volume,volume_name=default-token-f7wts,namespace=default,node_name=ip-172-17-0-1.internal,pod_name=storage-7 available_bytes=8415240192i,capacity_bytes=8415252480i,used_bytes=12288i 1546910783000000000
kubernetes_system_container
@@ -136,4 +160,7 @@ kubernetes_system_container
[series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
[influx-docs]: https://docs.influxdata.com/influxdb/latest/
[k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/
-[tick-charts]: https://github.com/influxdata/tick-charts
+[telegraf]: https://github.com/helm/charts/tree/master/stable/telegraf
+[influxdb]: https://github.com/helm/charts/tree/master/stable/influxdb
+[chronograf]: https://github.com/helm/charts/tree/master/stable/chronograf
+[kapacitor]: https://github.com/helm/charts/tree/master/stable/kapacitor
diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go
index fdeb78ec48939..a9bb6ef4850d8 100644
--- a/plugins/inputs/kubernetes/kubernetes.go
+++ b/plugins/inputs/kubernetes/kubernetes.go
@@ -10,8 +10,9 @@ import (
"time"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -23,6 +24,11 @@ type Kubernetes struct {
BearerToken string `toml:"bearer_token"`
BearerTokenString string `toml:"bearer_token_string"`
+ LabelInclude []string `toml:"label_include"`
+ LabelExclude []string `toml:"label_exclude"`
+
+ labelFilter filter.Filter
+
// HTTP Timeout specified as a string - 3s, 1m, 1h
ResponseTimeout internal.Duration
@@ -36,10 +42,17 @@ var sampleConfig = `
url = "http://127.0.0.1:10255"
## Use bearer token for authorization. ('bearer_token' takes priority)
+ ## If both of these are empty, we'll use the default serviceaccount:
+ ## at: /run/secrets/kubernetes.io/serviceaccount/token
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
+ ## Pod labels to be added as tags. An empty array for both include and
+ ## exclude will include all labels.
+ # label_include = []
+ # label_exclude = ["*"]
+
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
@@ -52,12 +65,16 @@ var sampleConfig = `
`
const (
- summaryEndpoint = `%s/stats/summary`
+ summaryEndpoint = `%s/stats/summary`
+ defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token"
)
func init() {
inputs.Add("kubernetes", func() telegraf.Input {
- return &Kubernetes{}
+ return &Kubernetes{
+ LabelInclude: []string{},
+ LabelExclude: []string{"*"},
+ }
})
}
@@ -71,6 +88,30 @@ func (k *Kubernetes) Description() string {
return "Read metrics from the kubernetes kubelet api"
}
+func (k *Kubernetes) Init() error {
+
+ // If neither are provided, use the default service account.
+ if k.BearerToken == "" && k.BearerTokenString == "" {
+ k.BearerToken = defaultServiceAccountPath
+ }
+
+ if k.BearerToken != "" {
+ token, err := ioutil.ReadFile(k.BearerToken)
+ if err != nil {
+ return err
+ }
+ k.BearerTokenString = strings.TrimSpace(string(token))
+ }
+
+ labelFilter, err := filter.NewIncludeExcludeFilter(k.LabelInclude, k.LabelExclude)
+ if err != nil {
+ return err
+ }
+ k.labelFilter = labelFilter
+
+ return nil
+}
+
//Gather collects kubernetes metrics from a given URL
func (k *Kubernetes) Gather(acc telegraf.Accumulator) error {
acc.AddError(k.gatherSummary(k.URL, acc))
@@ -87,56 +128,19 @@ func buildURL(endpoint string, base string) (*url.URL, error) {
}
func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error {
- url := fmt.Sprintf("%s/stats/summary", baseURL)
- var req, err = http.NewRequest("GET", url, nil)
- var resp *http.Response
-
- tlsCfg, err := k.ClientConfig.TLSConfig()
+ summaryMetrics := &SummaryMetrics{}
+ err := k.LoadJson(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics)
if err != nil {
return err
}
- if k.RoundTripper == nil {
- // Set default values
- if k.ResponseTimeout.Duration < time.Second {
- k.ResponseTimeout.Duration = time.Second * 5
- }
- k.RoundTripper = &http.Transport{
- TLSHandshakeTimeout: 5 * time.Second,
- TLSClientConfig: tlsCfg,
- ResponseHeaderTimeout: k.ResponseTimeout.Duration,
- }
- }
-
- if k.BearerToken != "" {
- token, err := ioutil.ReadFile(k.BearerToken)
- if err != nil {
- return err
- }
- req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(string(token)))
- } else if k.BearerTokenString != "" {
- req.Header.Set("Authorization", "Bearer "+k.BearerTokenString)
- }
- req.Header.Add("Accept", "application/json")
-
- resp, err = k.RoundTripper.RoundTrip(req)
+ podInfos, err := k.gatherPodInfo(baseURL)
if err != nil {
- return fmt.Errorf("error making HTTP request to %s: %s", url, err)
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
- }
-
- summaryMetrics := &SummaryMetrics{}
- err = json.NewDecoder(resp.Body).Decode(summaryMetrics)
- if err != nil {
- return fmt.Errorf(`Error parsing response: %s`, err)
+ return err
}
buildSystemContainerMetrics(summaryMetrics, acc)
buildNodeMetrics(summaryMetrics, acc)
- buildPodMetrics(summaryMetrics, acc)
+ buildPodMetrics(baseURL, summaryMetrics, podInfos, k.labelFilter, acc)
return nil
}
@@ -156,7 +160,7 @@ func buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Ac
fields["memory_major_page_faults"] = container.Memory.MajorPageFaults
fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes
fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes
- fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes
+ fields["logsfs_available_bytes"] = container.LogsFS.AvailableBytes
fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes
acc.AddFields("kubernetes_system_container", fields, tags)
}
@@ -188,7 +192,59 @@ func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator)
acc.AddFields("kubernetes_node", fields, tags)
}
-func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
+func (k *Kubernetes) gatherPodInfo(baseURL string) ([]Metadata, error) {
+ var podApi Pods
+ err := k.LoadJson(fmt.Sprintf("%s/pods", baseURL), &podApi)
+ if err != nil {
+ return nil, err
+ }
+ var podInfos []Metadata
+ for _, podMetadata := range podApi.Items {
+ podInfos = append(podInfos, podMetadata.Metadata)
+ }
+ return podInfos, nil
+}
+
+func (k *Kubernetes) LoadJson(url string, v interface{}) error {
+ var req, err = http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+ var resp *http.Response
+ tlsCfg, err := k.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+ if k.RoundTripper == nil {
+ if k.ResponseTimeout.Duration < time.Second {
+ k.ResponseTimeout.Duration = time.Second * 5
+ }
+ k.RoundTripper = &http.Transport{
+ TLSHandshakeTimeout: 5 * time.Second,
+ TLSClientConfig: tlsCfg,
+ ResponseHeaderTimeout: k.ResponseTimeout.Duration,
+ }
+ }
+ req.Header.Set("Authorization", "Bearer "+k.BearerTokenString)
+ req.Header.Add("Accept", "application/json")
+ resp, err = k.RoundTripper.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("error making HTTP request to %s: %s", url, err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(v)
+ if err != nil {
+ return fmt.Errorf(`Error parsing response: %s`, err)
+ }
+
+ return nil
+}
+
+func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) {
for _, pod := range summaryMetrics.Pods {
for _, container := range pod.Containers {
tags := map[string]string{
@@ -197,6 +253,16 @@ func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
"container_name": container.Name,
"pod_name": pod.PodRef.Name,
}
+ for _, info := range podInfo {
+ if info.Name == pod.PodRef.Name && info.Namespace == pod.PodRef.Namespace {
+ for k, v := range info.Labels {
+ if labelFilter.Match(k) {
+ tags[k] = v
+ }
+ }
+ }
+ }
+
fields := make(map[string]interface{})
fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores
fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds
@@ -208,7 +274,7 @@ func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes
fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes
fields["rootfs_used_bytes"] = container.RootFS.UsedBytes
- fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes
+ fields["logsfs_available_bytes"] = container.LogsFS.AvailableBytes
fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes
fields["logsfs_used_bytes"] = container.LogsFS.UsedBytes
acc.AddFields("kubernetes_pod_container", fields, tags)
diff --git a/plugins/inputs/kubernetes/kubernetes_metrics.go b/plugins/inputs/kubernetes/kubernetes_metrics.go
index 96814bcbebae7..d45d4b5f105d3 100644
--- a/plugins/inputs/kubernetes/kubernetes_metrics.go
+++ b/plugins/inputs/kubernetes/kubernetes_metrics.go
@@ -2,7 +2,7 @@ package kubernetes
import "time"
-// SummaryMetrics represents all the summary data about a paritcular node retrieved from a kubelet
+// SummaryMetrics represents all the summary data about a particular node retrieved from a kubelet
type SummaryMetrics struct {
Node NodeMetrics `json:"node"`
Pods []PodMetrics `json:"pods"`
diff --git a/plugins/inputs/kubernetes/kubernetes_pods.go b/plugins/inputs/kubernetes/kubernetes_pods.go
new file mode 100644
index 0000000000000..672608e54fe25
--- /dev/null
+++ b/plugins/inputs/kubernetes/kubernetes_pods.go
@@ -0,0 +1,17 @@
+package kubernetes
+
+type Pods struct {
+ Kind string `json:"kind"`
+ ApiVersion string `json:"apiVersion"`
+ Items []Item `json:"items"`
+}
+
+type Item struct {
+ Metadata Metadata `json:"metadata"`
+}
+
+type Metadata struct {
+ Name string `json:"name"`
+ Namespace string `json:"namespace"`
+ Labels map[string]string `json:"labels"`
+}
diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go
index 289e36ae498dc..faf40be3e1000 100644
--- a/plugins/inputs/kubernetes/kubernetes_test.go
+++ b/plugins/inputs/kubernetes/kubernetes_test.go
@@ -2,6 +2,7 @@ package kubernetes
import (
"fmt"
+ "github.com/influxdata/telegraf/filter"
"net/http"
"net/http/httptest"
"testing"
@@ -12,13 +13,23 @@ import (
func TestKubernetesStats(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusOK)
- fmt.Fprintln(w, response)
+ if r.RequestURI == "/stats/summary" {
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintln(w, responseStatsSummery)
+ }
+ if r.RequestURI == "/pods" {
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintln(w, responsePods)
+ }
+
}))
defer ts.Close()
+ labelFilter, _ := filter.NewIncludeExcludeFilter([]string{"app", "superkey"}, nil)
+
k := &Kubernetes{
- URL: ts.URL,
+ URL: ts.URL,
+ labelFilter: labelFilter,
}
var acc testutil.Accumulator
@@ -35,7 +46,7 @@ func TestKubernetesStats(t *testing.T) {
"memory_major_page_faults": int64(13),
"rootfs_available_bytes": int64(84379979776),
"rootfs_capacity_bytes": int64(105553100800),
- "logsfs_avaialble_bytes": int64(84379979776),
+ "logsfs_available_bytes": int64(84379979776),
"logsfs_capacity_bytes": int64(105553100800),
}
tags := map[string]string{
@@ -80,7 +91,7 @@ func TestKubernetesStats(t *testing.T) {
"rootfs_available_bytes": int64(84379979776),
"rootfs_capacity_bytes": int64(105553100800),
"rootfs_used_bytes": int64(57344),
- "logsfs_avaialble_bytes": int64(84379979776),
+ "logsfs_available_bytes": int64(84379979776),
"logsfs_capacity_bytes": int64(105553100800),
"logsfs_used_bytes": int64(24576),
}
@@ -89,6 +100,8 @@ func TestKubernetesStats(t *testing.T) {
"container_name": "foocontainer",
"namespace": "foons",
"pod_name": "foopod",
+ "app": "foo",
+ "superkey": "foobar",
}
acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags)
@@ -103,7 +116,7 @@ func TestKubernetesStats(t *testing.T) {
"rootfs_available_bytes": int64(0),
"rootfs_capacity_bytes": int64(0),
"rootfs_used_bytes": int64(0),
- "logsfs_avaialble_bytes": int64(0),
+ "logsfs_available_bytes": int64(0),
"logsfs_capacity_bytes": int64(0),
"logsfs_used_bytes": int64(0),
}
@@ -112,6 +125,8 @@ func TestKubernetesStats(t *testing.T) {
"container_name": "stopped-container",
"namespace": "foons",
"pod_name": "stopped-pod",
+ "app": "foo-stop",
+ "superkey": "superfoo",
}
acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags)
@@ -143,7 +158,39 @@ func TestKubernetesStats(t *testing.T) {
}
-var response = `
+var responsePods = `
+{
+ "kind": "PodList",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "metadata": {
+ "name": "foopod",
+ "namespace": "foons",
+ "labels": {
+ "superkey": "foobar",
+ "app": "foo",
+ "exclude": "exclude0"
+ }
+ }
+ },
+ {
+ "metadata": {
+ "name": "stopped-pod",
+ "namespace": "foons",
+ "labels": {
+ "superkey": "superfoo",
+ "app": "foo-stop",
+ "exclude": "exclude1"
+ }
+ }
+ }
+ ]
+}
+`
+
+var responseStatsSummery = `
{
"node": {
"nodeName": "node1",
diff --git a/plugins/inputs/lanz/README.md b/plugins/inputs/lanz/README.md
new file mode 100644
index 0000000000000..c47b22fee1dd1
--- /dev/null
+++ b/plugins/inputs/lanz/README.md
@@ -0,0 +1,87 @@
+# Arista LANZ Consumer Input Plugin
+
+This plugin provides a consumer for use with Arista Networks’ Latency Analyzer (LANZ)
+
+Metrics are read from a stream of data via TCP through port 50001 on the
+switches management IP. The data is in Protobuffers format. For more information on Arista LANZ
+
+- https://www.arista.com/en/um-eos/eos-latency-analyzer-lanz
+
+This plugin uses Arista's sdk.
+
+- https://github.com/aristanetworks/goarista
+
+### Configuration
+
+You will need to configure LANZ and enable streaming LANZ data.
+
+- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz
+- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz#ww1149292
+
+```toml
+[[inputs.lanz]]
+ servers = [
+ "tcp://switch1.int.example.com:50001",
+ "tcp://switch2.int.example.com:50001",
+ ]
+```
+
+### Metrics
+
+For more details on the metrics see https://github.com/aristanetworks/goarista/blob/master/lanz/proto/lanz.proto
+
+- lanz_congestion_record:
+ - tags:
+ - intf_name
+ - switch_id
+ - port_id
+ - entry_type
+ - traffic_class
+ - fabric_peer_intf_name
+ - source
+ - port
+ - fields:
+ - timestamp (integer)
+ - queue_size (integer)
+ - time_of_max_qlen (integer)
+ - tx_latency (integer)
+ - q_drop_count (integer)
+
++ lanz_global_buffer_usage_record
+ - tags:
+ - entry_type
+ - source
+ - port
+ - fields:
+ - timestamp (integer)
+ - buffer_size (integer)
+ - duration (integer)
+
+
+
+### Sample Queries
+
+Get the max tx_latency for the last hour for all interfaces on all switches.
+```sql
+SELECT max("tx_latency") AS "max_tx_latency" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name"
+```
+
+Get the max tx_latency for the last hour for all interfaces on all switches.
+```sql
+SELECT max("queue_size") AS "max_queue_size" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name"
+```
+
+Get the max buffer_size for over the last hour for all switches.
+```sql
+SELECT max("buffer_size") AS "max_buffer_size" FROM "global_buffer_usage_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname"
+```
+
+### Example output
+```
+lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=505i,duration=0i 1583341058300643815
+lanz_congestion_record,entry_type=2,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 time_of_max_qlen=0i,tx_latency=564480i,q_drop_count=0i,timestamp=158334105824919i,queue_size=225i 1583341058300636045
+lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=589i,duration=0i 1583341058300457464
+lanz_congestion_record,entry_type=1,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 q_drop_count=0i,timestamp=158334105824919i,queue_size=232i,time_of_max_qlen=0i,tx_latency=584640i 1583341058300450302
+```
+
+
diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go
new file mode 100644
index 0000000000000..7553c33c777b2
--- /dev/null
+++ b/plugins/inputs/lanz/lanz.go
@@ -0,0 +1,137 @@
+package lanz
+
+import (
+ "net/url"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/aristanetworks/goarista/lanz"
+ pb "github.com/aristanetworks/goarista/lanz/proto"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+var sampleConfig = `
+ ## URL to Arista LANZ endpoint
+ servers = [
+ "tcp://127.0.0.1:50001"
+ ]
+`
+
+func init() {
+ inputs.Add("lanz", func() telegraf.Input {
+ return NewLanz()
+ })
+}
+
+type Lanz struct {
+ Servers []string `toml:"servers"`
+ clients []lanz.Client
+ wg sync.WaitGroup
+}
+
+func NewLanz() *Lanz {
+ return &Lanz{}
+}
+
+func (l *Lanz) SampleConfig() string {
+ return sampleConfig
+}
+
+func (l *Lanz) Description() string {
+ return "Read metrics off Arista LANZ, via socket"
+}
+
+func (l *Lanz) Gather(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func (l *Lanz) Start(acc telegraf.Accumulator) error {
+
+ if len(l.Servers) == 0 {
+ l.Servers = append(l.Servers, "tcp://127.0.0.1:50001")
+ }
+
+ for _, server := range l.Servers {
+ deviceUrl, err := url.Parse(server)
+ if err != nil {
+ return err
+ }
+ client := lanz.New(
+ lanz.WithAddr(deviceUrl.Host),
+ lanz.WithBackoff(1*time.Second),
+ lanz.WithTimeout(10*time.Second),
+ )
+ l.clients = append(l.clients, client)
+
+ in := make(chan *pb.LanzRecord)
+ go func() {
+ client.Run(in)
+ }()
+ l.wg.Add(1)
+ go func() {
+ l.wg.Done()
+ receive(acc, in, deviceUrl)
+ }()
+ }
+ return nil
+}
+
+func (l *Lanz) Stop() {
+ for _, client := range l.clients {
+ client.Stop()
+ }
+ l.wg.Wait()
+}
+
+func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceUrl *url.URL) {
+ for {
+ select {
+ case msg, ok := <-in:
+ if !ok {
+ return
+ }
+ msgToAccumulator(acc, msg, deviceUrl)
+ }
+ }
+}
+
+func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *url.URL) {
+ cr := msg.GetCongestionRecord()
+ if cr != nil {
+ vals := map[string]interface{}{
+ "timestamp": int64(cr.GetTimestamp()),
+ "queue_size": int64(cr.GetQueueSize()),
+ "time_of_max_qlen": int64(cr.GetTimeOfMaxQLen()),
+ "tx_latency": int64(cr.GetTxLatency()),
+ "q_drop_count": int64(cr.GetQDropCount()),
+ }
+ tags := map[string]string{
+ "intf_name": cr.GetIntfName(),
+ "switch_id": strconv.FormatInt(int64(cr.GetSwitchId()), 10),
+ "port_id": strconv.FormatInt(int64(cr.GetPortId()), 10),
+ "entry_type": strconv.FormatInt(int64(cr.GetEntryType()), 10),
+ "traffic_class": strconv.FormatInt(int64(cr.GetTrafficClass()), 10),
+ "fabric_peer_intf_name": cr.GetFabricPeerIntfName(),
+ "source": deviceUrl.Hostname(),
+ "port": deviceUrl.Port(),
+ }
+ acc.AddFields("lanz_congestion_record", vals, tags)
+ }
+
+ gbur := msg.GetGlobalBufferUsageRecord()
+ if gbur != nil {
+ vals := map[string]interface{}{
+ "timestamp": int64(gbur.GetTimestamp()),
+ "buffer_size": int64(gbur.GetBufferSize()),
+ "duration": int64(gbur.GetDuration()),
+ }
+ tags := map[string]string{
+ "entry_type": strconv.FormatInt(int64(gbur.GetEntryType()), 10),
+ "source": deviceUrl.Hostname(),
+ "port": deviceUrl.Port(),
+ }
+ acc.AddFields("lanz_global_buffer_usage_record", vals, tags)
+ }
+}
diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go
new file mode 100644
index 0000000000000..5f9c7ab24cb40
--- /dev/null
+++ b/plugins/inputs/lanz/lanz_test.go
@@ -0,0 +1,137 @@
+package lanz
+
+import (
+ "net/url"
+ "strconv"
+ "testing"
+
+ pb "github.com/aristanetworks/goarista/lanz/proto"
+ "github.com/golang/protobuf/proto"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+var testProtoBufCongestionRecord1 = &pb.LanzRecord{
+ CongestionRecord: &pb.CongestionRecord{
+ Timestamp: proto.Uint64(100000000000000),
+ IntfName: proto.String("eth1"),
+ SwitchId: proto.Uint32(1),
+ PortId: proto.Uint32(1),
+ QueueSize: proto.Uint32(1),
+ EntryType: pb.CongestionRecord_EntryType.Enum(1),
+ TrafficClass: proto.Uint32(1),
+ TimeOfMaxQLen: proto.Uint64(100000000000000),
+ TxLatency: proto.Uint32(100),
+ QDropCount: proto.Uint32(1),
+ FabricPeerIntfName: proto.String("FabricPeerIntfName1"),
+ },
+}
+var testProtoBufCongestionRecord2 = &pb.LanzRecord{
+ CongestionRecord: &pb.CongestionRecord{
+ Timestamp: proto.Uint64(200000000000000),
+ IntfName: proto.String("eth2"),
+ SwitchId: proto.Uint32(2),
+ PortId: proto.Uint32(2),
+ QueueSize: proto.Uint32(2),
+ EntryType: pb.CongestionRecord_EntryType.Enum(2),
+ TrafficClass: proto.Uint32(2),
+ TimeOfMaxQLen: proto.Uint64(200000000000000),
+ TxLatency: proto.Uint32(200),
+ QDropCount: proto.Uint32(2),
+ FabricPeerIntfName: proto.String("FabricPeerIntfName2"),
+ },
+}
+
+var testProtoBufGlobalBufferUsageRecord = &pb.LanzRecord{
+ GlobalBufferUsageRecord: &pb.GlobalBufferUsageRecord{
+ EntryType: pb.GlobalBufferUsageRecord_EntryType.Enum(1),
+ Timestamp: proto.Uint64(100000000000000),
+ BufferSize: proto.Uint32(1),
+ Duration: proto.Uint32(10),
+ },
+}
+
+func TestLanzGeneratesMetrics(t *testing.T) {
+
+ var acc testutil.Accumulator
+
+ l := NewLanz()
+
+ l.Servers = append(l.Servers, "tcp://switch01.int.example.com:50001")
+ l.Servers = append(l.Servers, "tcp://switch02.int.example.com:50001")
+ deviceUrl1, err := url.Parse(l.Servers[0])
+ if err != nil {
+ t.Fail()
+ }
+ deviceUrl2, err := url.Parse(l.Servers[1])
+ if err != nil {
+ t.Fail()
+ }
+
+ msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceUrl1)
+ acc.Wait(1)
+
+ vals1 := map[string]interface{}{
+ "timestamp": int64(100000000000000),
+ "queue_size": int64(1),
+ "time_of_max_qlen": int64(100000000000000),
+ "tx_latency": int64(100),
+ "q_drop_count": int64(1),
+ }
+ tags1 := map[string]string{
+ "intf_name": "eth1",
+ "switch_id": strconv.FormatInt(int64(1), 10),
+ "port_id": strconv.FormatInt(int64(1), 10),
+ "entry_type": strconv.FormatInt(int64(1), 10),
+ "traffic_class": strconv.FormatInt(int64(1), 10),
+ "fabric_peer_intf_name": "FabricPeerIntfName1",
+ "source": "switch01.int.example.com",
+ "port": "50001",
+ }
+
+ acc.AssertContainsFields(t, "lanz_congestion_record", vals1)
+ acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals1, tags1)
+
+ acc.ClearMetrics()
+ msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceUrl2)
+ acc.Wait(1)
+
+ vals2 := map[string]interface{}{
+ "timestamp": int64(200000000000000),
+ "queue_size": int64(2),
+ "time_of_max_qlen": int64(200000000000000),
+ "tx_latency": int64(200),
+ "q_drop_count": int64(2),
+ }
+ tags2 := map[string]string{
+ "intf_name": "eth2",
+ "switch_id": strconv.FormatInt(int64(2), 10),
+ "port_id": strconv.FormatInt(int64(2), 10),
+ "entry_type": strconv.FormatInt(int64(2), 10),
+ "traffic_class": strconv.FormatInt(int64(2), 10),
+ "fabric_peer_intf_name": "FabricPeerIntfName2",
+ "source": "switch02.int.example.com",
+ "port": "50001",
+ }
+
+ acc.AssertContainsFields(t, "lanz_congestion_record", vals2)
+ acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals2, tags2)
+
+ acc.ClearMetrics()
+ msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceUrl1)
+ acc.Wait(1)
+
+ gburVals1 := map[string]interface{}{
+ "timestamp": int64(100000000000000),
+ "buffer_size": int64(1),
+ "duration": int64(10),
+ }
+ gburTags1 := map[string]string{
+ "entry_type": strconv.FormatInt(int64(1), 10),
+ "source": "switch01.int.example.com",
+ "port": "50001",
+ }
+
+ acc.AssertContainsFields(t, "lanz_global_buffer_usage_record", gburVals1)
+ acc.AssertContainsTaggedFields(t, "lanz_global_buffer_usage_record", gburVals1, gburTags1)
+
+}
diff --git a/plugins/inputs/linux_sysctl_fs/README.md b/plugins/inputs/linux_sysctl_fs/README.md
index e9341c322d05d..d6598e16ff30a 100644
--- a/plugins/inputs/linux_sysctl_fs/README.md
+++ b/plugins/inputs/linux_sysctl_fs/README.md
@@ -1,4 +1,4 @@
-# Linux Sysctl FS Input
+# Linux Sysctl FS Input Plugin
The linux_sysctl_fs input provides Linux system level file metrics. The documentation on these fields can be found at https://www.kernel.org/doc/Documentation/sysctl/fs.txt.
diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md
index 47edbd2968401..0abdba2c972df 100644
--- a/plugins/inputs/logparser/README.md
+++ b/plugins/inputs/logparser/README.md
@@ -4,10 +4,43 @@ The `logparser` plugin streams and parses the given logfiles. Currently it
has the capability of parsing "grok" patterns from logfiles, which also supports
regex patterns.
-### Configuration:
+**Deprecated in Telegraf 1.15**: Please use the [tail][] plugin along with the [`grok` data format][grok parser].
+
+The `tail` plugin now provides all the functionality of the `logparser` plugin.
+Most options can be translated directly to the `tail` plugin:
+- For options in the `[inputs.logparser.grok]` section, the equivalent option
+ will have add the `grok_` prefix when using them in the `tail` input.
+- The grok `measurement` option can be replaced using the standard plugin
+ `name_override` option.
+
+Migration Example:
+```diff
+- [[inputs.logparser]]
+- files = ["/var/log/apache/access.log"]
+- from_beginning = false
+- [inputs.logparser.grok]
+- patterns = ["%{COMBINED_LOG_FORMAT}"]
+- measurement = "apache_access_log"
+- custom_pattern_files = []
+- custom_patterns = '''
+- '''
+- timezone = "Canada/Eastern"
+
++ [[inputs.tail]]
++ files = ["/var/log/apache/access.log"]
++ from_beginning = false
++ grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
++ name_override = "apache_access_log"
++ grok_custom_pattern_files = []
++ grok_custom_patterns = '''
++ '''
++ grok_timezone = "Canada/Eastern"
++ data_format = "grok"
+```
+
+### Configuration
```toml
-# Stream and parse log file(s).
[[inputs.logparser]]
## Log files to parse.
## These accept standard unix glob matching rules, but with the addition of
@@ -59,226 +92,13 @@ regex patterns.
### Grok Parser
-The best way to get acquainted with grok patterns is to read the logstash docs,
-which are available here:
- https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
+Reference the [grok parser][] documentation to setup the grok section of the
+configuration.
-The Telegraf grok parser uses a slightly modified version of logstash "grok"
-patterns, with the format
-
-```
-%{[:][:]}
-```
-
-The `capture_syntax` defines the grok pattern that's used to parse the input
-line and the `semantic_name` is used to name the field or tag. The extension
-`modifier` controls the data type that the parsed item is converted to or
-other special handling.
-
-By default all named captures are converted into string fields.
-If a pattern does not have a semantic name it will not be captured.
-Timestamp modifiers can be used to convert captures to the timestamp of the
-parsed metric. If no timestamp is parsed the metric will be created using the
-current time.
-
-You must capture at least one field per line.
-
-- Available modifiers:
- - string (default if nothing is specified)
- - int
- - float
- - duration (ie, 5.23ms gets converted to int nanoseconds)
- - tag (converts the field into a tag)
- - drop (drops the field completely)
-- Timestamp modifiers:
- - ts (This will auto-learn the timestamp format)
- - ts-ansic ("Mon Jan _2 15:04:05 2006")
- - ts-unix ("Mon Jan _2 15:04:05 MST 2006")
- - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
- - ts-rfc822 ("02 Jan 06 15:04 MST")
- - ts-rfc822z ("02 Jan 06 15:04 -0700")
- - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
- - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
- - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
- - ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
- - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
- - ts-httpd ("02/Jan/2006:15:04:05 -0700")
- - ts-epoch (seconds since unix epoch, may contain decimal)
- - ts-epochnano (nanoseconds since unix epoch)
- - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year)
- - ts-"CUSTOM"
-
-CUSTOM time layouts must be within quotes and be the representation of the
-"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
-To match a comma decimal point you can use a period. For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"`
-To match a comma decimal point you can use a period in the pattern string.
-See https://golang.org/pkg/time/#Parse for more details.
-
-Telegraf has many of its own [built-in patterns](/plugins/parsers/grok/influx_patterns.go),
-as well as support for most of
-[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
-_Golang regular expressions do not support lookahead or lookbehind.
-logstash patterns that depend on these are not supported._
-
-If you need help building patterns to match your logs,
-you will find the https://grokdebug.herokuapp.com application quite useful!
-
-#### Timestamp Examples
-
-This example input and config parses a file using a custom timestamp conversion:
-
-```
-2017-02-21 13:10:34 value=42
-```
-
-```toml
-[[inputs.logparser]]
- [inputs.logparser.grok]
- patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}']
-```
-
-This example input and config parses a file using a timestamp in unix time:
-
-```
-1466004605 value=42
-1466004605.123456789 value=42
-```
-
-```toml
-[[inputs.logparser]]
- [inputs.logparser.grok]
- patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}']
-```
-
-This example parses a file using a built-in conversion and a custom pattern:
-
-```
-Wed Apr 12 13:10:34 PST 2017 value=42
-```
-
-```toml
-[[inputs.logparser]]
- [inputs.logparser.grok]
- patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"]
- custom_patterns = '''
- TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR}
- '''
-```
-
-For cases where the timestamp itself is without offset, the `timezone` config var is available
-to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times
-are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp
-will be processed based on the current machine timezone configuration. Lastly, if using a
-timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), the logparser grok will attempt to offset
-the timestamp accordingly. See test cases for more detailed examples.
-
-#### TOML Escaping
-
-When saving patterns to the configuration file, keep in mind the different TOML
-[string](https://github.com/toml-lang/toml#string) types and the escaping
-rules for each. These escaping rules must be applied in addition to the
-escaping required by the grok syntax. Using the Multi-line line literal
-syntax with `'''` may be useful.
-
-The following config examples will parse this input file:
-
-```
-|42|\uD83D\uDC2F|'telegraf'|
-```
-
-Since `|` is a special character in the grok language, we must escape it to
-get a literal `|`. With a basic TOML string, special characters such as
-backslash must be escaped, requiring us to escape the backslash a second time.
-
-```toml
-[[inputs.logparser]]
- [inputs.logparser.grok]
- patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
- custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+"
-```
-
-We cannot use a literal TOML string for the pattern, because we cannot match a
-`'` within it. However, it works well for the custom pattern.
-```toml
-[[inputs.logparser]]
- [inputs.logparser.grok]
- patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
- custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
-```
-
-A multi-line literal string allows us to encode the pattern:
-```toml
-[[inputs.logparser]]
- [inputs.logparser.grok]
- patterns = ['''
- \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
- ''']
- custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
-```
-
-#### Parsing Telegraf log file
-We can use logparser to convert the log lines generated by Telegraf in metrics.
-
-To do this we need to configure Telegraf to write logs to a file.
-This could be done using the ``agent.logfile`` parameter or configuring syslog.
-```toml
-[agent]
- logfile = "/var/log/telegraf/telegraf.log"
-```
-
-Logparser configuration:
-```toml
-[[inputs.logparser]]
- files = ["/var/log/telegraf/telegraf.log"]
-
- [inputs.logparser.grok]
- measurement = "telegraf_log"
- patterns = ['^%{TIMESTAMP_ISO8601:timestamp:ts-rfc3339} %{TELEGRAF_LOG_LEVEL:level:tag}! %{GREEDYDATA:msg}']
- custom_patterns = '''
-TELEGRAF_LOG_LEVEL (?:[DIWE]+)
-'''
-```
-
-Example log lines:
-```
-2018-06-14T06:41:35Z I! Starting Telegraf v1.6.4
-2018-06-14T06:41:35Z I! Agent Config: Interval:3s, Quiet:false, Hostname:"archer", Flush Interval:3s
-2018-02-20T22:39:20Z E! Error in plugin [inputs.docker]: took longer to collect than collection interval (10s)
-2018-06-01T10:34:05Z W! Skipping a scheduled flush because there is already a flush ongoing.
-2018-06-14T07:33:33Z D! Output [file] buffer fullness: 0 / 10000 metrics.
-```
-
-Generated metrics:
-```
-telegraf_log,host=somehostname,level=I msg="Starting Telegraf v1.6.4" 1528958495000000000
-telegraf_log,host=somehostname,level=I msg="Agent Config: Interval:3s, Quiet:false, Hostname:\"somehostname\", Flush Interval:3s" 1528958495001000000
-telegraf_log,host=somehostname,level=E msg="Error in plugin [inputs.docker]: took longer to collect than collection interval (10s)" 1519166360000000000
-telegraf_log,host=somehostname,level=W msg="Skipping a scheduled flush because there is already a flush ongoing." 1527849245000000000
-telegraf_log,host=somehostname,level=D msg="Output [file] buffer fullness: 0 / 10000 metrics." 1528961613000000000
-```
-
-
-### Tips for creating patterns
-
-Writing complex patterns can be difficult, here is some advice for writing a
-new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com).
-
-Create a file output that writes to stdout, and disable other outputs while
-testing. This will allow you to see the captured metrics. Keep in mind that
-the file output will only print once per `flush_interval`.
-
-```toml
-[[outputs.file]]
- files = ["stdout"]
-```
-
-- Start with a file containing only a single line of your input.
-- Remove all but the first token or piece of the line.
-- Add the section of your pattern to match this piece to your configuration file.
-- Verify that the metric is parsed successfully by running Telegraf.
-- If successful, add the next token, update the pattern and retest.
-- Continue one token at a time until the entire line is successfully parsed.
### Additional Resources
- https://www.influxdata.com/telegraf-correlate-log-metrics-data-performance-bottlenecks/
+
+[tail]: /plugins/inputs/tail/README.md
+[grok parser]: /plugins/parsers/grok/README.md
diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go
index a7fd97e8ef925..4fbd2e90d921c 100644
--- a/plugins/inputs/logparser/logparser.go
+++ b/plugins/inputs/logparser/logparser.go
@@ -3,7 +3,7 @@
package logparser
import (
- "log"
+ "fmt"
"strings"
"sync"
@@ -12,13 +12,17 @@ import (
"github.com/influxdata/telegraf/internal/globpath"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
- // Parsers
)
const (
defaultWatchMethod = "inotify"
)
+var (
+ offsets = make(map[string]int64)
+ offsetsMutex = new(sync.Mutex)
+)
+
// LogParser in the primary interface for the plugin
type GrokConfig struct {
MeasurementName string `toml:"measurement"`
@@ -41,7 +45,10 @@ type LogParserPlugin struct {
FromBeginning bool
WatchMethod string
+ Log telegraf.Logger
+
tailers map[string]*tail.Tail
+ offsets map[string]int64
lines chan logEntry
done chan struct{}
wg sync.WaitGroup
@@ -53,6 +60,20 @@ type LogParserPlugin struct {
GrokConfig GrokConfig `toml:"grok"`
}
+func NewLogParser() *LogParserPlugin {
+ offsetsMutex.Lock()
+ offsetsCopy := make(map[string]int64, len(offsets))
+ for k, v := range offsets {
+ offsetsCopy[k] = v
+ }
+ offsetsMutex.Unlock()
+
+ return &LogParserPlugin{
+ WatchMethod: defaultWatchMethod,
+ offsets: offsetsCopy,
+ }
+}
+
const sampleConfig = `
## Log files to parse.
## These accept standard unix glob matching rules, but with the addition of
@@ -116,6 +137,11 @@ func (l *LogParserPlugin) Description() string {
return "Stream and parse log file(s)."
}
+func (l *LogParserPlugin) Init() error {
+ l.Log.Warnf(`The logparser plugin is deprecated; please use the 'tail' input with the 'grok' data_format`)
+ return nil
+}
+
// Gather is the primary function to collect the metrics for the plugin
func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error {
l.Lock()
@@ -161,18 +187,21 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
l.wg.Add(1)
go l.parser()
- return l.tailNewfiles(l.FromBeginning)
+ err = l.tailNewfiles(l.FromBeginning)
+
+ // clear offsets
+ l.offsets = make(map[string]int64)
+ // assumption that once Start is called, all parallel plugins have already been initialized
+ offsetsMutex.Lock()
+ offsets = make(map[string]int64)
+ offsetsMutex.Unlock()
+
+ return err
}
// check the globs against files on disk, and start tailing any new files.
// Assumes l's lock is held!
func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
- var seek tail.SeekInfo
- if !fromBeginning {
- seek.Whence = 2
- seek.Offset = 0
- }
-
var poll bool
if l.WatchMethod == "poll" {
poll = true
@@ -182,7 +211,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
for _, filepath := range l.Files {
g, err := globpath.Compile(filepath)
if err != nil {
- log.Printf("E! Error Glob %s failed to compile, %s", filepath, err)
+ l.Log.Errorf("Glob %q failed to compile: %s", filepath, err)
continue
}
files := g.Match()
@@ -193,11 +222,27 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
continue
}
+ var seek *tail.SeekInfo
+ if !fromBeginning {
+ if offset, ok := l.offsets[file]; ok {
+ l.Log.Debugf("Using offset %d for file: %v", offset, file)
+ seek = &tail.SeekInfo{
+ Whence: 0,
+ Offset: offset,
+ }
+ } else {
+ seek = &tail.SeekInfo{
+ Whence: 2,
+ Offset: 0,
+ }
+ }
+ }
+
tailer, err := tail.TailFile(file,
tail.Config{
ReOpen: true,
Follow: true,
- Location: &seek,
+ Location: seek,
MustExist: true,
Poll: poll,
Logger: tail.DiscardingLogger,
@@ -207,7 +252,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
continue
}
- log.Printf("D! [inputs.logparser] tail added for file: %v", file)
+ l.Log.Debugf("Tail added for file: %v", file)
// create a goroutine for each "tailer"
l.wg.Add(1)
@@ -228,7 +273,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) {
for line = range tailer.Lines {
if line.Err != nil {
- log.Printf("E! Error tailing file %s, Error: %s\n",
+ l.Log.Errorf("Error tailing file %s, Error: %s",
tailer.Filename, line.Err)
continue
}
@@ -274,7 +319,7 @@ func (l *LogParserPlugin) parser() {
l.acc.AddFields(m.Name(), m.Fields(), tags, m.Time())
}
} else {
- log.Println("E! Error parsing log line: " + err.Error())
+ l.Log.Errorf("Error parsing log line: %s", err.Error())
}
}
@@ -286,24 +331,38 @@ func (l *LogParserPlugin) Stop() {
defer l.Unlock()
for _, t := range l.tailers {
+ if !l.FromBeginning {
+ // store offset for resume
+ offset, err := t.Tell()
+ if err == nil {
+ l.offsets[t.Filename] = offset
+ l.Log.Debugf("Recording offset %d for file: %v", offset, t.Filename)
+ } else {
+ l.acc.AddError(fmt.Errorf("error recording offset for file %s", t.Filename))
+ }
+ }
err := t.Stop()
//message for a stopped tailer
- log.Printf("D! tail dropped for file: %v", t.Filename)
+ l.Log.Debugf("Tail dropped for file: %v", t.Filename)
if err != nil {
- log.Printf("E! Error stopping tail on file %s\n", t.Filename)
+ l.Log.Errorf("Error stopping tail on file %s", t.Filename)
}
- t.Cleanup()
}
close(l.done)
l.wg.Wait()
+
+ // persist offsets
+ offsetsMutex.Lock()
+ for k, v := range l.offsets {
+ offsets[k] = v
+ }
+ offsetsMutex.Unlock()
}
func init() {
inputs.Add("logparser", func() telegraf.Input {
- return &LogParserPlugin{
- WatchMethod: defaultWatchMethod,
- }
+ return NewLogParser()
})
}
diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go
index 90ae39161f0cf..142f78d464963 100644
--- a/plugins/inputs/logparser/logparser_test.go
+++ b/plugins/inputs/logparser/logparser_test.go
@@ -6,14 +6,17 @@ import (
"runtime"
"strings"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
-
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestStartNoParsers(t *testing.T) {
logparser := &LogParserPlugin{
+ Log: testutil.Logger{},
FromBeginning: true,
Files: []string{"testdata/*.log"},
}
@@ -26,6 +29,7 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) {
thisdir := getCurrentDir()
logparser := &LogParserPlugin{
+ Log: testutil.Logger{},
FromBeginning: true,
Files: []string{thisdir + "testdata/*.log"},
GrokConfig: GrokConfig{
@@ -43,9 +47,10 @@ func TestGrokParseLogFiles(t *testing.T) {
thisdir := getCurrentDir()
logparser := &LogParserPlugin{
+ Log: testutil.Logger{},
GrokConfig: GrokConfig{
MeasurementName: "logparser_grok",
- Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
+ Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}", "%{TEST_LOG_C}"},
CustomPatternFiles: []string{thisdir + "testdata/test-patterns"},
},
FromBeginning: true,
@@ -53,32 +58,56 @@ func TestGrokParseLogFiles(t *testing.T) {
}
acc := testutil.Accumulator{}
- assert.NoError(t, logparser.Start(&acc))
- acc.Wait(2)
+ require.NoError(t, logparser.Start(&acc))
+ acc.Wait(3)
logparser.Stop()
- acc.AssertContainsTaggedFields(t, "logparser_grok",
- map[string]interface{}{
- "clientip": "192.168.1.1",
- "myfloat": float64(1.25),
- "response_time": int64(5432),
- "myint": int64(101),
- },
- map[string]string{
- "response_code": "200",
- "path": thisdir + "testdata/test_a.log",
- })
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "logparser_grok",
+ map[string]string{
+ "response_code": "200",
+ "path": thisdir + "testdata/test_a.log",
+ },
+ map[string]interface{}{
+ "clientip": "192.168.1.1",
+ "myfloat": float64(1.25),
+ "response_time": int64(5432),
+ "myint": int64(101),
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "logparser_grok",
+ map[string]string{
+ "path": thisdir + "testdata/test_b.log",
+ },
+ map[string]interface{}{
+ "myfloat": 1.25,
+ "mystring": "mystring",
+ "nomodifier": "nomodifier",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "logparser_grok",
+ map[string]string{
+ "path": thisdir + "testdata/test_c.log",
+ "response_code": "200",
+ },
+ map[string]interface{}{
+ "clientip": "192.168.1.1",
+ "myfloat": 1.25,
+ "myint": 101,
+ "response_time": 5432,
+ },
+ time.Unix(0, 0),
+ ),
+ }
- acc.AssertContainsTaggedFields(t, "logparser_grok",
- map[string]interface{}{
- "myfloat": 1.25,
- "mystring": "mystring",
- "nomodifier": "nomodifier",
- },
- map[string]string{
- "path": thisdir + "testdata/test_b.log",
- })
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime(), testutil.SortMetrics())
}
func TestGrokParseLogFilesAppearLater(t *testing.T) {
@@ -89,6 +118,7 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) {
thisdir := getCurrentDir()
logparser := &LogParserPlugin{
+ Log: testutil.Logger{},
FromBeginning: true,
Files: []string{emptydir + "/*.log"},
GrokConfig: GrokConfig{
@@ -128,6 +158,7 @@ func TestGrokParseLogFilesOneBad(t *testing.T) {
thisdir := getCurrentDir()
logparser := &LogParserPlugin{
+ Log: testutil.Logger{},
FromBeginning: true,
Files: []string{thisdir + "testdata/test_a.log"},
GrokConfig: GrokConfig{
@@ -157,6 +188,40 @@ func TestGrokParseLogFilesOneBad(t *testing.T) {
})
}
+func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) {
+ thisdir := getCurrentDir()
+
+ logparser := &LogParserPlugin{
+ Log: testutil.Logger{},
+ GrokConfig: GrokConfig{
+ MeasurementName: "logparser_grok",
+ Patterns: []string{"%{TEST_LOG_C}"},
+ CustomPatternFiles: []string{thisdir + "testdata/test-patterns"},
+ },
+ FromBeginning: true,
+ Files: []string{thisdir + "testdata/test_c.log"},
+ }
+
+ acc := testutil.Accumulator{}
+ acc.SetDebug(true)
+ assert.NoError(t, logparser.Start(&acc))
+ acc.Wait(1)
+
+ logparser.Stop()
+
+ acc.AssertContainsTaggedFields(t, "logparser_grok",
+ map[string]interface{}{
+ "clientip": "192.168.1.1",
+ "myfloat": float64(1.25),
+ "response_time": int64(5432),
+ "myint": int64(101),
+ },
+ map[string]string{
+ "response_code": "200",
+ "path": thisdir + "testdata/test_c.log",
+ })
+}
+
func getCurrentDir() string {
_, filename, _, _ := runtime.Caller(1)
return strings.Replace(filename, "logparser_test.go", "", 1)
diff --git a/plugins/inputs/logparser/testdata/test-patterns b/plugins/inputs/logparser/testdata/test-patterns
index ba995fbd1770f..45970a9c8ed12 100644
--- a/plugins/inputs/logparser/testdata/test-patterns
+++ b/plugins/inputs/logparser/testdata/test-patterns
@@ -12,3 +12,7 @@ TEST_LOG_B \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:my
TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME}
TEST_LOG_BAD \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:int} %{WORD:dropme:drop} %{WORD:nomodifier}
+
+# Test C log line:
+# 1568723594631 1.25 200 192.168.1.1 5.432µs 101
+TEST_LOG_C %{POSINT:timestamp:ts-epochmilli} %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} %{NUMBER:myint:int}
diff --git a/plugins/inputs/logparser/testdata/test_c.log b/plugins/inputs/logparser/testdata/test_c.log
new file mode 100644
index 0000000000000..f814c0c30dfe8
--- /dev/null
+++ b/plugins/inputs/logparser/testdata/test_c.log
@@ -0,0 +1 @@
+1568723594631 1.25 200 192.168.1.1 5.432µs 101
diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md
new file mode 100644
index 0000000000000..9571de5fd8873
--- /dev/null
+++ b/plugins/inputs/logstash/README.md
@@ -0,0 +1,154 @@
+# Logstash Input Plugin
+
+This plugin reads metrics exposed by
+[Logstash Monitoring API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html).
+
+Logstash 5 and later is supported.
+
+### Configuration
+
+```toml
+[[inputs.logstash]]
+ ## The URL of the exposed Logstash API endpoint.
+ url = "http://127.0.0.1:9600"
+
+ ## Use Logstash 5 single pipeline API, set to true when monitoring
+ ## Logstash 5.
+ # single_pipeline = false
+
+ ## Enable optional collection components. Can contain
+ ## "pipelines", "process", and "jvm".
+ # collect = ["pipelines", "process", "jvm"]
+
+ ## Timeout for HTTP requests.
+ # timeout = "5s"
+
+ ## Optional HTTP Basic Auth credentials.
+ # username = "username"
+ # password = "pa$$word"
+
+ ## Optional TLS Config.
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## Use TLS but skip chain & host verification.
+ # insecure_skip_verify = false
+
+ ## Optional HTTP headers.
+ # [inputs.logstash.headers]
+ # "X-Special-Header" = "Special-Value"
+```
+
+### Metrics
+
+- logstash_jvm
+ - tags:
+ - node_id
+ - node_name
+ - node_host
+ - node_version
+ - fields:
+ - threads_peak_count
+ - mem_pools_survivor_peak_max_in_bytes
+ - mem_pools_survivor_max_in_bytes
+ - mem_pools_old_peak_used_in_bytes
+ - mem_pools_young_used_in_bytes
+ - mem_non_heap_committed_in_bytes
+ - threads_count
+ - mem_pools_old_committed_in_bytes
+ - mem_pools_young_peak_max_in_bytes
+ - mem_heap_used_percent
+ - gc_collectors_young_collection_time_in_millis
+ - mem_pools_survivor_peak_used_in_bytes
+ - mem_pools_young_committed_in_bytes
+ - gc_collectors_old_collection_time_in_millis
+ - gc_collectors_old_collection_count
+ - mem_pools_survivor_used_in_bytes
+ - mem_pools_old_used_in_bytes
+ - mem_pools_young_max_in_bytes
+ - mem_heap_max_in_bytes
+ - mem_non_heap_used_in_bytes
+ - mem_pools_survivor_committed_in_bytes
+ - mem_pools_old_max_in_bytes
+ - mem_heap_committed_in_bytes
+ - mem_pools_old_peak_max_in_bytes
+ - mem_pools_young_peak_used_in_bytes
+ - mem_heap_used_in_bytes
+ - gc_collectors_young_collection_count
+ - uptime_in_millis
+
++ logstash_process
+ - tags:
+ - node_id
+ - node_name
+ - source
+ - node_version
+ - fields:
+ - open_file_descriptors
+ - cpu_load_average_1m
+ - cpu_load_average_5m
+ - cpu_load_average_15m
+ - cpu_total_in_millis
+ - cpu_percent
+ - peak_open_file_descriptors
+ - max_file_descriptors
+ - mem_total_virtual_in_bytes
+ - mem_total_virtual_in_bytes
+
+- logstash_events
+ - tags:
+ - node_id
+ - node_name
+ - source
+ - node_version
+ - pipeline (for Logstash 6+)
+ - fields:
+ - queue_push_duration_in_millis
+ - duration_in_millis
+ - in
+ - filtered
+ - out
+
++ logstash_plugins
+ - tags:
+ - node_id
+ - node_name
+ - source
+ - node_version
+ - pipeline (for Logstash 6+)
+ - plugin_id
+ - plugin_name
+ - plugin_type
+ - fields:
+ - queue_push_duration_in_millis (for input plugins only)
+ - duration_in_millis
+ - in
+ - out
+
+- logstash_queue
+ - tags:
+ - node_id
+ - node_name
+ - source
+ - node_version
+ - pipeline (for Logstash 6+)
+ - queue_type
+ - fields:
+ - events
+ - free_space_in_bytes
+ - max_queue_size_in_bytes
+ - max_unread_events
+ - page_capacity_in_bytes
+ - queue_size_in_bytes
+
+### Example Output
+
+```
+logstash_jvm,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt gc_collectors_old_collection_count=2,gc_collectors_old_collection_time_in_millis=100,gc_collectors_young_collection_count=26,gc_collectors_young_collection_time_in_millis=1028,mem_heap_committed_in_bytes=1056309248,mem_heap_max_in_bytes=1056309248,mem_heap_used_in_bytes=207216328,mem_heap_used_percent=19,mem_non_heap_committed_in_bytes=160878592,mem_non_heap_used_in_bytes=140838184,mem_pools_old_committed_in_bytes=899284992,mem_pools_old_max_in_bytes=899284992,mem_pools_old_peak_max_in_bytes=899284992,mem_pools_old_peak_used_in_bytes=189468088,mem_pools_old_used_in_bytes=189468088,mem_pools_survivor_committed_in_bytes=17432576,mem_pools_survivor_max_in_bytes=17432576,mem_pools_survivor_peak_max_in_bytes=17432576,mem_pools_survivor_peak_used_in_bytes=17432576,mem_pools_survivor_used_in_bytes=12572640,mem_pools_young_committed_in_bytes=139591680,mem_pools_young_max_in_bytes=139591680,mem_pools_young_peak_max_in_bytes=139591680,mem_pools_young_peak_used_in_bytes=139591680,mem_pools_young_used_in_bytes=5175600,threads_count=20,threads_peak_count=24,uptime_in_millis=739089 1566425244000000000
+logstash_process,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt cpu_load_average_15m=0.03,cpu_load_average_1m=0.01,cpu_load_average_5m=0.04,cpu_percent=0,cpu_total_in_millis=83230,max_file_descriptors=16384,mem_total_virtual_in_bytes=3689132032,open_file_descriptors=118,peak_open_file_descriptors=118 1566425244000000000
+logstash_events,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,source=debian-stretch-logstash6.virt duration_in_millis=0,filtered=0,in=0,out=0,queue_push_duration_in_millis=0 1566425244000000000
+logstash_plugins,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,plugin_id=2807cb8610ba7854efa9159814fcf44c3dda762b43bd088403b30d42c88e69ab,plugin_name=beats,plugin_type=input,source=debian-stretch-logstash6.virt out=0,queue_push_duration_in_millis=0 1566425244000000000
+logstash_plugins,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,plugin_id=7a6c973366186a695727c73935634a00bccd52fceedf30d0746983fce572d50c,plugin_name=file,plugin_type=output,source=debian-stretch-logstash6.virt duration_in_millis=0,in=0,out=0 1566425244000000000
+logstash_queue,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,queue_type=memory,source=debian-stretch-logstash6.virt events=0 1566425244000000000
+```
diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go
new file mode 100644
index 0000000000000..e360ba032ff35
--- /dev/null
+++ b/plugins/inputs/logstash/logstash.go
@@ -0,0 +1,482 @@
+package logstash
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/choice"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ jsonParser "github.com/influxdata/telegraf/plugins/parsers/json"
+)
+
+const sampleConfig = `
+ ## The URL of the exposed Logstash API endpoint.
+ url = "http://127.0.0.1:9600"
+
+ ## Use Logstash 5 single pipeline API, set to true when monitoring
+ ## Logstash 5.
+ # single_pipeline = false
+
+ ## Enable optional collection components. Can contain
+ ## "pipelines", "process", and "jvm".
+ # collect = ["pipelines", "process", "jvm"]
+
+ ## Timeout for HTTP requests.
+ # timeout = "5s"
+
+ ## Optional HTTP Basic Auth credentials.
+ # username = "username"
+ # password = "pa$$word"
+
+ ## Optional TLS Config.
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## Use TLS but skip chain & host verification.
+ # insecure_skip_verify = false
+
+ ## Optional HTTP headers.
+ # [inputs.logstash.headers]
+ # "X-Special-Header" = "Special-Value"
+`
+
+type Logstash struct {
+ URL string `toml:"url"`
+
+ SinglePipeline bool `toml:"single_pipeline"`
+ Collect []string `toml:"collect"`
+
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ Headers map[string]string `toml:"headers"`
+ Timeout internal.Duration `toml:"timeout"`
+ tls.ClientConfig
+
+ client *http.Client
+}
+
+// NewLogstash create an instance of the plugin with default settings
+func NewLogstash() *Logstash {
+ return &Logstash{
+ URL: "http://127.0.0.1:9600",
+ SinglePipeline: false,
+ Collect: []string{"pipelines", "process", "jvm"},
+ Headers: make(map[string]string),
+ Timeout: internal.Duration{Duration: time.Second * 5},
+ }
+}
+
+// Description returns short info about plugin
+func (logstash *Logstash) Description() string {
+ return "Read metrics exposed by Logstash"
+}
+
+// SampleConfig returns details how to configure plugin
+func (logstash *Logstash) SampleConfig() string {
+ return sampleConfig
+}
+
+type ProcessStats struct {
+ ID string `json:"id"`
+ Process interface{} `json:"process"`
+ Name string `json:"name"`
+ Host string `json:"host"`
+ Version string `json:"version"`
+}
+
+type JVMStats struct {
+ ID string `json:"id"`
+ JVM interface{} `json:"jvm"`
+ Name string `json:"name"`
+ Host string `json:"host"`
+ Version string `json:"version"`
+}
+
+type PipelinesStats struct {
+ ID string `json:"id"`
+ Pipelines map[string]Pipeline `json:"pipelines"`
+ Name string `json:"name"`
+ Host string `json:"host"`
+ Version string `json:"version"`
+}
+
+type PipelineStats struct {
+ ID string `json:"id"`
+ Pipeline Pipeline `json:"pipeline"`
+ Name string `json:"name"`
+ Host string `json:"host"`
+ Version string `json:"version"`
+}
+
+type Pipeline struct {
+ Events interface{} `json:"events"`
+ Plugins PipelinePlugins `json:"plugins"`
+ Reloads interface{} `json:"reloads"`
+ Queue PipelineQueue `json:"queue"`
+}
+
+type Plugin struct {
+ ID string `json:"id"`
+ Events interface{} `json:"events"`
+ Name string `json:"name"`
+}
+
+type PipelinePlugins struct {
+ Inputs []Plugin `json:"inputs"`
+ Filters []Plugin `json:"filters"`
+ Outputs []Plugin `json:"outputs"`
+}
+
+type PipelineQueue struct {
+ Events float64 `json:"events"`
+ Type string `json:"type"`
+ Capacity interface{} `json:"capacity"`
+ Data interface{} `json:"data"`
+}
+
+const jvmStats = "/_node/stats/jvm"
+const processStats = "/_node/stats/process"
+const pipelinesStats = "/_node/stats/pipelines"
+const pipelineStats = "/_node/stats/pipeline"
+
+func (i *Logstash) Init() error {
+ err := choice.CheckSlice(i.Collect, []string{"pipelines", "process", "jvm"})
+ if err != nil {
+ return fmt.Errorf(`cannot verify "collect" setting: %v`, err)
+ }
+ return nil
+}
+
+// createHttpClient create a clients to access API
+func (logstash *Logstash) createHttpClient() (*http.Client, error) {
+ tlsConfig, err := logstash.ClientConfig.TLSConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ client := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsConfig,
+ },
+ Timeout: logstash.Timeout.Duration,
+ }
+
+ return client, nil
+}
+
+// gatherJsonData query the data source and parse the response JSON
+func (logstash *Logstash) gatherJsonData(url string, value interface{}) error {
+ request, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+
+ if (logstash.Username != "") || (logstash.Password != "") {
+ request.SetBasicAuth(logstash.Username, logstash.Password)
+ }
+
+ for header, value := range logstash.Headers {
+ if strings.ToLower(header) == "host" {
+ request.Host = value
+ } else {
+ request.Header.Add(header, value)
+ }
+ }
+
+ response, err := logstash.client.Do(request)
+ if err != nil {
+ return err
+ }
+
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusOK {
+ // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+ body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200))
+ return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body)
+ }
+
+ err = json.NewDecoder(response.Body).Decode(value)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// gatherJVMStats gather the JVM metrics and add results to the accumulator
+func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error {
+ jvmStats := &JVMStats{}
+
+ err := logstash.gatherJsonData(url, jvmStats)
+ if err != nil {
+ return err
+ }
+
+ tags := map[string]string{
+ "node_id": jvmStats.ID,
+ "node_name": jvmStats.Name,
+ "node_version": jvmStats.Version,
+ "source": jvmStats.Host,
+ }
+
+ flattener := jsonParser.JSONFlattener{}
+ err = flattener.FlattenJSON("", jvmStats.JVM)
+ if err != nil {
+ return err
+ }
+ accumulator.AddFields("logstash_jvm", flattener.Fields, tags)
+
+ return nil
+}
+
+// gatherJVMStats gather the Process metrics and add results to the accumulator
+func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error {
+ processStats := &ProcessStats{}
+
+ err := logstash.gatherJsonData(url, processStats)
+ if err != nil {
+ return err
+ }
+
+ tags := map[string]string{
+ "node_id": processStats.ID,
+ "node_name": processStats.Name,
+ "node_version": processStats.Version,
+ "source": processStats.Host,
+ }
+
+ flattener := jsonParser.JSONFlattener{}
+ err = flattener.FlattenJSON("", processStats.Process)
+ if err != nil {
+ return err
+ }
+ accumulator.AddFields("logstash_process", flattener.Fields, tags)
+
+ return nil
+}
+
+// gatherPluginsStats go through a list of plugins and add their metrics to the accumulator
+func (logstash *Logstash) gatherPluginsStats(
+ plugins []Plugin,
+ pluginType string,
+ tags map[string]string,
+ accumulator telegraf.Accumulator) error {
+
+ for _, plugin := range plugins {
+ pluginTags := map[string]string{
+ "plugin_name": plugin.Name,
+ "plugin_id": plugin.ID,
+ "plugin_type": pluginType,
+ }
+ for tag, value := range tags {
+ pluginTags[tag] = value
+ }
+ flattener := jsonParser.JSONFlattener{}
+ err := flattener.FlattenJSON("", plugin.Events)
+ if err != nil {
+ return err
+ }
+ accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags)
+ }
+
+ return nil
+}
+
+func (logstash *Logstash) gatherQueueStats(
+ queue *PipelineQueue,
+ tags map[string]string,
+ accumulator telegraf.Accumulator) error {
+
+ var err error
+ queueTags := map[string]string{
+ "queue_type": queue.Type,
+ }
+ for tag, value := range tags {
+ queueTags[tag] = value
+ }
+
+ queueFields := map[string]interface{}{
+ "events": queue.Events,
+ }
+
+ if queue.Type != "memory" {
+ flattener := jsonParser.JSONFlattener{}
+ err = flattener.FlattenJSON("", queue.Capacity)
+ if err != nil {
+ return err
+ }
+ err = flattener.FlattenJSON("", queue.Data)
+ if err != nil {
+ return err
+ }
+ for field, value := range flattener.Fields {
+ queueFields[field] = value
+ }
+ }
+
+ accumulator.AddFields("logstash_queue", queueFields, queueTags)
+
+ return nil
+}
+
+// gatherJVMStats gather the Pipeline metrics and add results to the accumulator (for Logstash < 6)
+func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error {
+ pipelineStats := &PipelineStats{}
+
+ err := logstash.gatherJsonData(url, pipelineStats)
+ if err != nil {
+ return err
+ }
+
+ tags := map[string]string{
+ "node_id": pipelineStats.ID,
+ "node_name": pipelineStats.Name,
+ "node_version": pipelineStats.Version,
+ "source": pipelineStats.Host,
+ }
+
+ flattener := jsonParser.JSONFlattener{}
+ err = flattener.FlattenJSON("", pipelineStats.Pipeline.Events)
+ if err != nil {
+ return err
+ }
+ accumulator.AddFields("logstash_events", flattener.Fields, tags)
+
+ err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator)
+ if err != nil {
+ return err
+ }
+ err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator)
+ if err != nil {
+ return err
+ }
+ err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator)
+ if err != nil {
+ return err
+ }
+
+ err = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// gatherJVMStats gather the Pipelines metrics and add results to the accumulator (for Logstash >= 6)
+func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error {
+ pipelinesStats := &PipelinesStats{}
+
+ err := logstash.gatherJsonData(url, pipelinesStats)
+ if err != nil {
+ return err
+ }
+
+ for pipelineName, pipeline := range pipelinesStats.Pipelines {
+ tags := map[string]string{
+ "node_id": pipelinesStats.ID,
+ "node_name": pipelinesStats.Name,
+ "node_version": pipelinesStats.Version,
+ "pipeline": pipelineName,
+ "source": pipelinesStats.Host,
+ }
+
+ flattener := jsonParser.JSONFlattener{}
+ err := flattener.FlattenJSON("", pipeline.Events)
+ if err != nil {
+ return err
+ }
+ accumulator.AddFields("logstash_events", flattener.Fields, tags)
+
+ err = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator)
+ if err != nil {
+ return err
+ }
+ err = logstash.gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator)
+ if err != nil {
+ return err
+ }
+ err = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator)
+ if err != nil {
+ return err
+ }
+
+ err = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Gather ask this plugin to start gathering metrics
+func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {
+ if logstash.client == nil {
+ client, err := logstash.createHttpClient()
+
+ if err != nil {
+ return err
+ }
+ logstash.client = client
+ }
+
+ if choice.Contains("jvm", logstash.Collect) {
+ jvmUrl, err := url.Parse(logstash.URL + jvmStats)
+ if err != nil {
+ return err
+ }
+ if err := logstash.gatherJVMStats(jvmUrl.String(), accumulator); err != nil {
+ return err
+ }
+ }
+
+ if choice.Contains("process", logstash.Collect) {
+ processUrl, err := url.Parse(logstash.URL + processStats)
+ if err != nil {
+ return err
+ }
+ if err := logstash.gatherProcessStats(processUrl.String(), accumulator); err != nil {
+ return err
+ }
+ }
+
+ if choice.Contains("pipelines", logstash.Collect) {
+ if logstash.SinglePipeline {
+ pipelineUrl, err := url.Parse(logstash.URL + pipelineStats)
+ if err != nil {
+ return err
+ }
+ if err := logstash.gatherPipelineStats(pipelineUrl.String(), accumulator); err != nil {
+ return err
+ }
+ } else {
+ pipelinesUrl, err := url.Parse(logstash.URL + pipelinesStats)
+ if err != nil {
+ return err
+ }
+ if err := logstash.gatherPipelinesStats(pipelinesUrl.String(), accumulator); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// init registers this plugin instance
+func init() {
+ inputs.Add("logstash", func() telegraf.Input {
+ return NewLogstash()
+ })
+}
diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go
new file mode 100644
index 0000000000000..aeb4e46f8dbb6
--- /dev/null
+++ b/plugins/inputs/logstash/logstash_test.go
@@ -0,0 +1,691 @@
+package logstash
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+)
+
+var logstashTest = NewLogstash()
+
+var (
+ logstash5accPipelineStats testutil.Accumulator
+ logstash6accPipelinesStats testutil.Accumulator
+ logstash5accProcessStats testutil.Accumulator
+ logstash6accProcessStats testutil.Accumulator
+ logstash5accJVMStats testutil.Accumulator
+ logstash6accJVMStats testutil.Accumulator
+)
+
+func Test_Logstash5GatherProcessStats(test *testing.T) {
+ fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+ writer.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(writer, "%s", string(logstash5ProcessJSON))
+ }))
+ requestURL, err := url.Parse(logstashTest.URL)
+ if err != nil {
+ test.Logf("Can't connect to: %s", logstashTest.URL)
+ }
+ fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
+ fakeServer.Start()
+ defer fakeServer.Close()
+
+ if logstashTest.client == nil {
+ client, err := logstashTest.createHttpClient()
+
+ if err != nil {
+ test.Logf("Can't createHttpClient")
+ }
+ logstashTest.client = client
+ }
+
+ if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats); err != nil {
+ test.Logf("Can't gather Process stats")
+ }
+
+ logstash5accProcessStats.AssertContainsTaggedFields(
+ test,
+ "logstash_process",
+ map[string]interface{}{
+ "open_file_descriptors": float64(89.0),
+ "max_file_descriptors": float64(1.048576e+06),
+ "cpu_percent": float64(3.0),
+ "cpu_load_average_5m": float64(0.61),
+ "cpu_load_average_15m": float64(0.54),
+ "mem_total_virtual_in_bytes": float64(4.809506816e+09),
+ "cpu_total_in_millis": float64(1.5526e+11),
+ "cpu_load_average_1m": float64(0.49),
+ "peak_open_file_descriptors": float64(100.0),
+ },
+ map[string]string{
+ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"),
+ "node_name": string("node-5-test"),
+ "source": string("node-5"),
+ "node_version": string("5.3.0"),
+ },
+ )
+}
+
+func Test_Logstash6GatherProcessStats(test *testing.T) {
+ fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+ writer.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(writer, "%s", string(logstash6ProcessJSON))
+ }))
+ requestURL, err := url.Parse(logstashTest.URL)
+ if err != nil {
+ test.Logf("Can't connect to: %s", logstashTest.URL)
+ }
+ fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
+ fakeServer.Start()
+ defer fakeServer.Close()
+
+ if logstashTest.client == nil {
+ client, err := logstashTest.createHttpClient()
+
+ if err != nil {
+ test.Logf("Can't createHttpClient")
+ }
+ logstashTest.client = client
+ }
+
+ if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats); err != nil {
+ test.Logf("Can't gather Process stats")
+ }
+
+ logstash6accProcessStats.AssertContainsTaggedFields(
+ test,
+ "logstash_process",
+ map[string]interface{}{
+ "open_file_descriptors": float64(133.0),
+ "max_file_descriptors": float64(262144.0),
+ "cpu_percent": float64(0.0),
+ "cpu_load_average_5m": float64(42.4),
+ "cpu_load_average_15m": float64(38.95),
+ "mem_total_virtual_in_bytes": float64(17923452928.0),
+ "cpu_total_in_millis": float64(5841460),
+ "cpu_load_average_1m": float64(48.2),
+ "peak_open_file_descriptors": float64(145.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ },
+ )
+}
+
+func Test_Logstash5GatherPipelineStats(test *testing.T) {
+ //logstash5accPipelineStats.SetDebug(true)
+ fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+ writer.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON))
+ }))
+ requestURL, err := url.Parse(logstashTest.URL)
+ if err != nil {
+ test.Logf("Can't connect to: %s", logstashTest.URL)
+ }
+ fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
+ fakeServer.Start()
+ defer fakeServer.Close()
+
+ if logstashTest.client == nil {
+ client, err := logstashTest.createHttpClient()
+
+ if err != nil {
+ test.Logf("Can't createHttpClient")
+ }
+ logstashTest.client = client
+ }
+
+ if err := logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats); err != nil {
+ test.Logf("Can't gather Pipeline stats")
+ }
+
+ logstash5accPipelineStats.AssertContainsTaggedFields(
+ test,
+ "logstash_events",
+ map[string]interface{}{
+ "duration_in_millis": float64(1151.0),
+ "in": float64(1269.0),
+ "filtered": float64(1269.0),
+ "out": float64(1269.0),
+ },
+ map[string]string{
+ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"),
+ "node_name": string("node-5-test"),
+ "source": string("node-5"),
+ "node_version": string("5.3.0"),
+ },
+ )
+
+ fields := make(map[string]interface{})
+ fields["queue_push_duration_in_millis"] = float64(32.0)
+ fields["out"] = float64(2.0)
+
+ logstash5accPipelineStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ fields,
+ map[string]string{
+ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"),
+ "node_name": string("node-5-test"),
+ "source": string("node-5"),
+ "node_version": string("5.3.0"),
+ "plugin_name": string("beats"),
+ "plugin_id": string("a35197a509596954e905e38521bae12b1498b17d-1"),
+ "plugin_type": string("input"),
+ },
+ )
+
+ logstash5accPipelineStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(360.0),
+ "in": float64(1269.0),
+ "out": float64(1269.0),
+ },
+ map[string]string{
+ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"),
+ "node_name": string("node-5-test"),
+ "source": string("node-5"),
+ "node_version": string("5.3.0"),
+ "plugin_name": string("stdout"),
+ "plugin_id": string("582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-2"),
+ "plugin_type": string("output"),
+ },
+ )
+
+ logstash5accPipelineStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(228.0),
+ "in": float64(1269.0),
+ "out": float64(1269.0),
+ },
+ map[string]string{
+ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"),
+ "node_name": string("node-5-test"),
+ "source": string("node-5"),
+ "node_version": string("5.3.0"),
+ "plugin_name": string("s3"),
+ "plugin_id": string("582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-3"),
+ "plugin_type": string("output"),
+ },
+ )
+}
+
+func Test_Logstash6GatherPipelinesStats(test *testing.T) {
+ //logstash6accPipelinesStats.SetDebug(true)
+ fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+ writer.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON))
+ }))
+ requestURL, err := url.Parse(logstashTest.URL)
+ if err != nil {
+ test.Logf("Can't connect to: %s", logstashTest.URL)
+ }
+ fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
+ fakeServer.Start()
+ defer fakeServer.Close()
+
+ if logstashTest.client == nil {
+ client, err := logstashTest.createHttpClient()
+
+ if err != nil {
+ test.Logf("Can't createHttpClient")
+ }
+ logstashTest.client = client
+ }
+
+ if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats); err != nil {
+ test.Logf("Can't gather Pipeline stats")
+ }
+
+ fields := make(map[string]interface{})
+ fields["duration_in_millis"] = float64(8540751.0)
+ fields["queue_push_duration_in_millis"] = float64(366.0)
+ fields["in"] = float64(180659.0)
+ fields["filtered"] = float64(180659.0)
+ fields["out"] = float64(180659.0)
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_events",
+ fields,
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ },
+ )
+
+ fields = make(map[string]interface{})
+ fields["queue_push_duration_in_millis"] = float64(366.0)
+ fields["out"] = float64(180659.0)
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ fields,
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("kafka"),
+ "plugin_id": string("input-kafka"),
+ "plugin_type": string("input"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(2117.0),
+ "in": float64(27641.0),
+ "out": float64(27641.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("mutate"),
+ "plugin_id": string("155b0ad18abbf3df1e0cb7bddef0d77c5ba699efe5a0f8a28502d140549baf54"),
+ "plugin_type": string("filter"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(2117.0),
+ "in": float64(27641.0),
+ "out": float64(27641.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("mutate"),
+ "plugin_id": string("155b0ad18abbf3df1e0cb7bddef0d77c5ba699efe5a0f8a28502d140549baf54"),
+ "plugin_type": string("filter"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(13149.0),
+ "in": float64(180659.0),
+ "out": float64(177549.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("date"),
+ "plugin_id": string("d079424bb6b7b8c7c61d9c5e0ddae445e92fa9ffa2e8690b0a669f7c690542f0"),
+ "plugin_type": string("filter"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(2814.0),
+ "in": float64(76602.0),
+ "out": float64(76602.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("mutate"),
+ "plugin_id": string("25afa60ab6dc30512fe80efa3493e4928b5b1b109765b7dc46a3e4bbf293d2d4"),
+ "plugin_type": string("filter"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(9.0),
+ "in": float64(934.0),
+ "out": float64(934.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("mutate"),
+ "plugin_id": string("2d9fa8f74eeb137bfa703b8050bad7d76636fface729e4585b789b5fc9bed668"),
+ "plugin_type": string("filter"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(173.0),
+ "in": float64(3110.0),
+ "out": float64(0.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("drop"),
+ "plugin_id": string("4ed14c9ef0198afe16c31200041e98d321cb5c2e6027e30b077636b8c4842110"),
+ "plugin_type": string("filter"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(5605.0),
+ "in": float64(75482.0),
+ "out": float64(75482.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("mutate"),
+ "plugin_id": string("358ce1eb387de7cd5711c2fb4de64cd3b12e5ca9a4c45f529516bcb053a31df4"),
+ "plugin_type": string("filter"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(313992.0),
+ "in": float64(180659.0),
+ "out": float64(180659.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("csv"),
+ "plugin_id": string("82a9bbb02fff37a63c257c1f146b0a36273c7cbbebe83c0a51f086e5280bf7bb"),
+ "plugin_type": string("filter"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(0.0),
+ "in": float64(0.0),
+ "out": float64(0.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("mutate"),
+ "plugin_id": string("8fb13a8cdd4257b52724d326aa1549603ffdd4e4fde6d20720c96b16238c18c3"),
+ "plugin_type": string("filter"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(651386.0),
+ "in": float64(177549.0),
+ "out": float64(177549.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("elasticsearch"),
+ "plugin_id": string("output-elk"),
+ "plugin_type": string("output"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(186751.0),
+ "in": float64(177549.0),
+ "out": float64(177549.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("kafka"),
+ "plugin_id": string("output-kafka1"),
+ "plugin_type": string("output"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_plugins",
+ map[string]interface{}{
+ "duration_in_millis": float64(7335196.0),
+ "in": float64(177549.0),
+ "out": float64(177549.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "plugin_name": string("kafka"),
+ "plugin_id": string("output-kafka2"),
+ "plugin_type": string("output"),
+ },
+ )
+
+ logstash6accPipelinesStats.AssertContainsTaggedFields(
+ test,
+ "logstash_queue",
+ map[string]interface{}{
+ "events": float64(103),
+ "free_space_in_bytes": float64(36307369984),
+ "max_queue_size_in_bytes": float64(1073741824),
+ "max_unread_events": float64(0),
+ "page_capacity_in_bytes": float64(67108864),
+ "queue_size_in_bytes": float64(1872391),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ "pipeline": string("main"),
+ "queue_type": string("persisted"),
+ },
+ )
+
+}
+
+func Test_Logstash5GatherJVMStats(test *testing.T) {
+ fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+ writer.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(writer, "%s", string(logstash5JvmJSON))
+ }))
+ requestURL, err := url.Parse(logstashTest.URL)
+ if err != nil {
+ test.Logf("Can't connect to: %s", logstashTest.URL)
+ }
+ fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
+ fakeServer.Start()
+ defer fakeServer.Close()
+
+ if logstashTest.client == nil {
+ client, err := logstashTest.createHttpClient()
+
+ if err != nil {
+ test.Logf("Can't createHttpClient")
+ }
+ logstashTest.client = client
+ }
+
+ if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats); err != nil {
+ test.Logf("Can't gather JVM stats")
+ }
+
+ logstash5accJVMStats.AssertContainsTaggedFields(
+ test,
+ "logstash_jvm",
+ map[string]interface{}{
+ "mem_pools_young_max_in_bytes": float64(5.5836672e+08),
+ "mem_pools_young_committed_in_bytes": float64(1.43261696e+08),
+ "mem_heap_committed_in_bytes": float64(5.1904512e+08),
+ "threads_count": float64(29.0),
+ "mem_pools_old_peak_used_in_bytes": float64(1.27900864e+08),
+ "mem_pools_old_peak_max_in_bytes": float64(7.2482816e+08),
+ "mem_heap_used_percent": float64(16.0),
+ "gc_collectors_young_collection_time_in_millis": float64(3235.0),
+ "mem_pools_survivor_committed_in_bytes": float64(1.7825792e+07),
+ "mem_pools_young_used_in_bytes": float64(7.6049384e+07),
+ "mem_non_heap_committed_in_bytes": float64(2.91487744e+08),
+ "mem_pools_survivor_peak_max_in_bytes": float64(3.4865152e+07),
+ "mem_pools_young_peak_max_in_bytes": float64(2.7918336e+08),
+ "uptime_in_millis": float64(4.803461e+06),
+ "mem_pools_survivor_peak_used_in_bytes": float64(8.912896e+06),
+ "mem_pools_survivor_max_in_bytes": float64(6.9730304e+07),
+ "gc_collectors_old_collection_count": float64(2.0),
+ "mem_pools_survivor_used_in_bytes": float64(9.419672e+06),
+ "mem_pools_old_used_in_bytes": float64(2.55801728e+08),
+ "mem_pools_old_max_in_bytes": float64(1.44965632e+09),
+ "mem_pools_young_peak_used_in_bytes": float64(7.1630848e+07),
+ "mem_heap_used_in_bytes": float64(3.41270784e+08),
+ "mem_heap_max_in_bytes": float64(2.077753344e+09),
+ "gc_collectors_young_collection_count": float64(616.0),
+ "threads_peak_count": float64(31.0),
+ "mem_pools_old_committed_in_bytes": float64(3.57957632e+08),
+ "gc_collectors_old_collection_time_in_millis": float64(114.0),
+ "mem_non_heap_used_in_bytes": float64(2.68905936e+08),
+ },
+ map[string]string{
+ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"),
+ "node_name": string("node-5-test"),
+ "source": string("node-5"),
+ "node_version": string("5.3.0"),
+ },
+ )
+
+}
+
+func Test_Logstash6GatherJVMStats(test *testing.T) {
+ fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+ writer.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(writer, "%s", string(logstash6JvmJSON))
+ }))
+ requestURL, err := url.Parse(logstashTest.URL)
+ if err != nil {
+ test.Logf("Can't connect to: %s", logstashTest.URL)
+ }
+ fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
+ fakeServer.Start()
+ defer fakeServer.Close()
+
+ if logstashTest.client == nil {
+ client, err := logstashTest.createHttpClient()
+
+ if err != nil {
+ test.Logf("Can't createHttpClient")
+ }
+ logstashTest.client = client
+ }
+
+ if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats); err != nil {
+ test.Logf("Can't gather JVM stats")
+ }
+
+ logstash6accJVMStats.AssertContainsTaggedFields(
+ test,
+ "logstash_jvm",
+ map[string]interface{}{
+ "mem_pools_young_max_in_bytes": float64(1605304320.0),
+ "mem_pools_young_committed_in_bytes": float64(71630848.0),
+ "mem_heap_committed_in_bytes": float64(824963072.0),
+ "threads_count": float64(60.0),
+ "mem_pools_old_peak_used_in_bytes": float64(696572600.0),
+ "mem_pools_old_peak_max_in_bytes": float64(6583418880.0),
+ "mem_heap_used_percent": float64(2.0),
+ "gc_collectors_young_collection_time_in_millis": float64(107321.0),
+ "mem_pools_survivor_committed_in_bytes": float64(8912896.0),
+ "mem_pools_young_used_in_bytes": float64(11775120.0),
+ "mem_non_heap_committed_in_bytes": float64(222986240.0),
+ "mem_pools_survivor_peak_max_in_bytes": float64(200605696),
+ "mem_pools_young_peak_max_in_bytes": float64(1605304320.0),
+ "uptime_in_millis": float64(281850926.0),
+ "mem_pools_survivor_peak_used_in_bytes": float64(8912896.0),
+ "mem_pools_survivor_max_in_bytes": float64(200605696.0),
+ "gc_collectors_old_collection_count": float64(37.0),
+ "mem_pools_survivor_used_in_bytes": float64(835008.0),
+ "mem_pools_old_used_in_bytes": float64(189750576.0),
+ "mem_pools_old_max_in_bytes": float64(6583418880.0),
+ "mem_pools_young_peak_used_in_bytes": float64(71630848.0),
+ "mem_heap_used_in_bytes": float64(202360704.0),
+ "mem_heap_max_in_bytes": float64(8389328896.0),
+ "gc_collectors_young_collection_count": float64(2094.0),
+ "threads_peak_count": float64(62.0),
+ "mem_pools_old_committed_in_bytes": float64(744419328.0),
+ "gc_collectors_old_collection_time_in_millis": float64(7492.0),
+ "mem_non_heap_used_in_bytes": float64(197878896.0),
+ },
+ map[string]string{
+ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"),
+ "node_name": string("node-6-test"),
+ "source": string("node-6"),
+ "node_version": string("6.4.2"),
+ },
+ )
+
+}
diff --git a/plugins/inputs/logstash/samples_logstash5.go b/plugins/inputs/logstash/samples_logstash5.go
new file mode 100644
index 0000000000000..598f6dab5e9df
--- /dev/null
+++ b/plugins/inputs/logstash/samples_logstash5.go
@@ -0,0 +1,156 @@
+package logstash
+
+const logstash5ProcessJSON = `
+{
+ "host" : "node-5",
+ "version" : "5.3.0",
+ "http_address" : "0.0.0.0:9600",
+ "id" : "a360d8cf-6289-429d-8419-6145e324b574",
+ "name" : "node-5-test",
+ "process" : {
+ "open_file_descriptors" : 89,
+ "peak_open_file_descriptors" : 100,
+ "max_file_descriptors" : 1048576,
+ "mem" : {
+ "total_virtual_in_bytes" : 4809506816
+ },
+ "cpu" : {
+ "total_in_millis" : 155260000000,
+ "percent" : 3,
+ "load_average" : {
+ "1m" : 0.49,
+ "5m" : 0.61,
+ "15m" : 0.54
+ }
+ }
+ }
+}
+`
+
+const logstash5JvmJSON = `
+{
+ "host" : "node-5",
+ "version" : "5.3.0",
+ "http_address" : "0.0.0.0:9600",
+ "id" : "a360d8cf-6289-429d-8419-6145e324b574",
+ "name" : "node-5-test",
+ "jvm" : {
+ "threads" : {
+ "count" : 29,
+ "peak_count" : 31
+ },
+ "mem" : {
+ "heap_used_in_bytes" : 341270784,
+ "heap_used_percent" : 16,
+ "heap_committed_in_bytes" : 519045120,
+ "heap_max_in_bytes" : 2077753344,
+ "non_heap_used_in_bytes" : 268905936,
+ "non_heap_committed_in_bytes" : 291487744,
+ "pools" : {
+ "survivor" : {
+ "peak_used_in_bytes" : 8912896,
+ "used_in_bytes" : 9419672,
+ "peak_max_in_bytes" : 34865152,
+ "max_in_bytes" : 69730304,
+ "committed_in_bytes" : 17825792
+ },
+ "old" : {
+ "peak_used_in_bytes" : 127900864,
+ "used_in_bytes" : 255801728,
+ "peak_max_in_bytes" : 724828160,
+ "max_in_bytes" : 1449656320,
+ "committed_in_bytes" : 357957632
+ },
+ "young" : {
+ "peak_used_in_bytes" : 71630848,
+ "used_in_bytes" : 76049384,
+ "peak_max_in_bytes" : 279183360,
+ "max_in_bytes" : 558366720,
+ "committed_in_bytes" : 143261696
+ }
+ }
+ },
+ "gc" : {
+ "collectors" : {
+ "old" : {
+ "collection_time_in_millis" : 114,
+ "collection_count" : 2
+ },
+ "young" : {
+ "collection_time_in_millis" : 3235,
+ "collection_count" : 616
+ }
+ }
+ },
+ "uptime_in_millis" : 4803461
+ }
+}
+`
+
+const logstash5PipelineJSON = `
+{
+ "host" : "node-5",
+ "version" : "5.3.0",
+ "http_address" : "0.0.0.0:9600",
+ "id" : "a360d8cf-6289-429d-8419-6145e324b574",
+ "name" : "node-5-test",
+ "pipeline" : {
+ "events" : {
+ "duration_in_millis" : 1151,
+ "in" : 1269,
+ "filtered" : 1269,
+ "out" : 1269
+ },
+ "plugins" : {
+ "inputs" : [ {
+ "id" : "a35197a509596954e905e38521bae12b1498b17d-1",
+ "events" : {
+ "out" : 2,
+ "queue_push_duration_in_millis" : 32
+ },
+ "name" : "beats"
+ } ],
+ "filters" : [ ],
+ "outputs" : [ {
+ "id" : "582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-3",
+ "events" : {
+ "duration_in_millis" : 228,
+ "in" : 1269,
+ "out" : 1269
+ },
+ "name" : "s3"
+ }, {
+ "id" : "582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-2",
+ "events" : {
+ "duration_in_millis" : 360,
+ "in" : 1269,
+ "out" : 1269
+ },
+ "name" : "stdout"
+ } ]
+ },
+ "reloads" : {
+ "last_error" : null,
+ "successes" : 0,
+ "last_success_timestamp" : null,
+ "last_failure_timestamp" : null,
+ "failures" : 0
+ },
+ "queue" : {
+ "events" : 208,
+ "type" : "persisted",
+ "capacity" : {
+ "page_capacity_in_bytes" : 262144000,
+ "max_queue_size_in_bytes" : 8589934592,
+ "max_unread_events" : 0
+ },
+ "data" : {
+ "path" : "/path/to/data/queue",
+ "free_space_in_bytes" : 89280552960,
+ "storage_type" : "hfs"
+ }
+ },
+ "id" : "main"
+ }
+}
+`
diff --git a/plugins/inputs/logstash/samples_logstash6.go b/plugins/inputs/logstash/samples_logstash6.go
new file mode 100644
index 0000000000000..16df2b0fdd33e
--- /dev/null
+++ b/plugins/inputs/logstash/samples_logstash6.go
@@ -0,0 +1,256 @@
+package logstash
+
+const logstash6ProcessJSON = `
+{
+ "host" : "node-6",
+ "version" : "6.4.2",
+ "http_address" : "127.0.0.1:9600",
+ "id" : "3044f675-21ce-4335-898a-8408aa678245",
+ "name" : "node-6-test",
+ "process" : {
+ "open_file_descriptors" : 133,
+ "peak_open_file_descriptors" : 145,
+ "max_file_descriptors" : 262144,
+ "mem" : {
+ "total_virtual_in_bytes" : 17923452928
+ },
+ "cpu" : {
+ "total_in_millis" : 5841460,
+ "percent" : 0,
+ "load_average" : {
+ "1m" : 48.2,
+ "5m" : 42.4,
+ "15m" : 38.95
+ }
+ }
+ }
+}
+`
+const logstash6JvmJSON = `
+{
+ "host" : "node-6",
+ "version" : "6.4.2",
+ "http_address" : "127.0.0.1:9600",
+ "id" : "3044f675-21ce-4335-898a-8408aa678245",
+ "name" : "node-6-test",
+ "jvm" : {
+ "threads" : {
+ "count" : 60,
+ "peak_count" : 62
+ },
+ "mem" : {
+ "heap_used_percent" : 2,
+ "heap_committed_in_bytes" : 824963072,
+ "heap_max_in_bytes" : 8389328896,
+ "heap_used_in_bytes" : 202360704,
+ "non_heap_used_in_bytes" : 197878896,
+ "non_heap_committed_in_bytes" : 222986240,
+ "pools" : {
+ "survivor" : {
+ "peak_used_in_bytes" : 8912896,
+ "used_in_bytes" : 835008,
+ "peak_max_in_bytes" : 200605696,
+ "max_in_bytes" : 200605696,
+ "committed_in_bytes" : 8912896
+ },
+ "old" : {
+ "peak_used_in_bytes" : 696572600,
+ "used_in_bytes" : 189750576,
+ "peak_max_in_bytes" : 6583418880,
+ "max_in_bytes" : 6583418880,
+ "committed_in_bytes" : 744419328
+ },
+ "young" : {
+ "peak_used_in_bytes" : 71630848,
+ "used_in_bytes" : 11775120,
+ "peak_max_in_bytes" : 1605304320,
+ "max_in_bytes" : 1605304320,
+ "committed_in_bytes" : 71630848
+ }
+ }
+ },
+ "gc" : {
+ "collectors" : {
+ "old" : {
+ "collection_time_in_millis" : 7492,
+ "collection_count" : 37
+ },
+ "young" : {
+ "collection_time_in_millis" : 107321,
+ "collection_count" : 2094
+ }
+ }
+ },
+ "uptime_in_millis" : 281850926
+ }
+}
+`
+
+const logstash6PipelinesJSON = `
+{
+ "host" : "node-6",
+ "version" : "6.4.2",
+ "http_address" : "127.0.0.1:9600",
+ "id" : "3044f675-21ce-4335-898a-8408aa678245",
+ "name" : "node-6-test",
+ "pipelines" : {
+ "main" : {
+ "events" : {
+ "duration_in_millis" : 8540751,
+ "in" : 180659,
+ "out" : 180659,
+ "filtered" : 180659,
+ "queue_push_duration_in_millis" : 366
+ },
+ "plugins" : {
+ "inputs" : [
+ {
+ "id" : "input-kafka",
+ "events" : {
+ "out" : 180659,
+ "queue_push_duration_in_millis" : 366
+ },
+ "name" : "kafka"
+ }
+ ],
+ "filters" : [
+ {
+ "id" : "155b0ad18abbf3df1e0cb7bddef0d77c5ba699efe5a0f8a28502d140549baf54",
+ "events" : {
+ "duration_in_millis" : 2117,
+ "in" : 27641,
+ "out" : 27641
+ },
+ "name" : "mutate"
+ },
+ {
+ "id" : "d079424bb6b7b8c7c61d9c5e0ddae445e92fa9ffa2e8690b0a669f7c690542f0",
+ "events" : {
+ "duration_in_millis" : 13149,
+ "in" : 180659,
+ "out" : 177549
+ },
+ "matches" : 177546,
+ "failures" : 2,
+ "name" : "date"
+ },
+ {
+ "id" : "25afa60ab6dc30512fe80efa3493e4928b5b1b109765b7dc46a3e4bbf293d2d4",
+ "events" : {
+ "duration_in_millis" : 2814,
+ "in" : 76602,
+ "out" : 76602
+ },
+ "name" : "mutate"
+ },
+ {
+ "id" : "2d9fa8f74eeb137bfa703b8050bad7d76636fface729e4585b789b5fc9bed668",
+ "events" : {
+ "duration_in_millis" : 9,
+ "in" : 934,
+ "out" : 934
+ },
+ "name" : "mutate"
+ },
+ {
+ "id" : "4ed14c9ef0198afe16c31200041e98d321cb5c2e6027e30b077636b8c4842110",
+ "events" : {
+ "duration_in_millis" : 173,
+ "in" : 3110,
+ "out" : 0
+ },
+ "name" : "drop"
+ },
+ {
+ "id" : "358ce1eb387de7cd5711c2fb4de64cd3b12e5ca9a4c45f529516bcb053a31df4",
+ "events" : {
+ "duration_in_millis" : 5605,
+ "in" : 75482,
+ "out" : 75482
+ },
+ "name" : "mutate"
+ },
+ {
+ "id" : "82a9bbb02fff37a63c257c1f146b0a36273c7cbbebe83c0a51f086e5280bf7bb",
+ "events" : {
+ "duration_in_millis" : 313992,
+ "in" : 180659,
+ "out" : 180659
+ },
+ "name" : "csv"
+ },
+ {
+ "id" : "8fb13a8cdd4257b52724d326aa1549603ffdd4e4fde6d20720c96b16238c18c3",
+ "events" : {
+ "duration_in_millis" : 0,
+ "in" : 0,
+ "out" : 0
+ },
+ "name" : "mutate"
+ }
+ ],
+ "outputs" : [
+ {
+ "id" : "output-elk",
+ "documents" : {
+ "successes" : 221
+ },
+ "events" : {
+ "duration_in_millis" : 651386,
+ "in" : 177549,
+ "out" : 177549
+ },
+ "bulk_requests" : {
+ "successes" : 1,
+ "responses" : {
+ "200" : 748
+ }
+ },
+ "name" : "elasticsearch"
+ },
+ {
+ "id" : "output-kafka1",
+ "events" : {
+ "duration_in_millis" : 186751,
+ "in" : 177549,
+ "out" : 177549
+ },
+ "name" : "kafka"
+ },
+ {
+ "id" : "output-kafka2",
+ "events" : {
+ "duration_in_millis" : 7335196,
+ "in" : 177549,
+ "out" : 177549
+ },
+ "name" : "kafka"
+ }
+ ]
+ },
+ "reloads" : {
+ "last_error" : null,
+ "successes" : 0,
+ "last_success_timestamp" : null,
+ "last_failure_timestamp" : null,
+ "failures" : 0
+ },
+ "queue": {
+ "events": 103,
+ "type": "persisted",
+ "capacity": {
+ "queue_size_in_bytes": 1872391,
+ "page_capacity_in_bytes": 67108864,
+ "max_queue_size_in_bytes": 1073741824,
+ "max_unread_events": 0
+ },
+ "data": {
+ "path": "/var/lib/logstash/queue/main",
+ "free_space_in_bytes": 36307369984,
+ "storage_type": "ext4"
+ }
+ }
+ }
+ }
+}
+`
diff --git a/plugins/inputs/lustre2/README.md b/plugins/inputs/lustre2/README.md
new file mode 100644
index 0000000000000..dbdf58f73b257
--- /dev/null
+++ b/plugins/inputs/lustre2/README.md
@@ -0,0 +1,133 @@
+# Lustre Input Plugin
+
+The [Lustre][]® file system is an open-source, parallel file system that supports
+many requirements of leadership class HPC simulation environments.
+
+This plugin monitors the Lustre file system using its entries in the proc filesystem.
+
+### Configuration
+
+```toml
+# Read metrics from local Lustre service on OST, MDS
+[[inputs.lustre2]]
+ ## An array of /proc globs to search for Lustre stats
+ ## If not specified, the default will work on Lustre 2.5.x
+ ##
+ # ost_procfiles = [
+ # "/proc/fs/lustre/obdfilter/*/stats",
+ # "/proc/fs/lustre/osd-ldiskfs/*/stats",
+ # "/proc/fs/lustre/obdfilter/*/job_stats",
+ # ]
+ # mds_procfiles = [
+ # "/proc/fs/lustre/mdt/*/md_stats",
+ # "/proc/fs/lustre/mdt/*/job_stats",
+ # ]
+```
+
+### Metrics
+
+From `/proc/fs/lustre/obdfilter/*/stats` and `/proc/fs/lustre/osd-ldiskfs/*/stats`:
+
+- lustre2
+ - tags:
+ - name
+ - fields:
+ - write_bytes
+ - write_calls
+ - read_bytes
+ - read_calls
+ - cache_hit
+ - cache_miss
+ - cache_access
+
+From `/proc/fs/lustre/obdfilter/*/job_stats`:
+
+- lustre2
+ - tags:
+ - name
+ - jobid
+ - fields:
+ - jobstats_ost_getattr
+ - jobstats_ost_setattr
+ - jobstats_ost_sync
+ - jobstats_punch
+ - jobstats_destroy
+ - jobstats_create
+ - jobstats_ost_statfs
+ - jobstats_get_info
+ - jobstats_set_info
+ - jobstats_quotactl
+ - jobstats_read_bytes
+ - jobstats_read_calls
+ - jobstats_read_max_size
+ - jobstats_read_min_size
+ - jobstats_write_bytes
+ - jobstats_write_calls
+ - jobstats_write_max_size
+ - jobstats_write_min_size
+
+From `/proc/fs/lustre/mdt/*/md_stats`:
+
+- lustre2
+ - tags:
+ - name
+ - fields:
+ - open
+ - close
+ - mknod
+ - link
+ - unlink
+ - mkdir
+ - rmdir
+ - rename
+ - getattr
+ - setattr
+ - getxattr
+ - setxattr
+ - statfs
+ - sync
+ - samedir_rename
+ - crossdir_rename
+
+From `/proc/fs/lustre/mdt/*/job_stats`:
+
+- lustre2
+ - tags:
+ - name
+ - jobid
+ - fields:
+ - jobstats_close
+ - jobstats_crossdir_rename
+ - jobstats_getattr
+ - jobstats_getxattr
+ - jobstats_link
+ - jobstats_mkdir
+ - jobstats_mknod
+ - jobstats_open
+ - jobstats_rename
+ - jobstats_rmdir
+ - jobstats_samedir_rename
+ - jobstats_setattr
+ - jobstats_setxattr
+ - jobstats_statfs
+ - jobstats_sync
+ - jobstats_unlink
+
+
+### Troubleshooting
+
+Check for the default or custom procfiles in the proc filesystem, and reference
+the [Lustre Monitoring and Statistics Guide][guide]. This plugin does not
+report all information from these files, only a limited set of items
+corresponding to the above metric fields.
+
+### Example Output
+
+```
+lustre2,host=oss2,jobid=42990218,name=wrk-OST0041 jobstats_ost_setattr=0i,jobstats_ost_sync=0i,jobstats_punch=0i,jobstats_read_bytes=4096i,jobstats_read_calls=1i,jobstats_read_max_size=4096i,jobstats_read_min_size=4096i,jobstats_write_bytes=310206488i,jobstats_write_calls=7423i,jobstats_write_max_size=53048i,jobstats_write_min_size=8820i 1556525847000000000
+lustre2,host=mds1,jobid=42992017,name=wrk-MDT0000 jobstats_close=31798i,jobstats_crossdir_rename=0i,jobstats_getattr=34146i,jobstats_getxattr=15i,jobstats_link=0i,jobstats_mkdir=658i,jobstats_mknod=0i,jobstats_open=31797i,jobstats_rename=0i,jobstats_rmdir=0i,jobstats_samedir_rename=0i,jobstats_setattr=1788i,jobstats_setxattr=0i,jobstats_statfs=0i,jobstats_sync=0i,jobstats_unlink=0i 1556525828000000000
+
+```
+
+[lustre]: http://lustre.org/
+[guide]: http://wiki.lustre.org/Lustre_Monitoring_and_Statistics_Guide
diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go
index 8ef9223b53a34..611ba294dbc5c 100644
--- a/plugins/inputs/lustre2/lustre2.go
+++ b/plugins/inputs/lustre2/lustre2.go
@@ -9,23 +9,27 @@ for HPC environments. It stores statistics about its activity in
package lustre2
import (
+ "io/ioutil"
"path/filepath"
"strconv"
"strings"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
+type tags struct {
+ name, job string
+}
+
// Lustre proc files can change between versions, so we want to future-proof
// by letting people choose what to look at.
type Lustre2 struct {
- Ost_procfiles []string
- Mds_procfiles []string
+ Ost_procfiles []string `toml:"ost_procfiles"`
+ Mds_procfiles []string `toml:"mds_procfiles"`
// allFields maps and OST name to the metric fields associated with that OST
- allFields map[string]map[string]interface{}
+ allFields map[tags]map[string]interface{}
}
var sampleConfig = `
@@ -353,7 +357,7 @@ var wanted_mdt_jobstats_fields = []*mapping{
},
}
-func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc telegraf.Accumulator) error {
+func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, acc telegraf.Accumulator) error {
files, err := filepath.Glob(fileglob)
if err != nil {
return err
@@ -362,48 +366,61 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping,
for _, file := range files {
/* Turn /proc/fs/lustre/obdfilter//stats and similar
* into just the object store target name
- * Assumpion: the target name is always second to last,
+ * Assumption: the target name is always second to last,
* which is true in Lustre 2.1->2.8
*/
path := strings.Split(file, "/")
name := path[len(path)-2]
- var fields map[string]interface{}
- fields, ok := l.allFields[name]
- if !ok {
- fields = make(map[string]interface{})
- l.allFields[name] = fields
- }
- lines, err := internal.ReadLines(file)
+ //lines, err := internal.ReadLines(file)
+ wholeFile, err := ioutil.ReadFile(file)
if err != nil {
return err
}
+ jobs := strings.Split(string(wholeFile), "- ")
+ for _, job := range jobs {
+ lines := strings.Split(string(job), "\n")
+ jobid := ""
- for _, line := range lines {
- parts := strings.Fields(line)
- if strings.HasPrefix(line, "- job_id:") {
- // Set the job_id explicitly if present
- fields["jobid"] = parts[2]
+ // figure out if the data should be tagged with job_id here
+ parts := strings.Fields(lines[0])
+ if strings.TrimSuffix(parts[0], ":") == "job_id" {
+ jobid = parts[1]
}
- for _, wanted := range wanted_fields {
- var data uint64
- if strings.TrimSuffix(parts[0], ":") == wanted.inProc {
- wanted_field := wanted.field
- // if not set, assume field[1]. Shouldn't be field[0], as
- // that's a string
- if wanted_field == 0 {
- wanted_field = 1
- }
- data, err = strconv.ParseUint(strings.TrimSuffix((parts[wanted_field]), ","), 10, 64)
- if err != nil {
- return err
- }
- report_name := wanted.inProc
- if wanted.reportAs != "" {
- report_name = wanted.reportAs
+ for _, line := range lines {
+ // skip any empty lines
+ if len(line) < 1 {
+ continue
+ }
+ parts := strings.Fields(line)
+
+ var fields map[string]interface{}
+ fields, ok := l.allFields[tags{name, jobid}]
+ if !ok {
+ fields = make(map[string]interface{})
+ l.allFields[tags{name, jobid}] = fields
+ }
+
+ for _, wanted := range wantedFields {
+ var data uint64
+ if strings.TrimSuffix(parts[0], ":") == wanted.inProc {
+ wantedField := wanted.field
+ // if not set, assume field[1]. Shouldn't be field[0], as
+ // that's a string
+ if wantedField == 0 {
+ wantedField = 1
+ }
+ data, err = strconv.ParseUint(strings.TrimSuffix((parts[wantedField]), ","), 10, 64)
+ if err != nil {
+ return err
+ }
+ reportName := wanted.inProc
+ if wanted.reportAs != "" {
+ reportName = wanted.reportAs
+ }
+ fields[reportName] = data
}
- fields[report_name] = data
}
}
}
@@ -423,7 +440,8 @@ func (l *Lustre2) Description() string {
// Gather reads stats from all lustre targets
func (l *Lustre2) Gather(acc telegraf.Accumulator) error {
- l.allFields = make(map[string]map[string]interface{})
+ //l.allFields = make(map[string]map[string]interface{})
+ l.allFields = make(map[tags]map[string]interface{})
if len(l.Ost_procfiles) == 0 {
// read/write bytes are in obdfilter//stats
@@ -483,15 +501,13 @@ func (l *Lustre2) Gather(acc telegraf.Accumulator) error {
}
}
- for name, fields := range l.allFields {
+ for tgs, fields := range l.allFields {
+
tags := map[string]string{
- "name": name,
+ "name": tgs.name,
}
- if _, ok := fields["jobid"]; ok {
- if jobid, ok := fields["jobid"].(string); ok {
- tags["jobid"] = jobid
- }
- delete(fields, "jobid")
+ if len(tgs.job) > 0 {
+ tags["jobid"] = tgs.job
}
acc.AddFields("lustre2", fields, tags)
}
diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go
index 5cc9c0e435a64..8e93da8e81726 100644
--- a/plugins/inputs/lustre2/lustre2_test.go
+++ b/plugins/inputs/lustre2/lustre2_test.go
@@ -6,6 +6,9 @@ import (
"testing"
"github.com/influxdata/telegraf/testutil"
+ "github.com/influxdata/toml"
+ "github.com/influxdata/toml/ast"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -39,7 +42,7 @@ cache_miss 11653333250 samples [pages] 1 1 11653333250
`
const obdfilterJobStatsContents = `job_stats:
-- job_id: testjob1
+- job_id: cluster-testjob1
snapshot_time: 1461772761
read_bytes: { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 }
write_bytes: { samples: 25, unit: bytes, min: 1048576, max: 1048576, sum: 26214400 }
@@ -53,6 +56,20 @@ const obdfilterJobStatsContents = `job_stats:
get_info: { samples: 0, unit: reqs }
set_info: { samples: 0, unit: reqs }
quotactl: { samples: 0, unit: reqs }
+- job_id: testjob2
+ snapshot_time: 1461772761
+ read_bytes: { samples: 1, unit: bytes, min: 1024, max: 1024, sum: 1024 }
+ write_bytes: { samples: 25, unit: bytes, min: 2048, max: 2048, sum: 51200 }
+ getattr: { samples: 0, unit: reqs }
+ setattr: { samples: 0, unit: reqs }
+ punch: { samples: 1, unit: reqs }
+ sync: { samples: 0, unit: reqs }
+ destroy: { samples: 0, unit: reqs }
+ create: { samples: 0, unit: reqs }
+ statfs: { samples: 0, unit: reqs }
+ get_info: { samples: 0, unit: reqs }
+ set_info: { samples: 0, unit: reqs }
+ quotactl: { samples: 0, unit: reqs }
`
const mdtProcContents = `snapshot_time 1438693238.20113 secs.usecs
@@ -75,7 +92,7 @@ crossdir_rename 369571 samples [reqs]
`
const mdtJobStatsContents = `job_stats:
-- job_id: testjob1
+- job_id: cluster-testjob1
snapshot_time: 1461772761
open: { samples: 5, unit: reqs }
close: { samples: 4, unit: reqs }
@@ -93,6 +110,24 @@ const mdtJobStatsContents = `job_stats:
sync: { samples: 2, unit: reqs }
samedir_rename: { samples: 705, unit: reqs }
crossdir_rename: { samples: 200, unit: reqs }
+- job_id: testjob2
+ snapshot_time: 1461772761
+ open: { samples: 6, unit: reqs }
+ close: { samples: 7, unit: reqs }
+ mknod: { samples: 8, unit: reqs }
+ link: { samples: 9, unit: reqs }
+ unlink: { samples: 20, unit: reqs }
+ mkdir: { samples: 200, unit: reqs }
+ rmdir: { samples: 210, unit: reqs }
+ rename: { samples: 8, unit: reqs }
+ getattr: { samples: 10, unit: reqs }
+ setattr: { samples: 2, unit: reqs }
+ getxattr: { samples: 4, unit: reqs }
+ setxattr: { samples: 5, unit: reqs }
+ statfs: { samples: 1207, unit: reqs }
+ sync: { samples: 3, unit: reqs }
+ samedir_rename: { samples: 706, unit: reqs }
+ crossdir_rename: { samples: 201, unit: reqs }
`
func TestLustre2GeneratesMetrics(t *testing.T) {
@@ -172,7 +207,7 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/"
ost_name := "OST0001"
- job_name := "testjob1"
+ job_names := []string{"cluster-testjob1", "testjob2"}
mdtdir := tempdir + "/mdt/"
err := os.MkdirAll(mdtdir+"/"+ost_name, 0755)
@@ -199,12 +234,23 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
err = m.Gather(&acc)
require.NoError(t, err)
- tags := map[string]string{
- "name": ost_name,
- "jobid": job_name,
+ // make this two tags
+ // and even further make this dependent on summing per OST
+ tags := []map[string]string{
+ {
+ "name": ost_name,
+ "jobid": job_names[0],
+ },
+ {
+ "name": ost_name,
+ "jobid": job_names[1],
+ },
}
- fields := map[string]interface{}{
+ // make this for two tags
+ var fields []map[string]interface{}
+
+ fields = append(fields, map[string]interface{}{
"jobstats_read_calls": uint64(1),
"jobstats_read_min_size": uint64(4096),
"jobstats_read_max_size": uint64(4096),
@@ -239,10 +285,86 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
"jobstats_sync": uint64(2),
"jobstats_samedir_rename": uint64(705),
"jobstats_crossdir_rename": uint64(200),
+ })
+
+ fields = append(fields, map[string]interface{}{
+ "jobstats_read_calls": uint64(1),
+ "jobstats_read_min_size": uint64(1024),
+ "jobstats_read_max_size": uint64(1024),
+ "jobstats_read_bytes": uint64(1024),
+ "jobstats_write_calls": uint64(25),
+ "jobstats_write_min_size": uint64(2048),
+ "jobstats_write_max_size": uint64(2048),
+ "jobstats_write_bytes": uint64(51200),
+ "jobstats_ost_getattr": uint64(0),
+ "jobstats_ost_setattr": uint64(0),
+ "jobstats_punch": uint64(1),
+ "jobstats_ost_sync": uint64(0),
+ "jobstats_destroy": uint64(0),
+ "jobstats_create": uint64(0),
+ "jobstats_ost_statfs": uint64(0),
+ "jobstats_get_info": uint64(0),
+ "jobstats_set_info": uint64(0),
+ "jobstats_quotactl": uint64(0),
+ "jobstats_open": uint64(6),
+ "jobstats_close": uint64(7),
+ "jobstats_mknod": uint64(8),
+ "jobstats_link": uint64(9),
+ "jobstats_unlink": uint64(20),
+ "jobstats_mkdir": uint64(200),
+ "jobstats_rmdir": uint64(210),
+ "jobstats_rename": uint64(8),
+ "jobstats_getattr": uint64(10),
+ "jobstats_setattr": uint64(2),
+ "jobstats_getxattr": uint64(4),
+ "jobstats_setxattr": uint64(5),
+ "jobstats_statfs": uint64(1207),
+ "jobstats_sync": uint64(3),
+ "jobstats_samedir_rename": uint64(706),
+ "jobstats_crossdir_rename": uint64(201),
+ })
+
+ for index := 0; index < len(fields); index++ {
+ acc.AssertContainsTaggedFields(t, "lustre2", fields[index], tags[index])
}
- acc.AssertContainsTaggedFields(t, "lustre2", fields, tags)
+ // run this over both tags
err = os.RemoveAll(os.TempDir() + "/telegraf")
require.NoError(t, err)
}
+
+func TestLustre2CanParseConfiguration(t *testing.T) {
+ config := []byte(`
+[[inputs.lustre2]]
+ ost_procfiles = [
+ "/proc/fs/lustre/obdfilter/*/stats",
+ "/proc/fs/lustre/osd-ldiskfs/*/stats",
+ ]
+ mds_procfiles = [
+ "/proc/fs/lustre/mdt/*/md_stats",
+ ]`)
+
+ table, err := toml.Parse([]byte(config))
+ require.NoError(t, err)
+
+ inputs, ok := table.Fields["inputs"]
+ require.True(t, ok)
+
+ lustre2, ok := inputs.(*ast.Table).Fields["lustre2"]
+ require.True(t, ok)
+
+ var plugin Lustre2
+
+ require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin))
+
+ assert.Equal(t, Lustre2{
+ Ost_procfiles: []string{
+ "/proc/fs/lustre/obdfilter/*/stats",
+ "/proc/fs/lustre/osd-ldiskfs/*/stats",
+ },
+ Mds_procfiles: []string{
+ "/proc/fs/lustre/mdt/*/md_stats",
+ },
+ }, plugin)
+}
diff --git a/plugins/inputs/mailchimp/README.md b/plugins/inputs/mailchimp/README.md
new file mode 100644
index 0000000000000..46750f6fc5efa
--- /dev/null
+++ b/plugins/inputs/mailchimp/README.md
@@ -0,0 +1,59 @@
+# Mailchimp Input Plugin
+
+Pulls campaign reports from the [Mailchimp API](https://developer.mailchimp.com/).
+
+### Configuration
+
+This section contains the default TOML to configure the plugin. You can
+generate it using `telegraf --usage mailchimp`.
+
+```toml
+[[inputs.mailchimp]]
+ ## MailChimp API key
+ ## get from https://admin.mailchimp.com/account/api/
+ api_key = "" # required
+
+ ## Reports for campaigns sent more than days_old ago will not be collected.
+ ## 0 means collect all and is the default value.
+ days_old = 0
+
+ ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
+ # campaign_id = ""
+```
+
+### Metrics
+
+- mailchimp
+ - tags:
+ - id
+ - campaign_title
+ - fields:
+ - emails_sent (integer, emails)
+ - abuse_reports (integer, reports)
+ - unsubscribed (integer, unsubscribes)
+ - hard_bounces (integer, emails)
+ - soft_bounces (integer, emails)
+ - syntax_errors (integer, errors)
+ - forwards_count (integer, emails)
+ - forwards_opens (integer, emails)
+ - opens_total (integer, emails)
+ - unique_opens (integer, emails)
+ - open_rate (double, percentage)
+ - clicks_total (integer, clicks)
+ - unique_clicks (integer, clicks)
+ - unique_subscriber_clicks (integer, clicks)
+ - click_rate (double, percentage)
+ - facebook_recipient_likes (integer, likes)
+ - facebook_unique_likes (integer, likes)
+ - facebook_likes (integer, likes)
+ - industry_type (string, type)
+ - industry_open_rate (double, percentage)
+ - industry_click_rate (double, percentage)
+ - industry_bounce_rate (double, percentage)
+ - industry_unopen_rate (double, percentage)
+ - industry_unsub_rate (double, percentage)
+ - industry_abuse_rate (double, percentage)
+ - list_stats_sub_rate (double, percentage)
+ - list_stats_unsub_rate (double, percentage)
+ - list_stats_open_rate (double, percentage)
+ - list_stats_click_rate (double, percentage)
diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go
index db0004ce2264f..a40614b1d0f7e 100644
--- a/plugins/inputs/mailchimp/chimp_api.go
+++ b/plugins/inputs/mailchimp/chimp_api.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "io"
"io/ioutil"
"log"
"net/http"
@@ -134,7 +135,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
req.URL.RawQuery = params.String()
req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin")
if api.Debug {
- log.Printf("D! Request URL: %s", req.URL.String())
+ log.Printf("D! [inputs.mailchimp] request URL: %s", req.URL.String())
}
resp, err := client.Do(req)
@@ -143,12 +144,18 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
}
defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+ body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200))
+ return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body)
+ }
+
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if api.Debug {
- log.Printf("D! Response Body:%s", string(body))
+ log.Printf("D! [inputs.mailchimp] response Body: %q", string(body))
}
if err = chimpErrorCheck(body); err != nil {
@@ -171,7 +178,7 @@ type Report struct {
Unsubscribed int `json:"unsubscribed"`
SendTime string `json:"send_time"`
- TimeSeries []TimeSerie
+ TimeSeries []TimeSeries
Bounces Bounces `json:"bounces"`
Forwards Forwards `json:"forwards"`
Opens Opens `json:"opens"`
@@ -230,7 +237,7 @@ type ListStats struct {
ClickRate float64 `json:"click_rate"`
}
-type TimeSerie struct {
+type TimeSeries struct {
TimeStamp string `json:"timestamp"`
EmailsSent int `json:"emails_sent"`
UniqueOpens int `json:"unique_opens"`
diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go
index ed6898e6029e0..0c4dab56d5d12 100644
--- a/plugins/inputs/mailchimp/mailchimp_test.go
+++ b/plugins/inputs/mailchimp/mailchimp_test.go
@@ -140,7 +140,7 @@ func TestMailChimpGatherReport(t *testing.T) {
}
-func TestMailChimpGatherErroror(t *testing.T) {
+func TestMailChimpGatherError(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
diff --git a/plugins/inputs/marklogic/README.md b/plugins/inputs/marklogic/README.md
new file mode 100644
index 0000000000000..7feb4a10d9d04
--- /dev/null
+++ b/plugins/inputs/marklogic/README.md
@@ -0,0 +1,64 @@
+# MarkLogic Input Plugin
+
+The MarkLogic Telegraf plugin gathers health status metrics from one or more host.
+
+### Configuration:
+
+```toml
+[[inputs.marklogic]]
+ ## Base URL of the MarkLogic HTTP Server.
+ url = "http://localhost:8002"
+
+ ## List of specific hostnames to retrieve information. At least (1) required.
+ # hosts = ["hostname1", "hostname2"]
+
+ ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
+ # username = "myuser"
+ # password = "mypassword"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+```
+
+### Metrics
+
+- marklogic
+ - tags:
+ - source (the hostname of the server address, ex. `ml1.local`)
+ - id (the host node unique id ex. `2592913110757471141`)
+ - fields:
+ - online
+ - total_load
+ - total_rate
+ - ncpus
+ - ncores
+ - total_cpu_stat_user
+ - total_cpu_stat_system
+ - total_cpu_stat_idle
+ - total_cpu_stat_iowait
+ - memory_process_size
+ - memory_process_rss
+ - memory_system_total
+ - memory_system_free
+ - memory_process_swap_size
+ - memory_size
+ - host_size
+ - log_device_space
+ - data_dir_space
+ - query_read_bytes
+ - query_read_load
+ - merge_read_bytes
+ - merge_write_load
+ - http_server_receive_bytes
+ - http_server_send_bytes
+
+### Example Output:
+
+```
+$> marklogic,host=localhost,id=2592913110757471141,source=ml1.local total_cpu_stat_iowait=0.0125649003311992,memory_process_swap_size=0i,host_size=380i,data_dir_space=28216i,query_read_load=0i,ncpus=1i,log_device_space=28216i,query_read_bytes=13947332i,merge_write_load=0i,http_server_receive_bytes=225893i,online=true,ncores=4i,total_cpu_stat_user=0.150778993964195,total_cpu_stat_system=0.598927974700928,total_cpu_stat_idle=99.2210006713867,memory_system_total=3947i,memory_system_free=2669i,memory_size=4096i,total_rate=14.7697010040283,http_server_send_bytes=0i,memory_process_size=903i,memory_process_rss=486i,merge_read_load=0i,total_load=0.00502600101754069 1566373000000000000
+
+```
diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go
new file mode 100644
index 0000000000000..b350466122dc7
--- /dev/null
+++ b/plugins/inputs/marklogic/marklogic.go
@@ -0,0 +1,260 @@
+package marklogic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Marklogic configuration toml
+type Marklogic struct {
+ URL string `toml:"url"`
+ Hosts []string `toml:"hosts"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ Sources []string
+
+ tls.ClientConfig
+
+ client *http.Client
+}
+
+type MlPointInt struct {
+ Value int `json:"value"`
+}
+
+type MlPointFloat struct {
+ Value float64 `json:"value"`
+}
+
+type MlPointBool struct {
+ Value bool `json:"value"`
+}
+
+// MarkLogic v2 management api endpoints for hosts status
+const statsPath = "/manage/v2/hosts/"
+const viewFormat = "view=status&format=json"
+
+type MlHost struct {
+ HostStatus struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ StatusProperties struct {
+ Online MlPointBool `json:"online"`
+ LoadProperties struct {
+ TotalLoad MlPointFloat `json:"total-load"`
+ } `json:"load-properties"`
+ RateProperties struct {
+ TotalRate MlPointFloat `json:"total-rate"`
+ } `json:"rate-properties"`
+ StatusDetail struct {
+ Cpus MlPointInt `json:"cpus"`
+ Cores MlPointInt `json:"cores"`
+ TotalCPUStatUser float64 `json:"total-cpu-stat-user"`
+ TotalCPUStatSystem float64 `json:"total-cpu-stat-system"`
+ TotalCPUStatIdle float64 `json:"total-cpu-stat-idle"`
+ TotalCPUStatIowait float64 `json:"total-cpu-stat-iowait"`
+ MemoryProcessSize MlPointInt `json:"memory-process-size"`
+ MemoryProcessRss MlPointInt `json:"memory-process-rss"`
+ MemorySystemTotal MlPointInt `json:"memory-system-total"`
+ MemorySystemFree MlPointInt `json:"memory-system-free"`
+ MemoryProcessSwapSize MlPointInt `json:"memory-process-swap-size"`
+ MemorySize MlPointInt `json:"memory-size"`
+ HostSize MlPointInt `json:"host-size"`
+ LogDeviceSpace MlPointInt `json:"log-device-space"`
+ DataDirSpace MlPointInt `json:"data-dir-space"`
+ QueryReadBytes MlPointInt `json:"query-read-bytes"`
+ QueryReadLoad MlPointInt `json:"query-read-load"`
+ MergeReadLoad MlPointInt `json:"merge-read-load"`
+ MergeWriteLoad MlPointInt `json:"merge-write-load"`
+ HTTPServerReceiveBytes MlPointInt `json:"http-server-receive-bytes"`
+ HTTPServerSendBytes MlPointInt `json:"http-server-send-bytes"`
+ } `json:"status-detail"`
+ } `json:"status-properties"`
+ } `json:"host-status"`
+}
+
+// Description of plugin returned
+func (c *Marklogic) Description() string {
+ return "Retrieves information on a specific host in a MarkLogic Cluster"
+}
+
+var sampleConfig = `
+ ## Base URL of the MarkLogic HTTP Server.
+ url = "http://localhost:8002"
+
+ ## List of specific hostnames to retrieve information. At least (1) required.
+ # hosts = ["hostname1", "hostname2"]
+
+ ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
+ # username = "myuser"
+ # password = "mypassword"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+`
+
+// Init parse all source URLs and place on the Marklogic struct
+func (c *Marklogic) Init() error {
+
+ if len(c.URL) == 0 {
+ c.URL = "http://localhost:8002/"
+ }
+
+ for _, u := range c.Hosts {
+ base, err := url.Parse(c.URL)
+ if err != nil {
+ return err
+ }
+
+ base.Path = path.Join(base.Path, statsPath, u)
+ addr := base.ResolveReference(base)
+
+ addr.RawQuery = viewFormat
+ u := addr.String()
+ c.Sources = append(c.Sources, u)
+ }
+ return nil
+}
+
+// SampleConfig to gather stats from localhost, default port.
+func (c *Marklogic) SampleConfig() string {
+ return sampleConfig
+}
+
+// Gather metrics from HTTP Server.
+func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error {
+ var wg sync.WaitGroup
+
+ if c.client == nil {
+ client, err := c.createHTTPClient()
+
+ if err != nil {
+ return err
+ }
+ c.client = client
+ }
+
+ // Range over all source URL's appended to the struct
+ for _, serv := range c.Sources {
+ //fmt.Printf("Encoded URL is %q\n", serv)
+ wg.Add(1)
+ go func(serv string) {
+ defer wg.Done()
+ if err := c.fetchAndInsertData(accumulator, serv); err != nil {
+ accumulator.AddError(fmt.Errorf("[host=%s]: %s", serv, err))
+ }
+ }(serv)
+ }
+
+ wg.Wait()
+
+ return nil
+}
+
+func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, url string) error {
+ ml := &MlHost{}
+ if err := c.gatherJSONData(url, ml); err != nil {
+ return err
+ }
+
+ // Build a map of tags
+ tags := map[string]string{
+ "source": ml.HostStatus.Name,
+ "id": ml.HostStatus.ID,
+ }
+
+ // Build a map of field values
+ fields := map[string]interface{}{
+ "online": ml.HostStatus.StatusProperties.Online.Value,
+ "total_load": ml.HostStatus.StatusProperties.LoadProperties.TotalLoad.Value,
+ "total_rate": ml.HostStatus.StatusProperties.RateProperties.TotalRate.Value,
+ "ncpus": ml.HostStatus.StatusProperties.StatusDetail.Cpus.Value,
+ "ncores": ml.HostStatus.StatusProperties.StatusDetail.Cores.Value,
+ "total_cpu_stat_user": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatUser,
+ "total_cpu_stat_system": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatSystem,
+ "total_cpu_stat_idle": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatIdle,
+ "total_cpu_stat_iowait": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatIowait,
+ "memory_process_size": ml.HostStatus.StatusProperties.StatusDetail.MemoryProcessSize.Value,
+ "memory_process_rss": ml.HostStatus.StatusProperties.StatusDetail.MemoryProcessRss.Value,
+ "memory_system_total": ml.HostStatus.StatusProperties.StatusDetail.MemorySystemTotal.Value,
+ "memory_system_free": ml.HostStatus.StatusProperties.StatusDetail.MemorySystemFree.Value,
+ "memory_process_swap_size": ml.HostStatus.StatusProperties.StatusDetail.MemoryProcessSwapSize.Value,
+ "memory_size": ml.HostStatus.StatusProperties.StatusDetail.MemorySize.Value,
+ "host_size": ml.HostStatus.StatusProperties.StatusDetail.HostSize.Value,
+ "log_device_space": ml.HostStatus.StatusProperties.StatusDetail.LogDeviceSpace.Value,
+ "data_dir_space": ml.HostStatus.StatusProperties.StatusDetail.DataDirSpace.Value,
+ "query_read_bytes": ml.HostStatus.StatusProperties.StatusDetail.QueryReadBytes.Value,
+ "query_read_load": ml.HostStatus.StatusProperties.StatusDetail.QueryReadLoad.Value,
+ "merge_read_load": ml.HostStatus.StatusProperties.StatusDetail.MergeReadLoad.Value,
+ "merge_write_load": ml.HostStatus.StatusProperties.StatusDetail.MergeWriteLoad.Value,
+ "http_server_receive_bytes": ml.HostStatus.StatusProperties.StatusDetail.HTTPServerReceiveBytes.Value,
+ "http_server_send_bytes": ml.HostStatus.StatusProperties.StatusDetail.HTTPServerSendBytes.Value,
+ }
+
+ // Accumulate the tags and values
+ acc.AddFields("marklogic", fields, tags)
+
+ return nil
+}
+
+func (c *Marklogic) createHTTPClient() (*http.Client, error) {
+ tlsCfg, err := c.ClientConfig.TLSConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ client := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ },
+ Timeout: time.Duration(5 * time.Second),
+ }
+
+ return client, nil
+}
+
+func (c *Marklogic) gatherJSONData(url string, v interface{}) error {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+
+ if c.Username != "" || c.Password != "" {
+ req.SetBasicAuth(c.Username, c.Password)
+ }
+
+ response, err := c.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusOK {
+ return fmt.Errorf("marklogic: API responded with status-code %d, expected %d",
+ response.StatusCode, http.StatusOK)
+ }
+
+ if err = json.NewDecoder(response.Body).Decode(v); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func init() {
+ inputs.Add("marklogic", func() telegraf.Input {
+ return &Marklogic{}
+ })
+}
diff --git a/plugins/inputs/marklogic/marklogic_test.go b/plugins/inputs/marklogic/marklogic_test.go
new file mode 100644
index 0000000000000..34e4bbd6bb7e9
--- /dev/null
+++ b/plugins/inputs/marklogic/marklogic_test.go
@@ -0,0 +1,1282 @@
+package marklogic
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMarklogic(t *testing.T) {
+ // Create a test server with the const response JSON
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintln(w, response)
+ }))
+ defer ts.Close()
+
+ // Parse the URL of the test server, used to verify the expected host
+ _, err := url.Parse(ts.URL)
+ require.NoError(t, err)
+
+ // Create a new Marklogic instance with our given test server
+
+ ml := &Marklogic{
+ Hosts: []string{"example1"},
+ URL: string(ts.URL),
+ //Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"},
+ }
+
+ // Create a test accumulator
+ acc := &testutil.Accumulator{}
+
+ // Init() call to parse all source URL's
+ err = ml.Init()
+ require.NoError(t, err)
+
+ // Gather data from the test server
+ err = ml.Gather(acc)
+ require.NoError(t, err)
+
+ // Expect the correct values for all known keys
+ expectFields := map[string]interface{}{
+ "online": true,
+ "total_load": 0.00429263804107904,
+ "ncpus": 1,
+ "ncores": 4,
+ "total_rate": 15.6527042388916,
+ "total_cpu_stat_user": 0.276381999254227,
+ "total_cpu_stat_system": 0.636515974998474,
+ "total_cpu_stat_idle": 99.0578002929688,
+ "total_cpu_stat_iowait": 0.0125628001987934,
+ "memory_process_size": 1234,
+ "memory_process_rss": 815,
+ "memory_system_total": 3947,
+ "memory_system_free": 2761,
+ "memory_process_swap_size": 0,
+ "memory_size": 4096,
+ "host_size": 64,
+ "log_device_space": 34968,
+ "data_dir_space": 34968,
+ "query_read_bytes": 11492428,
+ "query_read_load": 0,
+ "merge_read_load": 0,
+ "merge_write_load": 0,
+ "http_server_receive_bytes": 285915,
+ "http_server_send_bytes": 0,
+ }
+ // Expect the correct values for all tags
+ expectTags := map[string]string{
+ "source": "ml1.local",
+ "id": "2592913110757471141",
+ }
+
+ acc.AssertContainsTaggedFields(t, "marklogic", expectFields, expectTags)
+
+}
+
+var response = `
+{
+ "host-status": {
+ "id": "2592913110757471141",
+ "name": "ml1.local",
+ "version": "10.0-1",
+ "effective-version": 10000100,
+ "host-mode": "normal",
+ "host-mode-description": "",
+ "meta": {
+ "uri": "/manage/v2/hosts/ml1.local?view=status",
+ "current-time": "2019-07-28T22:32:19.056203Z",
+ "elapsed-time": {
+ "units": "sec",
+ "value": 0.013035
+ }
+ },
+ "relations": {
+ "relation-group": [
+ {
+ "uriref": "/manage/v2/forests?view=status&host-id=ml1.local",
+ "typeref": "forests",
+ "relation": [
+ {
+ "uriref": "/manage/v2/forests/App-Services",
+ "idref": "8573569457346659714",
+ "nameref": "App-Services"
+ },
+ {
+ "uriref": "/manage/v2/forests/Documents",
+ "idref": "17189472171231792168",
+ "nameref": "Documents"
+ },
+ {
+ "uriref": "/manage/v2/forests/Extensions",
+ "idref": "1510244530748962553",
+ "nameref": "Extensions"
+ },
+ {
+ "uriref": "/manage/v2/forests/Fab",
+ "idref": "16221965829238302106",
+ "nameref": "Fab"
+ },
+ {
+ "uriref": "/manage/v2/forests/Last-Login",
+ "idref": "1093671762706318022",
+ "nameref": "Last-Login"
+ },
+ {
+ "uriref": "/manage/v2/forests/Meters",
+ "idref": "1573439446779995954",
+ "nameref": "Meters"
+ },
+ {
+ "uriref": "/manage/v2/forests/Modules",
+ "idref": "18320951141685848719",
+ "nameref": "Modules"
+ },
+ {
+ "uriref": "/manage/v2/forests/Schemas",
+ "idref": "18206720449696085936",
+ "nameref": "Schemas"
+ },
+ {
+ "uriref": "/manage/v2/forests/Security",
+ "idref": "9348728036360382939",
+ "nameref": "Security"
+ },
+ {
+ "uriref": "/manage/v2/forests/Triggers",
+ "idref": "10142793547905338229",
+ "nameref": "Triggers"
+ }
+ ]
+ },
+ {
+ "typeref": "groups",
+ "relation": [
+ {
+ "uriref": "/manage/v2/groups/Default?view=status",
+ "idref": "16808579782544283978",
+ "nameref": "Default"
+ }
+ ]
+ }
+ ]
+ },
+ "status-properties": {
+ "online": {
+ "units": "bool",
+ "value": true
+ },
+ "secure": {
+ "units": "bool",
+ "value": false
+ },
+ "cache-properties": {
+ "cache-detail": {
+ "compressed-tree-cache-partition": [
+ {
+ "partition-size": 64,
+ "partition-table": 3.40000009536743,
+ "partition-used": 29.7000007629395,
+ "partition-free": 70.1999969482422,
+ "partition-overhead": 0.100000001490116
+ }
+ ],
+ "expanded-tree-cache-partition": [
+ {
+ "partition-size": 128,
+ "partition-table": 6.19999980926514,
+ "partition-busy": 0,
+ "partition-used": 87.3000030517578,
+ "partition-free": 12.3999996185303,
+ "partition-overhead": 0.300000011920929
+ }
+ ],
+ "triple-cache-partition": [
+ {
+ "partition-size": 64,
+ "partition-busy": 0,
+ "partition-used": 0,
+ "partition-free": 100
+ }
+ ],
+ "triple-value-cache-partition": [
+ {
+ "partition-size": 128,
+ "partition-busy": 0,
+ "partition-used": 0,
+ "partition-free": 100,
+ "value-count": 0,
+ "value-bytes-total": 0,
+ "value-bytes-average": 0
+ }
+ ]
+ }
+ },
+ "load-properties": {
+ "total-load": {
+ "units": "sec/sec",
+ "value": 0.00429263804107904
+ },
+ "load-detail": {
+ "query-read-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "journal-write-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "save-write-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "merge-read-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "merge-write-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "backup-read-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "backup-write-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "restore-read-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "restore-write-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "large-read-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "large-write-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "external-binary-read-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "xdqp-client-receive-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "xdqp-client-send-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "xdqp-server-receive-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "xdqp-server-send-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "foreign-xdqp-client-receive-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "foreign-xdqp-client-send-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "foreign-xdqp-server-receive-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "foreign-xdqp-server-send-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "read-lock-wait-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "read-lock-hold-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "write-lock-wait-load": {
+ "units": "sec/sec",
+ "value": 0
+ },
+ "write-lock-hold-load": {
+ "units": "sec/sec",
+ "value": 0.00429263804107904
+ },
+ "deadlock-wait-load": {
+ "units": "sec/sec",
+ "value": 0
+ }
+ }
+ },
+ "rate-properties": {
+ "total-rate": {
+ "units": "MB/sec",
+ "value": 15.6527042388916
+ },
+ "rate-detail": {
+ "memory-system-pagein-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "memory-system-pageout-rate": {
+ "units": "MB/sec",
+ "value": 15.6420001983643
+ },
+ "memory-system-swapin-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "memory-system-swapout-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "query-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "journal-write-rate": {
+ "units": "MB/sec",
+ "value": 0.00372338597662747
+ },
+ "save-write-rate": {
+ "units": "MB/sec",
+ "value": 0.0024786819703877
+ },
+ "merge-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "merge-write-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "backup-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "backup-write-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "restore-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "restore-write-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "large-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "large-write-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "external-binary-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "xdqp-client-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "xdqp-client-send-rate": {
+ "units": "MB/sec",
+ "value": 0.00293614692054689
+ },
+ "xdqp-server-receive-rate": {
+ "units": "MB/sec",
+ "value": 0.00156576896551996
+ },
+ "xdqp-server-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "foreign-xdqp-client-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "foreign-xdqp-client-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "foreign-xdqp-server-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "foreign-xdqp-server-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "read-lock-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "write-lock-rate": {
+ "units": "MB/sec",
+ "value": 0.251882910728455
+ },
+ "deadlock-rate": {
+ "units": "MB/sec",
+ "value": 0
+ }
+ }
+ },
+ "status-detail": {
+ "bind-port": 7999,
+ "connect-port": 7999,
+ "ssl-fips-enabled": {
+ "units": "bool",
+ "value": true
+ },
+ "foreign-bind-port": 7998,
+ "foreign-connect-port": 7998,
+ "background-io-limit": {
+ "units": "quantity",
+ "value": 0
+ },
+ "metering-enabled": {
+ "units": "bool",
+ "value": true
+ },
+ "meters-database": {
+ "units": "quantity",
+ "value": "11952918530142281790"
+ },
+ "performance-metering-enabled": {
+ "units": "bool",
+ "value": true
+ },
+ "performance-metering-period": {
+ "units": "second",
+ "value": 60
+ },
+ "performance-metering-retain-raw": {
+ "units": "day",
+ "value": 7
+ },
+ "performance-metering-retain-hourly": {
+ "units": "day",
+ "value": 30
+ },
+ "performance-metering-retain-daily": {
+ "units": "day",
+ "value": 90
+ },
+ "last-startup": {
+ "units": "datetime",
+ "value": "2019-07-26T17:23:36.412644Z"
+ },
+ "version": "10.0-1",
+ "effective-version": {
+ "units": "quantity",
+ "value": 10000100
+ },
+ "software-version": {
+ "units": "quantity",
+ "value": 10000100
+ },
+ "os-version": "NA",
+ "converters-version": "10.0-1",
+ "host-mode": {
+ "units": "enum",
+ "value": "normal"
+ },
+ "architecture": "x86_64",
+ "platform": "linux",
+ "license-key": "000-000-000-000-000-000-000",
+ "licensee": "NA",
+ "license-key-expires": {
+ "units": "datetime",
+ "value": "2999-01-23T00:00:00Z"
+ },
+ "license-key-cpus": {
+ "units": "quantity",
+ "value": 0
+ },
+ "license-key-cores": {
+ "units": "quantity",
+ "value": 0
+ },
+ "license-key-size": {
+ "units": "MB",
+ "value": 0
+ },
+ "license-key-option": [
+ {
+ "units": "enum",
+ "value": "conversion"
+ },
+ {
+ "units": "enum",
+ "value": "failover"
+ },
+ {
+ "units": "enum",
+ "value": "alerting"
+ },
+ {
+ "units": "enum",
+ "value": "geospatial"
+ },
+ {
+ "units": "enum",
+ "value": "flexible replication"
+ },
+ {
+ "units": "enum",
+ "value": "tiered storage"
+ },
+ {
+ "units": "enum",
+ "value": "semantics"
+ },
+ {
+ "units": "enum",
+ "value": "French"
+ },
+ {
+ "units": "enum",
+ "value": "Italian"
+ },
+ {
+ "units": "enum",
+ "value": "German"
+ },
+ {
+ "units": "enum",
+ "value": "Spanish"
+ },
+ {
+ "units": "enum",
+ "value": "Traditional Chinese"
+ },
+ {
+ "units": "enum",
+ "value": "Simplified Chinese"
+ },
+ {
+ "units": "enum",
+ "value": "Arabic"
+ },
+ {
+ "units": "enum",
+ "value": "Russian"
+ },
+ {
+ "units": "enum",
+ "value": "Dutch"
+ },
+ {
+ "units": "enum",
+ "value": "Korean"
+ },
+ {
+ "units": "enum",
+ "value": "Persian"
+ },
+ {
+ "units": "enum",
+ "value": "Japanese"
+ },
+ {
+ "units": "enum",
+ "value": "Portuguese"
+ },
+ {
+ "units": "enum",
+ "value": "English"
+ }
+ ],
+ "edition": {
+ "units": "enum",
+ "value": "Enterprise Edition"
+ },
+ "environment": {
+ "units": "enum",
+ "value": "developer"
+ },
+ "cpus": {
+ "units": "quantity",
+ "value": 1
+ },
+ "cores": {
+ "units": "quantity",
+ "value": 4
+ },
+ "core-threads": {
+ "units": "quantity",
+ "value": 4
+ },
+ "total-cpu-stat-user": 0.276381999254227,
+ "total-cpu-stat-nice": 0,
+ "total-cpu-stat-system": 0.636515974998474,
+ "total-cpu-stat-idle": 99.0578002929688,
+ "total-cpu-stat-iowait": 0.0125628001987934,
+ "total-cpu-stat-irq": 0,
+ "total-cpu-stat-softirq": 0.0167504008859396,
+ "total-cpu-stat-steal": 0,
+ "total-cpu-stat-guest": 0,
+ "total-cpu-stat-guest-nice": 0,
+ "memory-process-size": {
+ "units": "fraction",
+ "value": 1234
+ },
+ "memory-process-rss": {
+ "units": "fraction",
+ "value": 815
+ },
+ "memory-process-anon": {
+ "units": "fraction",
+ "value": 743
+ },
+ "memory-process-rss-hwm": {
+ "units": "fraction",
+ "value": 1072
+ },
+ "memory-process-swap-size": {
+ "units": "fraction",
+ "value": 0
+ },
+ "memory-process-huge-pages-size": {
+ "units": "fraction",
+ "value": 0
+ },
+ "memory-system-total": {
+ "units": "fraction",
+ "value": 3947
+ },
+ "memory-system-free": {
+ "units": "fraction",
+ "value": 2761
+ },
+ "memory-system-pagein-rate": {
+ "units": "fraction",
+ "value": 0
+ },
+ "memory-system-pageout-rate": {
+ "units": "fraction",
+ "value": 15.6420001983643
+ },
+ "memory-system-swapin-rate": {
+ "units": "fraction",
+ "value": 0
+ },
+ "memory-system-swapout-rate": {
+ "units": "fraction",
+ "value": 0
+ },
+ "memory-size": {
+ "units": "quantity",
+ "value": 4096
+ },
+ "memory-file-size": {
+ "units": "quantity",
+ "value": 5
+ },
+ "memory-forest-size": {
+ "units": "quantity",
+ "value": 849
+ },
+ "memory-unclosed-size": {
+ "units": "quantity",
+ "value": 0
+ },
+ "memory-cache-size": {
+ "units": "quantity",
+ "value": 320
+ },
+ "memory-registry-size": {
+ "units": "quantity",
+ "value": 1
+ },
+ "memory-join-size": {
+ "units": "quantity",
+ "value": 0
+ },
+ "host-size": {
+ "units": "MB",
+ "value": 64
+ },
+ "host-large-data-size": {
+ "units": "MB",
+ "value": 0
+ },
+ "log-device-space": {
+ "units": "MB",
+ "value": 34968
+ },
+ "data-dir-space": {
+ "units": "MB",
+ "value": 34968
+ },
+ "query-read-bytes": {
+ "units": "bytes",
+ "value": 11492428
+ },
+ "query-read-time": {
+ "units": "time",
+ "value": "PT0.141471S"
+ },
+ "query-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "query-read-load": {
+ "units": "",
+ "value": 0
+ },
+ "journal-write-bytes": {
+ "units": "bytes",
+ "value": 285717868
+ },
+ "journal-write-time": {
+ "units": "time",
+ "value": "PT17.300832S"
+ },
+ "journal-write-rate": {
+ "units": "MB/sec",
+ "value": 0.00372338597662747
+ },
+ "journal-write-load": {
+ "units": "",
+ "value": 0
+ },
+ "save-write-bytes": {
+ "units": "bytes",
+ "value": 95818597
+ },
+ "save-write-time": {
+ "units": "time",
+ "value": "PT2.972855S"
+ },
+ "save-write-rate": {
+ "units": "MB/sec",
+ "value": 0.0024786819703877
+ },
+ "save-write-load": {
+ "units": "",
+ "value": 0
+ },
+ "merge-read-bytes": {
+ "units": "bytes",
+ "value": 55374848
+ },
+ "merge-read-time": {
+ "units": "time",
+ "value": "PT0.535705S"
+ },
+ "merge-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "merge-read-load": {
+ "units": "",
+ "value": 0
+ },
+ "merge-write-bytes": {
+ "units": "bytes",
+ "value": 146451731
+ },
+ "merge-write-time": {
+ "units": "time",
+ "value": "PT5.392288S"
+ },
+ "merge-write-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "merge-write-load": {
+ "units": "",
+ "value": 0
+ },
+ "backup-read-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "backup-read-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "backup-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "backup-read-load": {
+ "units": "",
+ "value": 0
+ },
+ "backup-write-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "backup-write-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "backup-write-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "backup-write-load": {
+ "units": "",
+ "value": 0
+ },
+ "restore-read-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "restore-read-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "restore-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "restore-read-load": {
+ "units": "",
+ "value": 0
+ },
+ "restore-write-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "restore-write-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "restore-write-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "restore-write-load": {
+ "units": "",
+ "value": 0
+ },
+ "large-read-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "large-read-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "large-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "large-read-load": {
+ "units": "",
+ "value": 0
+ },
+ "large-write-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "large-write-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "large-write-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "large-write-load": {
+ "units": "",
+ "value": 0
+ },
+ "external-binary-read-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "external-binary-read-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "external-binary-read-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "external-binary-read-load": {
+ "units": "",
+ "value": 0
+ },
+ "webDAV-server-receive-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "webDAV-server-receive-time": {
+ "units": "sec",
+ "value": "PT0S"
+ },
+ "webDAV-server-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "webDAV-server-receive-load": {
+ "units": "",
+ "value": 0
+ },
+ "webDAV-server-send-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "webDAV-server-send-time": {
+ "units": "sec",
+ "value": "PT0S"
+ },
+ "webDAV-server-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "webDAV-server-send-load": {
+ "units": "",
+ "value": 0
+ },
+ "http-server-receive-bytes": {
+ "units": "bytes",
+ "value": 285915
+ },
+ "http-server-receive-time": {
+ "units": "sec",
+ "value": "PT0.02028S"
+ },
+ "http-server-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "http-server-receive-load": {
+ "units": "",
+ "value": 0
+ },
+ "http-server-send-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "http-server-send-time": {
+ "units": "sec",
+ "value": "PT0S"
+ },
+ "http-server-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "http-server-send-load": {
+ "units": "",
+ "value": 0
+ },
+ "xdbc-server-receive-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "xdbc-server-receive-time": {
+ "units": "sec",
+ "value": "PT0S"
+ },
+ "xdbc-server-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "xdbc-server-receive-load": {
+ "units": "",
+ "value": 0
+ },
+ "xdbc-server-send-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "xdbc-server-send-time": {
+ "units": "sec",
+ "value": "PT0S"
+ },
+ "xdbc-server-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "xdbc-server-send-load": {
+ "units": "",
+ "value": 0
+ },
+ "odbc-server-receive-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "odbc-server-receive-time": {
+ "units": "sec",
+ "value": "PT0S"
+ },
+ "odbc-server-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "odbc-server-receive-load": {
+ "units": "",
+ "value": 0
+ },
+ "odbc-server-send-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "odbc-server-send-time": {
+ "units": "sec",
+ "value": "PT0S"
+ },
+ "odbc-server-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "odbc-server-send-load": {
+ "units": "",
+ "value": 0
+ },
+ "xdqp-client-receive-bytes": {
+ "units": "bytes",
+ "value": 3020032
+ },
+ "xdqp-client-receive-time": {
+ "units": "time",
+ "value": "PT0.046612S"
+ },
+ "xdqp-client-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "xdqp-client-receive-load": {
+ "units": "",
+ "value": 0
+ },
+ "xdqp-client-send-bytes": {
+ "units": "bytes",
+ "value": 163513952
+ },
+ "xdqp-client-send-time": {
+ "units": "time",
+ "value": "PT22.700289S"
+ },
+ "xdqp-client-send-rate": {
+ "units": "MB/sec",
+ "value": 0.00293614692054689
+ },
+ "xdqp-client-send-load": {
+ "units": "",
+ "value": 0
+ },
+ "xdqp-server-receive-bytes": {
+ "units": "bytes",
+ "value": 131973888
+ },
+ "xdqp-server-receive-time": {
+ "units": "time",
+ "value": "PT3.474521S"
+ },
+ "xdqp-server-receive-rate": {
+ "units": "MB/sec",
+ "value": 0.00156576896551996
+ },
+ "xdqp-server-receive-load": {
+ "units": "",
+ "value": 0
+ },
+ "xdqp-server-send-bytes": {
+ "units": "bytes",
+ "value": 10035300
+ },
+ "xdqp-server-send-time": {
+ "units": "time",
+ "value": "PT4.275597S"
+ },
+ "xdqp-server-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "xdqp-server-send-load": {
+ "units": "",
+ "value": 0
+ },
+ "xdqp-server-request-time": {
+ "units": "milliseconds",
+ "value": 0.743777990341187
+ },
+ "xdqp-server-request-rate": {
+ "units": "requests/sec",
+ "value": 0.371862411499023
+ },
+ "foreign-xdqp-client-receive-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "foreign-xdqp-client-receive-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "foreign-xdqp-client-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "foreign-xdqp-client-receive-load": {
+ "units": "",
+ "value": 0
+ },
+ "foreign-xdqp-client-send-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "foreign-xdqp-client-send-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "foreign-xdqp-client-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "foreign-xdqp-client-send-load": {
+ "units": "",
+ "value": 0
+ },
+ "foreign-xdqp-server-receive-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "foreign-xdqp-server-receive-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "foreign-xdqp-server-receive-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "foreign-xdqp-server-receive-load": {
+ "units": "",
+ "value": 0
+ },
+ "foreign-xdqp-server-send-bytes": {
+ "units": "bytes",
+ "value": 0
+ },
+ "foreign-xdqp-server-send-time": {
+ "units": "time",
+ "value": "PT0S"
+ },
+ "foreign-xdqp-server-send-rate": {
+ "units": "MB/sec",
+ "value": 0
+ },
+ "foreign-xdqp-server-send-load": {
+ "units": "",
+ "value": 0
+ },
+ "read-lock-count": {
+ "units": "locks",
+ "value": 104
+ },
+ "read-lock-wait-time": {
+ "units": "seconds",
+ "value": "PT0.001464S"
+ },
+ "read-lock-hold-time": {
+ "units": "seconds",
+ "value": "PT3.022913S"
+ },
+ "read-lock-rate": {
+ "units": "locks/sec",
+ "value": 0
+ },
+ "read-lock-wait-load": {
+ "units": "",
+ "value": 0
+ },
+ "read-lock-hold-load": {
+ "units": "",
+ "value": 0
+ },
+ "write-lock-count": {
+ "units": "locks",
+ "value": 15911
+ },
+ "write-lock-wait-time": {
+ "units": "seconds",
+ "value": "PT0.317098S"
+ },
+ "write-lock-hold-time": {
+ "units": "seconds",
+ "value": "PT11M46.9923759S"
+ },
+ "write-lock-rate": {
+ "units": "locks/sec",
+ "value": 0.251882910728455
+ },
+ "write-lock-wait-load": {
+ "units": "",
+ "value": 0
+ },
+ "write-lock-hold-load": {
+ "units": "",
+ "value": 0.00429263804107904
+ },
+ "deadlock-count": {
+ "units": "locks",
+ "value": 0
+ },
+ "deadlock-wait-time": {
+ "units": "seconds",
+ "value": "PT0S"
+ },
+ "deadlock-rate": {
+ "units": "locks/sec",
+ "value": 0
+ },
+ "deadlock-wait-load": {
+ "units": "",
+ "value": 0
+ },
+ "external-kms-request-rate": {
+ "units": "requests/sec",
+ "value": 0
+ },
+ "external-kms-request-time": {
+ "units": "milliseconds",
+ "value": 0
+ },
+ "keystore-status": "normal",
+ "ldap-request-rate": {
+ "units": "requests/sec",
+ "value": 0
+ },
+ "ldap-request-time": {
+ "units": "milliseconds",
+ "value": 0
+ }
+ }
+ },
+ "related-views": {
+ "related-view": [
+ {
+ "view-type": "item",
+ "view-name": "default",
+ "view-uri": "/manage/v2/hosts/example"
+ }
+ ]
+ }
+ }
+}
+`
diff --git a/plugins/inputs/mem/README.md b/plugins/inputs/mem/README.md
index 8425468256d5a..9122b885a09e1 100644
--- a/plugins/inputs/mem/README.md
+++ b/plugins/inputs/mem/README.md
@@ -1,4 +1,4 @@
-# Mem Input Plugin
+# Memory Input Plugin
The mem plugin collects system memory metrics.
@@ -18,41 +18,44 @@ Available fields are dependent on platform.
- mem
- fields:
- - active (integer)
+ - active (integer, Darwin, FreeBSD, Linux, OpenBSD)
- available (integer)
- - buffered (integer)
- - cached (integer)
- - free (integer)
- - inactive (integer)
- - slab (integer)
+ - available_percent (float)
+ - buffered (integer, FreeBSD, Linux)
+ - cached (integer, FreeBSD, Linux, OpenBSD)
+ - commit_limit (integer, Linux)
+ - committed_as (integer, Linux)
+ - dirty (integer, Linux)
+ - free (integer, Darwin, FreeBSD, Linux, OpenBSD)
+ - high_free (integer, Linux)
+ - high_total (integer, Linux)
+ - huge_pages_free (integer, Linux)
+ - huge_page_size (integer, Linux)
+ - huge_pages_total (integer, Linux)
+ - inactive (integer, Darwin, FreeBSD, Linux, OpenBSD)
+ - laundry (integer, FreeBSD)
+ - low_free (integer, Linux)
+ - low_total (integer, Linux)
+ - mapped (integer, Linux)
+ - page_tables (integer, Linux)
+ - shared (integer, Linux)
+ - slab (integer, Linux)
+ - sreclaimable (integer, Linux)
+ - sunreclaim (integer, Linux)
+ - swap_cached (integer, Linux)
+ - swap_free (integer, Linux)
+ - swap_total (integer, Linux)
- total (integer)
- used (integer)
- - available_percent (float)
- used_percent (float)
- - wired (integer)
- - commit_limit (integer)
- - committed_as (integer)
- - dirty (integer)
- - high_free (integer)
- - high_total (integer)
- - huge_page_size (integer)
- - huge_pages_free (integer)
- - huge_pages_total (integer)
- - low_free (integer)
- - low_total (integer)
- - mapped (integer)
- - page_tables (integer)
- - shared (integer)
- - swap_cached (integer)
- - swap_free (integer)
- - swap_total (integer)
- - vmalloc_chunk (integer)
- - vmalloc_total (integer)
- - vmalloc_used (integer)
- - write_back (integer)
- - write_back_tmp (integer)
+ - vmalloc_chunk (integer, Linux)
+ - vmalloc_total (integer, Linux)
+ - vmalloc_used (integer, Linux)
+ - wired (integer, Darwin, FreeBSD, OpenBSD)
+ - write_back (integer, Linux)
+ - write_back_tmp (integer, Linux)
### Example Output:
```
-mem active=11347566592i,available=18705133568i,available_percent=89.4288960571006,buffered=1976709120i,cached=13975572480i,commit_limit=14753067008i,committed_as=2872422400i,dirty=87461888i,free=1352400896i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=6201593856i,low_free=0i,low_total=0i,mapped=310427648i,page_tables=14397440i,shared=200781824i,slab=1937526784i,swap_cached=0i,swap_free=4294963200i,swap_total=4294963200i,total=20916207616i,used=3611525120i,used_percent=17.26663449848977,vmalloc_chunk=0i,vmalloc_total=35184372087808i,vmalloc_used=0i,wired=0i,write_back=0i,write_back_tmp=0i 1536704085000000000
+mem active=9299595264i,available=16818249728i,available_percent=80.41654254645131,buffered=2383761408i,cached=13316689920i,commit_limit=14751920128i,committed_as=11781156864i,dirty=122880i,free=1877688320i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=7549939712i,low_free=0i,low_total=0i,mapped=416763904i,page_tables=19787776i,shared=670679040i,slab=2081071104i,sreclaimable=1923395584i,sunreclaim=157675520i,swap_cached=1302528i,swap_free=4286128128i,swap_total=4294963200i,total=20913917952i,used=3335778304i,used_percent=15.95004011996231,vmalloc_chunk=0i,vmalloc_total=35184372087808i,vmalloc_used=0i,wired=0i,write_back=0i,write_back_tmp=0i 1574712869000000000
```
diff --git a/plugins/inputs/mem/memory.go b/plugins/inputs/mem/memory.go
index a7d887cbe8bec..c8dbd0c2a43b5 100644
--- a/plugins/inputs/mem/memory.go
+++ b/plugins/inputs/mem/memory.go
@@ -2,6 +2,7 @@ package mem
import (
"fmt"
+ "runtime"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -9,7 +10,8 @@ import (
)
type MemStats struct {
- ps system.PS
+ ps system.PS
+ platform string
}
func (_ *MemStats) Description() string {
@@ -18,6 +20,11 @@ func (_ *MemStats) Description() string {
func (_ *MemStats) SampleConfig() string { return "" }
+func (m *MemStats) Init() error {
+ m.platform = runtime.GOOS
+ return nil
+}
+
func (s *MemStats) Gather(acc telegraf.Accumulator) error {
vm, err := s.ps.VMStat()
if err != nil {
@@ -28,37 +35,62 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error {
"total": vm.Total,
"available": vm.Available,
"used": vm.Used,
- "free": vm.Free,
- "cached": vm.Cached,
- "buffered": vm.Buffers,
- "active": vm.Active,
- "inactive": vm.Inactive,
- "wired": vm.Wired,
- "slab": vm.Slab,
"used_percent": 100 * float64(vm.Used) / float64(vm.Total),
"available_percent": 100 * float64(vm.Available) / float64(vm.Total),
- "commit_limit": vm.CommitLimit,
- "committed_as": vm.CommittedAS,
- "dirty": vm.Dirty,
- "high_free": vm.HighFree,
- "high_total": vm.HighTotal,
- "huge_page_size": vm.HugePageSize,
- "huge_pages_free": vm.HugePagesFree,
- "huge_pages_total": vm.HugePagesTotal,
- "low_free": vm.LowFree,
- "low_total": vm.LowTotal,
- "mapped": vm.Mapped,
- "page_tables": vm.PageTables,
- "shared": vm.Shared,
- "swap_cached": vm.SwapCached,
- "swap_free": vm.SwapFree,
- "swap_total": vm.SwapTotal,
- "vmalloc_chunk": vm.VMallocChunk,
- "vmalloc_total": vm.VMallocTotal,
- "vmalloc_used": vm.VMallocUsed,
- "write_back": vm.Writeback,
- "write_back_tmp": vm.WritebackTmp,
}
+
+ switch s.platform {
+ case "darwin":
+ fields["active"] = vm.Active
+ fields["free"] = vm.Free
+ fields["inactive"] = vm.Inactive
+ fields["wired"] = vm.Wired
+ case "openbsd":
+ fields["active"] = vm.Active
+ fields["cached"] = vm.Cached
+ fields["free"] = vm.Free
+ fields["inactive"] = vm.Inactive
+ fields["wired"] = vm.Wired
+ case "freebsd":
+ fields["active"] = vm.Active
+ fields["buffered"] = vm.Buffers
+ fields["cached"] = vm.Cached
+ fields["free"] = vm.Free
+ fields["inactive"] = vm.Inactive
+ fields["laundry"] = vm.Laundry
+ fields["wired"] = vm.Wired
+ case "linux":
+ fields["active"] = vm.Active
+ fields["buffered"] = vm.Buffers
+ fields["cached"] = vm.Cached
+ fields["commit_limit"] = vm.CommitLimit
+ fields["committed_as"] = vm.CommittedAS
+ fields["dirty"] = vm.Dirty
+ fields["free"] = vm.Free
+ fields["high_free"] = vm.HighFree
+ fields["high_total"] = vm.HighTotal
+ fields["huge_pages_free"] = vm.HugePagesFree
+ fields["huge_page_size"] = vm.HugePageSize
+ fields["huge_pages_total"] = vm.HugePagesTotal
+ fields["inactive"] = vm.Inactive
+ fields["low_free"] = vm.LowFree
+ fields["low_total"] = vm.LowTotal
+ fields["mapped"] = vm.Mapped
+ fields["page_tables"] = vm.PageTables
+ fields["shared"] = vm.Shared
+ fields["slab"] = vm.Slab
+ fields["sreclaimable"] = vm.SReclaimable
+ fields["sunreclaim"] = vm.SUnreclaim
+ fields["swap_cached"] = vm.SwapCached
+ fields["swap_free"] = vm.SwapFree
+ fields["swap_total"] = vm.SwapTotal
+ fields["vmalloc_chunk"] = vm.VMallocChunk
+ fields["vmalloc_total"] = vm.VMallocTotal
+ fields["vmalloc_used"] = vm.VMallocUsed
+ fields["write_back_tmp"] = vm.WritebackTmp
+ fields["write_back"] = vm.Writeback
+ }
+
acc.AddGauge("mem", fields, nil)
return nil
diff --git a/plugins/inputs/mem/memory_test.go b/plugins/inputs/mem/memory_test.go
index 06f2f6ea97fd0..626a1806c4055 100644
--- a/plugins/inputs/mem/memory_test.go
+++ b/plugins/inputs/mem/memory_test.go
@@ -2,7 +2,9 @@ package mem
import (
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs/system"
"github.com/influxdata/telegraf/testutil"
"github.com/shirou/gopsutil/mem"
@@ -40,6 +42,8 @@ func TestMemStats(t *testing.T) {
Mapped: 42236,
PageTables: 1236,
Shared: 0,
+ SReclaimable: 1923022848,
+ SUnreclaim: 157728768,
SwapCached: 0,
SwapFree: 524280,
SwapTotal: 524280,
@@ -51,46 +55,62 @@ func TestMemStats(t *testing.T) {
}
mps.On("VMStat").Return(vms, nil)
+ plugin := &MemStats{ps: &mps}
- err = (&MemStats{&mps}).Gather(&acc)
+ err = plugin.Init()
require.NoError(t, err)
- memfields := map[string]interface{}{
- "total": uint64(12400),
- "available": uint64(7600),
- "used": uint64(5000),
- "available_percent": float64(7600) / float64(12400) * 100,
- "used_percent": float64(5000) / float64(12400) * 100,
- "free": uint64(1235),
- "cached": uint64(0),
- "buffered": uint64(0),
- "active": uint64(8134),
- "inactive": uint64(1124),
- "wired": uint64(134),
- "slab": uint64(1234),
- "commit_limit": uint64(1),
- "committed_as": uint64(118680),
- "dirty": uint64(4),
- "high_free": uint64(0),
- "high_total": uint64(0),
- "huge_page_size": uint64(4096),
- "huge_pages_free": uint64(0),
- "huge_pages_total": uint64(0),
- "low_free": uint64(69936),
- "low_total": uint64(255908),
- "mapped": uint64(42236),
- "page_tables": uint64(1236),
- "shared": uint64(0),
- "swap_cached": uint64(0),
- "swap_free": uint64(524280),
- "swap_total": uint64(524280),
- "vmalloc_chunk": uint64(3872908),
- "vmalloc_total": uint64(3874808),
- "vmalloc_used": uint64(1416),
- "write_back": uint64(0),
- "write_back_tmp": uint64(0),
+ plugin.platform = "linux"
+
+ require.NoError(t, err)
+ err = plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "mem",
+ map[string]string{},
+ map[string]interface{}{
+ "total": uint64(12400),
+ "available": uint64(7600),
+ "used": uint64(5000),
+ "available_percent": float64(7600) / float64(12400) * 100,
+ "used_percent": float64(5000) / float64(12400) * 100,
+ "free": uint64(1235),
+ "cached": uint64(0),
+ "buffered": uint64(0),
+ "active": uint64(8134),
+ "inactive": uint64(1124),
+ // "wired": uint64(134),
+ "slab": uint64(1234),
+ "commit_limit": uint64(1),
+ "committed_as": uint64(118680),
+ "dirty": uint64(4),
+ "high_free": uint64(0),
+ "high_total": uint64(0),
+ "huge_page_size": uint64(4096),
+ "huge_pages_free": uint64(0),
+ "huge_pages_total": uint64(0),
+ "low_free": uint64(69936),
+ "low_total": uint64(255908),
+ "mapped": uint64(42236),
+ "page_tables": uint64(1236),
+ "shared": uint64(0),
+ "sreclaimable": uint64(1923022848),
+ "sunreclaim": uint64(157728768),
+ "swap_cached": uint64(0),
+ "swap_free": uint64(524280),
+ "swap_total": uint64(524280),
+ "vmalloc_chunk": uint64(3872908),
+ "vmalloc_total": uint64(3874808),
+ "vmalloc_used": uint64(1416),
+ "write_back": uint64(0),
+ "write_back_tmp": uint64(0),
+ },
+ time.Unix(0, 0),
+ telegraf.Gauge,
+ ),
}
- acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string))
- acc.Metrics = nil
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md
index b18908b8a3b9a..2845881880d95 100644
--- a/plugins/inputs/mesos/README.md
+++ b/plugins/inputs/mesos/README.md
@@ -10,8 +10,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso
[[inputs.mesos]]
## Timeout, in ms.
timeout = 100
+
## A list of Mesos masters.
masters = ["http://localhost:5050"]
+
## Master metrics groups to be collected, by default, all enabled.
master_collections = [
"resources",
@@ -19,13 +21,17 @@ For more information, please check the [Mesos Observability Metrics](http://meso
"system",
"agents",
"frameworks",
+ "framework_offers",
"tasks",
"messages",
"evqueue",
"registrar",
+ "allocator",
]
+
## A list of Mesos slaves, default is []
# slaves = []
+
## Slave metrics groups to be collected, by default, all enabled.
# slave_collections = [
# "resources",
@@ -100,6 +106,10 @@ Mesos master metric groups
- master/slaves_connected
- master/slaves_disconnected
- master/slaves_inactive
+ - master/slave_unreachable_canceled
+ - master/slave_unreachable_completed
+ - master/slave_unreachable_scheduled
+ - master/slaves_unreachable
- frameworks
- master/frameworks_active
@@ -108,6 +118,22 @@ Mesos master metric groups
- master/frameworks_inactive
- master/outstanding_offers
+- framework offers
+ - master/frameworks/subscribed
+ - master/frameworks/calls_total
+ - master/frameworks/calls
+ - master/frameworks/events_total
+ - master/frameworks/events
+ - master/frameworks/operations_total
+ - master/frameworks/operations
+ - master/frameworks/tasks/active
+ - master/frameworks/tasks/terminal
+ - master/frameworks/offers/sent
+ - master/frameworks/offers/accepted
+ - master/frameworks/offers/declined
+ - master/frameworks/offers/rescinded
+ - master/frameworks/roles/suppressed
+
- tasks
- master/tasks_error
- master/tasks_failed
@@ -117,6 +143,11 @@ Mesos master metric groups
- master/tasks_running
- master/tasks_staging
- master/tasks_starting
+ - master/tasks_dropped
+ - master/tasks_gone
+ - master/tasks_gone_by_operator
+ - master/tasks_killing
+ - master/tasks_unreachable
- messages
- master/invalid_executor_to_framework_messages
@@ -155,11 +186,17 @@ Mesos master metric groups
- master/task_lost/source_master/reason_slave_removed
- master/task_lost/source_slave/reason_executor_terminated
- master/valid_executor_to_framework_messages
+ - master/invalid_operation_status_update_acknowledgements
+ - master/messages_operation_status_update_acknowledgement
+ - master/messages_reconcile_operations
+ - master/messages_suppress_offers
+ - master/valid_operation_status_update_acknowledgements
- evqueue
- master/event_queue_dispatches
- master/event_queue_http_requests
- master/event_queue_messages
+ - master/operator_event_stream_subscribers
- registrar
- registrar/state_fetch_ms
@@ -172,6 +209,45 @@ Mesos master metric groups
- registrar/state_store_ms/p99
- registrar/state_store_ms/p999
- registrar/state_store_ms/p9999
+ - registrar/state_store_ms/count
+ - registrar/log/ensemble_size
+ - registrar/log/recovered
+ - registrar/queued_operations
+ - registrar/registry_size_bytes
+
+- allocator
+ - allocator/allocation_run_ms
+ - allocator/allocation_run_ms/count
+ - allocator/allocation_run_ms/max
+ - allocator/allocation_run_ms/min
+ - allocator/allocation_run_ms/p50
+ - allocator/allocation_run_ms/p90
+ - allocator/allocation_run_ms/p95
+ - allocator/allocation_run_ms/p99
+ - allocator/allocation_run_ms/p999
+ - allocator/allocation_run_ms/p9999
+ - allocator/allocation_runs
+ - allocator/allocation_run_latency_ms
+ - allocator/allocation_run_latency_ms/count
+ - allocator/allocation_run_latency_ms/max
+ - allocator/allocation_run_latency_ms/min
+ - allocator/allocation_run_latency_ms/p50
+ - allocator/allocation_run_latency_ms/p90
+ - allocator/allocation_run_latency_ms/p95
+ - allocator/allocation_run_latency_ms/p99
+ - allocator/allocation_run_latency_ms/p999
+ - allocator/allocation_run_latency_ms/p9999
+ - allocator/roles/shares/dominant
+ - allocator/event_queue_dispatches
+ - allocator/offer_filters/roles/active
+ - allocator/quota/roles/resources/offered_or_allocated
+ - allocator/quota/roles/resources/guarantee
+ - allocator/resources/cpus/offered_or_allocated
+ - allocator/resources/cpus/total
+ - allocator/resources/disk/offered_or_allocated
+ - allocator/resources/disk/total
+ - allocator/resources/mem/offered_or_allocated
+ - allocator/resources/mem/total
Mesos slave metric groups
- resources
diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go
index 9190ceae8784b..acc836cba34bb 100644
--- a/plugins/inputs/mesos/mesos.go
+++ b/plugins/inputs/mesos/mesos.go
@@ -14,7 +14,7 @@ import (
"time"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
)
@@ -32,9 +32,10 @@ type Mesos struct {
MasterCols []string `toml:"master_collections"`
Slaves []string
SlaveCols []string `toml:"slave_collections"`
- //SlaveTasks bool
tls.ClientConfig
+ Log telegraf.Logger
+
initialized bool
client *http.Client
masterURLs []*url.URL
@@ -42,15 +43,17 @@ type Mesos struct {
}
var allMetrics = map[Role][]string{
- MASTER: {"resources", "master", "system", "agents", "frameworks", "tasks", "messages", "evqueue", "registrar"},
+ MASTER: {"resources", "master", "system", "agents", "frameworks", "framework_offers", "tasks", "messages", "evqueue", "registrar", "allocator"},
SLAVE: {"resources", "agent", "system", "executors", "tasks", "messages"},
}
var sampleConfig = `
## Timeout, in ms.
timeout = 100
+
## A list of Mesos masters.
masters = ["http://localhost:5050"]
+
## Master metrics groups to be collected, by default, all enabled.
master_collections = [
"resources",
@@ -58,13 +61,17 @@ var sampleConfig = `
"system",
"agents",
"frameworks",
+ "framework_offers",
"tasks",
"messages",
"evqueue",
"registrar",
+ "allocator",
]
+
## A list of Mesos slaves, default is []
# slaves = []
+
## Slave metrics groups to be collected, by default, all enabled.
# slave_collections = [
# "resources",
@@ -108,7 +115,7 @@ func parseURL(s string, role Role) (*url.URL, error) {
}
s = "http://" + host + ":" + port
- log.Printf("W! [inputs.mesos] Using %q as connection URL; please update your configuration to use an URL", s)
+ log.Printf("W! [inputs.mesos] using %q as connection URL; please update your configuration to use an URL", s)
}
return url.Parse(s)
@@ -124,7 +131,7 @@ func (m *Mesos) initialize() error {
}
if m.Timeout == 0 {
- log.Println("I! [inputs.mesos] Missing timeout value, setting default value (100ms)")
+ m.Log.Info("Missing timeout value, setting default value (100ms)")
m.Timeout = 100
}
@@ -189,17 +196,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
wg.Done()
return
}(slave)
-
- // if !m.SlaveTasks {
- // continue
- // }
-
- // wg.Add(1)
- // go func(c string) {
- // acc.AddError(m.gatherSlaveTaskMetrics(slave, acc))
- // wg.Done()
- // return
- // }(v)
}
wg.Wait()
@@ -246,7 +242,7 @@ func metricsDiff(role Role, w []string) []string {
return b
}
-// masterBlocks serves as kind of metrics registry groupping them in sets
+// masterBlocks serves as kind of metrics registry grouping them in sets
func getMetrics(role Role, group string) []string {
var m map[string][]string
@@ -305,6 +301,10 @@ func getMetrics(role Role, group string) []string {
"master/slaves_connected",
"master/slaves_disconnected",
"master/slaves_inactive",
+ "master/slave_unreachable_canceled",
+ "master/slave_unreachable_completed",
+ "master/slave_unreachable_scheduled",
+ "master/slaves_unreachable",
}
m["frameworks"] = []string{
@@ -315,6 +315,12 @@ func getMetrics(role Role, group string) []string {
"master/outstanding_offers",
}
+ // framework_offers and allocator metrics have unpredictable names, so they can't be listed here.
+ // These empty groups are included to prevent the "unknown metrics group" info log below.
+ // filterMetrics() filters these metrics by looking for names with the corresponding prefix.
+ m["framework_offers"] = []string{}
+ m["allocator"] = []string{}
+
m["tasks"] = []string{
"master/tasks_error",
"master/tasks_failed",
@@ -324,6 +330,11 @@ func getMetrics(role Role, group string) []string {
"master/tasks_running",
"master/tasks_staging",
"master/tasks_starting",
+ "master/tasks_dropped",
+ "master/tasks_gone",
+ "master/tasks_gone_by_operator",
+ "master/tasks_killing",
+ "master/tasks_unreachable",
}
m["messages"] = []string{
@@ -363,12 +374,18 @@ func getMetrics(role Role, group string) []string {
"master/task_lost/source_master/reason_slave_removed",
"master/task_lost/source_slave/reason_executor_terminated",
"master/valid_executor_to_framework_messages",
+ "master/invalid_operation_status_update_acknowledgements",
+ "master/messages_operation_status_update_acknowledgement",
+ "master/messages_reconcile_operations",
+ "master/messages_suppress_offers",
+ "master/valid_operation_status_update_acknowledgements",
}
m["evqueue"] = []string{
"master/event_queue_dispatches",
"master/event_queue_http_requests",
"master/event_queue_messages",
+ "master/operator_event_stream_subscribers",
}
m["registrar"] = []string{
@@ -382,6 +399,11 @@ func getMetrics(role Role, group string) []string {
"registrar/state_store_ms/p99",
"registrar/state_store_ms/p999",
"registrar/state_store_ms/p9999",
+ "registrar/log/ensemble_size",
+ "registrar/log/recovered",
+ "registrar/queued_operations",
+ "registrar/registry_size_bytes",
+ "registrar/state_store_ms/count",
}
} else if role == SLAVE {
m["resources"] = []string{
@@ -459,7 +481,7 @@ func getMetrics(role Role, group string) []string {
ret, ok := m[group]
if !ok {
- log.Printf("I! [mesos] Unknown %s metrics group: %s\n", role, group)
+ log.Printf("I! [inputs.mesos] unknown role %q metrics group: %s", role, group)
return []string{}
}
@@ -477,9 +499,27 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) {
}
for _, k := range metricsDiff(role, selectedMetrics) {
- for _, v := range getMetrics(role, k) {
- if _, ok = (*metrics)[v]; ok {
- delete((*metrics), v)
+ switch k {
+ // allocator and framework_offers metrics have unpredictable names, so we have to identify them by name prefix.
+ case "allocator":
+ for m := range *metrics {
+ if strings.HasPrefix(m, "allocator/") {
+ delete((*metrics), m)
+ }
+ }
+ case "framework_offers":
+ for m := range *metrics {
+ if strings.HasPrefix(m, "master/frameworks/") || strings.HasPrefix(m, "frameworks/") {
+ delete((*metrics), m)
+ }
+ }
+
+ // All other metrics have predictable names. We can use getMetrics() to retrieve them.
+ default:
+ for _, v := range getMetrics(role, k) {
+ if _, ok = (*metrics)[v]; ok {
+ delete((*metrics), v)
+ }
}
}
}
diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go
index 905adb6e3d44d..e25f250c8f8d4 100644
--- a/plugins/inputs/mesos/mesos_test.go
+++ b/plugins/inputs/mesos/mesos_test.go
@@ -8,6 +8,7 @@ import (
"net/http/httptest"
"net/url"
"os"
+ "strings"
"testing"
"github.com/influxdata/telegraf/testutil"
@@ -27,194 +28,262 @@ func randUUID() string {
return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}
-func generateMetrics() {
- masterMetrics = make(map[string]interface{})
+// master metrics that will be returned by generateMetrics()
+var masterMetricNames []string = []string{
+ // resources
+ "master/cpus_percent",
+ "master/cpus_used",
+ "master/cpus_total",
+ "master/cpus_revocable_percent",
+ "master/cpus_revocable_total",
+ "master/cpus_revocable_used",
+ "master/disk_percent",
+ "master/disk_used",
+ "master/disk_total",
+ "master/disk_revocable_percent",
+ "master/disk_revocable_total",
+ "master/disk_revocable_used",
+ "master/gpus_percent",
+ "master/gpus_used",
+ "master/gpus_total",
+ "master/gpus_revocable_percent",
+ "master/gpus_revocable_total",
+ "master/gpus_revocable_used",
+ "master/mem_percent",
+ "master/mem_used",
+ "master/mem_total",
+ "master/mem_revocable_percent",
+ "master/mem_revocable_total",
+ "master/mem_revocable_used",
+ // master
+ "master/elected",
+ "master/uptime_secs",
+ // system
+ "system/cpus_total",
+ "system/load_15min",
+ "system/load_5min",
+ "system/load_1min",
+ "system/mem_free_bytes",
+ "system/mem_total_bytes",
+ // agents
+ "master/slave_registrations",
+ "master/slave_removals",
+ "master/slave_reregistrations",
+ "master/slave_shutdowns_scheduled",
+ "master/slave_shutdowns_canceled",
+ "master/slave_shutdowns_completed",
+ "master/slaves_active",
+ "master/slaves_connected",
+ "master/slaves_disconnected",
+ "master/slaves_inactive",
+ "master/slave_unreachable_canceled",
+ "master/slave_unreachable_completed",
+ "master/slave_unreachable_scheduled",
+ "master/slaves_unreachable",
+ // frameworks
+ "master/frameworks_active",
+ "master/frameworks_connected",
+ "master/frameworks_disconnected",
+ "master/frameworks_inactive",
+ "master/outstanding_offers",
+ // framework offers
+ "master/frameworks/marathon/abc-123/calls",
+ "master/frameworks/marathon/abc-123/calls/accept",
+ "master/frameworks/marathon/abc-123/events",
+ "master/frameworks/marathon/abc-123/events/error",
+ "master/frameworks/marathon/abc-123/offers/sent",
+ "master/frameworks/marathon/abc-123/operations",
+ "master/frameworks/marathon/abc-123/operations/create",
+ "master/frameworks/marathon/abc-123/roles/*/suppressed",
+ "master/frameworks/marathon/abc-123/subscribed",
+ "master/frameworks/marathon/abc-123/tasks/active/task_killing",
+ "master/frameworks/marathon/abc-123/tasks/active/task_dropped",
+ "master/frameworks/marathon/abc-123/tasks/terminal/task_dropped",
+ "master/frameworks/marathon/abc-123/unknown/unknown", // test case for unknown metric type
+ // tasks
+ "master/tasks_error",
+ "master/tasks_failed",
+ "master/tasks_finished",
+ "master/tasks_killed",
+ "master/tasks_lost",
+ "master/tasks_running",
+ "master/tasks_staging",
+ "master/tasks_starting",
+ "master/tasks_dropped",
+ "master/tasks_gone",
+ "master/tasks_gone_by_operator",
+ "master/tasks_killing",
+ "master/tasks_unreachable",
+ // messages
+ "master/invalid_executor_to_framework_messages",
+ "master/invalid_framework_to_executor_messages",
+ "master/invalid_status_update_acknowledgements",
+ "master/invalid_status_updates",
+ "master/dropped_messages",
+ "master/messages_authenticate",
+ "master/messages_deactivate_framework",
+ "master/messages_decline_offers",
+ "master/messages_executor_to_framework",
+ "master/messages_exited_executor",
+ "master/messages_framework_to_executor",
+ "master/messages_kill_task",
+ "master/messages_launch_tasks",
+ "master/messages_reconcile_tasks",
+ "master/messages_register_framework",
+ "master/messages_register_slave",
+ "master/messages_reregister_framework",
+ "master/messages_reregister_slave",
+ "master/messages_resource_request",
+ "master/messages_revive_offers",
+ "master/messages_status_update",
+ "master/messages_status_update_acknowledgement",
+ "master/messages_unregister_framework",
+ "master/messages_unregister_slave",
+ "master/messages_update_slave",
+ "master/recovery_slave_removals",
+ "master/slave_removals/reason_registered",
+ "master/slave_removals/reason_unhealthy",
+ "master/slave_removals/reason_unregistered",
+ "master/valid_framework_to_executor_messages",
+ "master/valid_status_update_acknowledgements",
+ "master/valid_status_updates",
+ "master/task_lost/source_master/reason_invalid_offers",
+ "master/task_lost/source_master/reason_slave_removed",
+ "master/task_lost/source_slave/reason_executor_terminated",
+ "master/valid_executor_to_framework_messages",
+ "master/invalid_operation_status_update_acknowledgements",
+ "master/messages_operation_status_update_acknowledgement",
+ "master/messages_reconcile_operations",
+ "master/messages_suppress_offers",
+ "master/valid_operation_status_update_acknowledgements",
+ // evgqueue
+ "master/event_queue_dispatches",
+ "master/event_queue_http_requests",
+ "master/event_queue_messages",
+ "master/operator_event_stream_subscribers",
+ // registrar
+ "registrar/log/ensemble_size",
+ "registrar/log/recovered",
+ "registrar/queued_operations",
+ "registrar/registry_size_bytes",
+ "registrar/state_fetch_ms",
+ "registrar/state_store_ms",
+ "registrar/state_store_ms/max",
+ "registrar/state_store_ms/min",
+ "registrar/state_store_ms/p50",
+ "registrar/state_store_ms/p90",
+ "registrar/state_store_ms/p95",
+ "registrar/state_store_ms/p99",
+ "registrar/state_store_ms/p999",
+ "registrar/state_store_ms/p9999",
+ "registrar/state_store_ms/count",
+ // allocator
+ "allocator/mesos/allocation_run_ms",
+ "allocator/mesos/allocation_run_ms/count",
+ "allocator/mesos/allocation_run_ms/max",
+ "allocator/mesos/allocation_run_ms/min",
+ "allocator/mesos/allocation_run_ms/p50",
+ "allocator/mesos/allocation_run_ms/p90",
+ "allocator/mesos/allocation_run_ms/p95",
+ "allocator/mesos/allocation_run_ms/p99",
+ "allocator/mesos/allocation_run_ms/p999",
+ "allocator/mesos/allocation_run_ms/p9999",
+ "allocator/mesos/allocation_runs",
+ "allocator/mesos/allocation_run_latency_ms",
+ "allocator/mesos/allocation_run_latency_ms/count",
+ "allocator/mesos/allocation_run_latency_ms/max",
+ "allocator/mesos/allocation_run_latency_ms/min",
+ "allocator/mesos/allocation_run_latency_ms/p50",
+ "allocator/mesos/allocation_run_latency_ms/p90",
+ "allocator/mesos/allocation_run_latency_ms/p95",
+ "allocator/mesos/allocation_run_latency_ms/p99",
+ "allocator/mesos/allocation_run_latency_ms/p999",
+ "allocator/mesos/allocation_run_latency_ms/p9999",
+ "allocator/mesos/roles/*/shares/dominant",
+ "allocator/mesos/event_queue_dispatches",
+ "allocator/mesos/offer_filters/roles/*/active",
+ "allocator/mesos/quota/roles/*/resources/disk/offered_or_allocated",
+ "allocator/mesos/quota/roles/*/resources/mem/guarantee",
+ "allocator/mesos/quota/roles/*/resources/disk/guarantee",
+ "allocator/mesos/resources/cpus/offered_or_allocated",
+ "allocator/mesos/resources/cpus/total",
+ "allocator/mesos/resources/disk/offered_or_allocated",
+ "allocator/mesos/resources/disk/total",
+ "allocator/mesos/resources/mem/offered_or_allocated",
+ "allocator/mesos/resources/mem/total",
+}
- metricNames := []string{
- // resources
- "master/cpus_percent",
- "master/cpus_used",
- "master/cpus_total",
- "master/cpus_revocable_percent",
- "master/cpus_revocable_total",
- "master/cpus_revocable_used",
- "master/disk_percent",
- "master/disk_used",
- "master/disk_total",
- "master/disk_revocable_percent",
- "master/disk_revocable_total",
- "master/disk_revocable_used",
- "master/gpus_percent",
- "master/gpus_used",
- "master/gpus_total",
- "master/gpus_revocable_percent",
- "master/gpus_revocable_total",
- "master/gpus_revocable_used",
- "master/mem_percent",
- "master/mem_used",
- "master/mem_total",
- "master/mem_revocable_percent",
- "master/mem_revocable_total",
- "master/mem_revocable_used",
- // master
- "master/elected",
- "master/uptime_secs",
- // system
- "system/cpus_total",
- "system/load_15min",
- "system/load_5min",
- "system/load_1min",
- "system/mem_free_bytes",
- "system/mem_total_bytes",
- // agents
- "master/slave_registrations",
- "master/slave_removals",
- "master/slave_reregistrations",
- "master/slave_shutdowns_scheduled",
- "master/slave_shutdowns_canceled",
- "master/slave_shutdowns_completed",
- "master/slaves_active",
- "master/slaves_connected",
- "master/slaves_disconnected",
- "master/slaves_inactive",
- // frameworks
- "master/frameworks_active",
- "master/frameworks_connected",
- "master/frameworks_disconnected",
- "master/frameworks_inactive",
- "master/outstanding_offers",
- // tasks
- "master/tasks_error",
- "master/tasks_failed",
- "master/tasks_finished",
- "master/tasks_killed",
- "master/tasks_lost",
- "master/tasks_running",
- "master/tasks_staging",
- "master/tasks_starting",
- // messages
- "master/invalid_executor_to_framework_messages",
- "master/invalid_framework_to_executor_messages",
- "master/invalid_status_update_acknowledgements",
- "master/invalid_status_updates",
- "master/dropped_messages",
- "master/messages_authenticate",
- "master/messages_deactivate_framework",
- "master/messages_decline_offers",
- "master/messages_executor_to_framework",
- "master/messages_exited_executor",
- "master/messages_framework_to_executor",
- "master/messages_kill_task",
- "master/messages_launch_tasks",
- "master/messages_reconcile_tasks",
- "master/messages_register_framework",
- "master/messages_register_slave",
- "master/messages_reregister_framework",
- "master/messages_reregister_slave",
- "master/messages_resource_request",
- "master/messages_revive_offers",
- "master/messages_status_update",
- "master/messages_status_update_acknowledgement",
- "master/messages_unregister_framework",
- "master/messages_unregister_slave",
- "master/messages_update_slave",
- "master/recovery_slave_removals",
- "master/slave_removals/reason_registered",
- "master/slave_removals/reason_unhealthy",
- "master/slave_removals/reason_unregistered",
- "master/valid_framework_to_executor_messages",
- "master/valid_status_update_acknowledgements",
- "master/valid_status_updates",
- "master/task_lost/source_master/reason_invalid_offers",
- "master/task_lost/source_master/reason_slave_removed",
- "master/task_lost/source_slave/reason_executor_terminated",
- "master/valid_executor_to_framework_messages",
- // evgqueue
- "master/event_queue_dispatches",
- "master/event_queue_http_requests",
- "master/event_queue_messages",
- // registrar
- "registrar/state_fetch_ms",
- "registrar/state_store_ms",
- "registrar/state_store_ms/max",
- "registrar/state_store_ms/min",
- "registrar/state_store_ms/p50",
- "registrar/state_store_ms/p90",
- "registrar/state_store_ms/p95",
- "registrar/state_store_ms/p99",
- "registrar/state_store_ms/p999",
- "registrar/state_store_ms/p9999",
- }
+// slave metrics that will be returned by generateMetrics()
+var slaveMetricNames []string = []string{
+ // resources
+ "slave/cpus_percent",
+ "slave/cpus_used",
+ "slave/cpus_total",
+ "slave/cpus_revocable_percent",
+ "slave/cpus_revocable_total",
+ "slave/cpus_revocable_used",
+ "slave/disk_percent",
+ "slave/disk_used",
+ "slave/disk_total",
+ "slave/disk_revocable_percent",
+ "slave/disk_revocable_total",
+ "slave/disk_revocable_used",
+ "slave/gpus_percent",
+ "slave/gpus_used",
+ "slave/gpus_total",
+ "slave/gpus_revocable_percent",
+ "slave/gpus_revocable_total",
+ "slave/gpus_revocable_used",
+ "slave/mem_percent",
+ "slave/mem_used",
+ "slave/mem_total",
+ "slave/mem_revocable_percent",
+ "slave/mem_revocable_total",
+ "slave/mem_revocable_used",
+ // agent
+ "slave/registered",
+ "slave/uptime_secs",
+ // system
+ "system/cpus_total",
+ "system/load_15min",
+ "system/load_5min",
+ "system/load_1min",
+ "system/mem_free_bytes",
+ "system/mem_total_bytes",
+ // executors
+ "containerizer/mesos/container_destroy_errors",
+ "slave/container_launch_errors",
+ "slave/executors_preempted",
+ "slave/frameworks_active",
+ "slave/executor_directory_max_allowed_age_secs",
+ "slave/executors_registering",
+ "slave/executors_running",
+ "slave/executors_terminated",
+ "slave/executors_terminating",
+ "slave/recovery_errors",
+ // tasks
+ "slave/tasks_failed",
+ "slave/tasks_finished",
+ "slave/tasks_killed",
+ "slave/tasks_lost",
+ "slave/tasks_running",
+ "slave/tasks_staging",
+ "slave/tasks_starting",
+ // messages
+ "slave/invalid_framework_messages",
+ "slave/invalid_status_updates",
+ "slave/valid_framework_messages",
+ "slave/valid_status_updates",
+}
- for _, k := range metricNames {
+func generateMetrics() {
+ masterMetrics = make(map[string]interface{})
+ for _, k := range masterMetricNames {
masterMetrics[k] = rand.Float64()
}
slaveMetrics = make(map[string]interface{})
-
- metricNames = []string{
- // resources
- "slave/cpus_percent",
- "slave/cpus_used",
- "slave/cpus_total",
- "slave/cpus_revocable_percent",
- "slave/cpus_revocable_total",
- "slave/cpus_revocable_used",
- "slave/disk_percent",
- "slave/disk_used",
- "slave/disk_total",
- "slave/disk_revocable_percent",
- "slave/disk_revocable_total",
- "slave/disk_revocable_used",
- "slave/gpus_percent",
- "slave/gpus_used",
- "slave/gpus_total",
- "slave/gpus_revocable_percent",
- "slave/gpus_revocable_total",
- "slave/gpus_revocable_used",
- "slave/mem_percent",
- "slave/mem_used",
- "slave/mem_total",
- "slave/mem_revocable_percent",
- "slave/mem_revocable_total",
- "slave/mem_revocable_used",
- // agent
- "slave/registered",
- "slave/uptime_secs",
- // system
- "system/cpus_total",
- "system/load_15min",
- "system/load_5min",
- "system/load_1min",
- "system/mem_free_bytes",
- "system/mem_total_bytes",
- // executors
- "containerizer/mesos/container_destroy_errors",
- "slave/container_launch_errors",
- "slave/executors_preempted",
- "slave/frameworks_active",
- "slave/executor_directory_max_allowed_age_secs",
- "slave/executors_registering",
- "slave/executors_running",
- "slave/executors_terminated",
- "slave/executors_terminating",
- "slave/recovery_errors",
- // tasks
- "slave/tasks_failed",
- "slave/tasks_finished",
- "slave/tasks_killed",
- "slave/tasks_lost",
- "slave/tasks_running",
- "slave/tasks_staging",
- "slave/tasks_starting",
- // messages
- "slave/invalid_framework_messages",
- "slave/invalid_status_updates",
- "slave/valid_framework_messages",
- "slave/valid_status_updates",
- }
-
- for _, k := range metricNames {
+ for _, k := range slaveMetricNames {
slaveMetrics[k] = rand.Float64()
}
@@ -280,6 +349,7 @@ func TestMesosMaster(t *testing.T) {
var acc testutil.Accumulator
m := Mesos{
+ Log: testutil.Logger{},
Masters: []string{masterTestServer.Listener.Addr().String()},
Timeout: 10,
}
@@ -295,8 +365,9 @@ func TestMesosMaster(t *testing.T) {
func TestMasterFilter(t *testing.T) {
m := Mesos{
+ Log: testutil.Logger{},
MasterCols: []string{
- "resources", "master", "registrar",
+ "resources", "master", "registrar", "allocator",
},
}
b := []string{
@@ -306,6 +377,26 @@ func TestMasterFilter(t *testing.T) {
m.filterMetrics(MASTER, &masterMetrics)
+ // Assert expected metrics are present.
+ for _, v := range m.MasterCols {
+ for _, x := range getMetrics(MASTER, v) {
+ if _, ok := masterMetrics[x]; !ok {
+ t.Errorf("Didn't find key %s, it should present.", x)
+ }
+ }
+ }
+ // m.MasterCols includes "allocator", so allocator metrics should be present.
+ // allocator metrics have unpredictable names, so we can't rely on the list of metrics returned from
+ // getMetrics(). We have to find them by checking name prefixes.
+ for _, x := range masterMetricNames {
+ if strings.HasPrefix(x, "allocator/") {
+ if _, ok := masterMetrics[x]; !ok {
+ t.Errorf("Didn't find key %s, it should be present.", x)
+ }
+ }
+ }
+
+ // Assert unexpected metrics are not present.
for _, v := range b {
for _, x := range getMetrics(MASTER, v) {
if _, ok := masterMetrics[x]; ok {
@@ -313,11 +404,12 @@ func TestMasterFilter(t *testing.T) {
}
}
}
- for _, v := range m.MasterCols {
- for _, x := range getMetrics(MASTER, v) {
- if _, ok := masterMetrics[x]; !ok {
- t.Errorf("Didn't find key %s, it should present.", x)
- }
+ // m.MasterCols does not include "framework_offers", so framework_offers metrics should not be present.
+ // framework_offers metrics have unpredictable names, so we can't rely on the list of metrics returned from
+ // getMetrics(). We have to find them by checking name prefixes.
+ for k := range masterMetrics {
+ if strings.HasPrefix(k, "master/frameworks/") || strings.HasPrefix(k, "frameworks/") {
+ t.Errorf("Found key %s, it should be gone.", k)
}
}
}
@@ -326,6 +418,7 @@ func TestMesosSlave(t *testing.T) {
var acc testutil.Accumulator
m := Mesos{
+ Log: testutil.Logger{},
Masters: []string{},
Slaves: []string{slaveTestServer.Listener.Addr().String()},
// SlaveTasks: true,
@@ -339,22 +432,11 @@ func TestMesosSlave(t *testing.T) {
}
acc.AssertContainsFields(t, "mesos", slaveMetrics)
-
- // expectedFields := make(map[string]interface{}, len(slaveTaskMetrics["statistics"].(map[string]interface{}))+1)
- // for k, v := range slaveTaskMetrics["statistics"].(map[string]interface{}) {
- // expectedFields[k] = v
- // }
- // expectedFields["executor_id"] = slaveTaskMetrics["executor_id"]
-
- // acc.AssertContainsTaggedFields(
- // t,
- // "mesos_tasks",
- // expectedFields,
- // map[string]string{"server": "127.0.0.1", "framework_id": slaveTaskMetrics["framework_id"].(string)})
}
func TestSlaveFilter(t *testing.T) {
m := Mesos{
+ Log: testutil.Logger{},
SlaveCols: []string{
"resources", "agent", "tasks",
},
diff --git a/plugins/inputs/minecraft/README.md b/plugins/inputs/minecraft/README.md
index 726f9a29eedce..026c9e3b3fb99 100644
--- a/plugins/inputs/minecraft/README.md
+++ b/plugins/inputs/minecraft/README.md
@@ -1,66 +1,84 @@
-# Minecraft Plugin
+# Minecraft Input Plugin
-This plugin uses the RCON protocol to collect [statistics](http://minecraft.gamepedia.com/Statistics) from a [scoreboard](http://minecraft.gamepedia.com/Scoreboard) on a
-Minecraft server.
+The `minecraft` plugin connects to a Minecraft server using the RCON protocol
+to collects scores from the server [scoreboard][].
-To enable [RCON](http://wiki.vg/RCON) on the minecraft server, add this to your server configuration in the `server.properties` file:
+This plugin is known to support Minecraft Java Edition versions 1.11 - 1.14.
+When using an version of Minecraft earlier than 1.13, be aware that the values
+for some criterion has changed and may need to be modified.
-```
+#### Server Setup
+
+Enable [RCON][] on the Minecraft server, add this to your server configuration
+in the [server.properties][] file:
+
+```conf
enable-rcon=true
rcon.password=
rcon.port=<1-65535>
```
-To create a new scoreboard objective called `jump` on a minecraft server tracking the `stat.jump` criteria, run this command
-in the Minecraft console:
-
-`/scoreboard objectives add jump stat.jump`
-
-Stats are collected with the following RCON command, issued by the plugin:
+Scoreboard [Objectives][] must be added using the server console for the
+plugin to collect. These can be added in game by players with op status,
+from the server console, or over an RCON connection.
-`/scoreboard players list *`
+When getting started pick an easy to test objective. This command will add an
+objective that counts the number of times a player has jumped:
+```
+/scoreboard objectives add jumps minecraft.custom:minecraft.jump
+```
-### Configuration:
+Once a player has triggered the event they will be added to the scoreboard,
+you can then list all players with recorded scores:
```
-[[inputs.minecraft]]
- # server address for minecraft
- server = "localhost"
- # port for RCON
- port = "25575"
- # password RCON for mincraft server
- password = "replace_me"
+/scoreboard players list
```
-### Measurements & Fields:
+View the current scores with a command, substituting your player name:
+```
+/scoreboard players list Etho
+```
-*This plugin uses only one measurement, titled* `minecraft`
+### Configuration
-- The field name is the scoreboard objective name.
-- The field value is the count of the scoreboard objective
+```toml
+[[inputs.minecraft]]
+ ## Address of the Minecraft server.
+ # server = "localhost"
-- `minecraft`
- - `` (integer, count)
+ ## Server RCON Port.
+ # port = "25575"
-### Tags:
+ ## Server RCON Password.
+ password = ""
+```
-- The `minecraft` measurement:
- - `server`: the Minecraft RCON server
- - `player`: the Minecraft player
+### Metrics
+- minecraft
+ - tags:
+ - player
+ - port (port of the server)
+ - server (hostname:port, deprecated in 1.11; use `source` and `port` tags)
+ - source (hostname of the server)
+ - fields:
+ - `` (integer, count)
### Sample Queries:
Get the number of jumps per player in the last hour:
-```
-SELECT SPREAD("jump") FROM "minecraft" WHERE time > now() - 1h GROUP BY "player"
+```sql
+SELECT SPREAD("jumps") FROM "minecraft" WHERE time > now() - 1h GROUP BY "player"
```
### Example Output:
-
```
-$ telegraf --input-filter minecraft --test
-* Plugin: inputs.minecraft, Collection 1
-> minecraft,player=notch,server=127.0.0.1:25575 jumps=178i 1498261397000000000
-> minecraft,player=dinnerbone,server=127.0.0.1:25575 deaths=1i,jumps=1999i,cow_kills=1i 1498261397000000000
-> minecraft,player=jeb,server=127.0.0.1:25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000
+minecraft,player=notch,source=127.0.0.1,port=25575 jumps=178i 1498261397000000000
+minecraft,player=dinnerbone,source=127.0.0.1,port=25575 deaths=1i,jumps=1999i,cow_kills=1i 1498261397000000000
+minecraft,player=jeb,source=127.0.0.1,port=25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000
```
+
+[server.properties]: https://minecraft.gamepedia.com/Server.properties
+[scoreboard]: http://minecraft.gamepedia.com/Scoreboard
+[objectives]: https://minecraft.gamepedia.com/Scoreboard#Objectives
+[rcon]: http://wiki.vg/RCON
diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go
new file mode 100644
index 0000000000000..30f56213af345
--- /dev/null
+++ b/plugins/inputs/minecraft/client.go
@@ -0,0 +1,205 @@
+package minecraft
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon"
+)
+
+var (
+ scoreboardRegexLegacy = regexp.MustCompile(`(?U):\s(?P\d+)\s\((?P.*)\)`)
+ scoreboardRegex = regexp.MustCompile(`\[(?P[^\]]+)\]: (?P\d+)`)
+)
+
+// Connection is an established connection to the Minecraft server.
+type Connection interface {
+ // Execute runs a command.
+ Execute(command string) (string, error)
+}
+
+// Connector is used to create connections to the Minecraft server.
+type Connector interface {
+ // Connect establishes a connection to the server.
+ Connect() (Connection, error)
+}
+
+func NewConnector(hostname, port, password string) (*connector, error) {
+ return &connector{
+ hostname: hostname,
+ port: port,
+ password: password,
+ }, nil
+}
+
+type connector struct {
+ hostname string
+ port string
+ password string
+}
+
+func (c *connector) Connect() (Connection, error) {
+ p, err := strconv.Atoi(c.port)
+ if err != nil {
+ return nil, err
+ }
+
+ rcon, err := rcon.NewClient(c.hostname, p)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = rcon.Authorize(c.password)
+ if err != nil {
+ return nil, err
+ }
+
+ return &connection{rcon: rcon}, nil
+}
+
+func NewClient(connector Connector) (*client, error) {
+ return &client{connector: connector}, nil
+}
+
+type client struct {
+ connector Connector
+ conn Connection
+}
+
+func (c *client) Connect() error {
+ conn, err := c.connector.Connect()
+ if err != nil {
+ return err
+ }
+ c.conn = conn
+ return nil
+}
+
+func (c *client) Players() ([]string, error) {
+ if c.conn == nil {
+ err := c.Connect()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ resp, err := c.conn.Execute("scoreboard players list")
+ if err != nil {
+ c.conn = nil
+ return nil, err
+ }
+
+ players, err := parsePlayers(resp)
+ if err != nil {
+ c.conn = nil
+ return nil, err
+ }
+
+ return players, nil
+}
+
+func (c *client) Scores(player string) ([]Score, error) {
+ if c.conn == nil {
+ err := c.Connect()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ resp, err := c.conn.Execute("scoreboard players list " + player)
+ if err != nil {
+ c.conn = nil
+ return nil, err
+ }
+
+ scores, err := parseScores(resp)
+ if err != nil {
+ c.conn = nil
+ return nil, err
+ }
+
+ return scores, nil
+}
+
+type connection struct {
+ rcon *rcon.Client
+}
+
+func (c *connection) Execute(command string) (string, error) {
+ packet, err := c.rcon.Execute(command)
+ if err != nil {
+ return "", err
+ }
+ return packet.Body, nil
+}
+
+func parsePlayers(input string) ([]string, error) {
+ parts := strings.SplitAfterN(input, ":", 2)
+ if len(parts) != 2 {
+ return []string{}, nil
+ }
+
+ names := strings.Split(parts[1], ",")
+
+ // Detect Minecraft <= 1.12
+ if strings.Contains(parts[0], "players on the scoreboard") && len(names) > 0 {
+ // Split the last two player names: ex: "notch and dinnerbone"
+ head := names[:len(names)-1]
+ tail := names[len(names)-1]
+ names = append(head, strings.SplitN(tail, " and ", 2)...)
+ }
+
+ var players []string
+ for _, name := range names {
+ name := strings.TrimSpace(name)
+ if name == "" {
+ continue
+ }
+ players = append(players, name)
+
+ }
+ return players, nil
+}
+
+// Score is an individual tracked scoreboard stat.
+type Score struct {
+ Name string
+ Value int64
+}
+
+func parseScores(input string) ([]Score, error) {
+ if strings.Contains(input, "has no scores") {
+ return []Score{}, nil
+ }
+
+ // Detect Minecraft <= 1.12
+ var re *regexp.Regexp
+ if strings.Contains(input, "tracked objective") {
+ re = scoreboardRegexLegacy
+ } else {
+ re = scoreboardRegex
+ }
+
+ var scores []Score
+ matches := re.FindAllStringSubmatch(input, -1)
+ for _, match := range matches {
+ score := Score{}
+ for i, subexp := range re.SubexpNames() {
+ switch subexp {
+ case "name":
+ score.Name = match[i]
+ case "value":
+ value, err := strconv.ParseInt(match[i], 10, 64)
+ if err != nil {
+ continue
+ }
+ score.Value = value
+ default:
+ continue
+ }
+ }
+ scores = append(scores, score)
+ }
+ return scores, nil
+}
diff --git a/plugins/inputs/minecraft/client_test.go b/plugins/inputs/minecraft/client_test.go
new file mode 100644
index 0000000000000..767a0c30ef5d3
--- /dev/null
+++ b/plugins/inputs/minecraft/client_test.go
@@ -0,0 +1,195 @@
+package minecraft
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+type MockConnection struct {
+ commands map[string]string
+}
+
+func (c *MockConnection) Execute(command string) (string, error) {
+ return c.commands[command], nil
+}
+
+type MockConnector struct {
+ conn *MockConnection
+}
+
+func (c *MockConnector) Connect() (Connection, error) {
+ return c.conn, nil
+}
+
+func TestClient_Player(t *testing.T) {
+ tests := []struct {
+ name string
+ commands map[string]string
+ expected []string
+ }{
+ {
+ name: "minecraft 1.12 no players",
+ commands: map[string]string{
+ "scoreboard players list": "There are no tracked players on the scoreboard",
+ },
+ expected: []string{},
+ },
+ {
+ name: "minecraft 1.12 single player",
+ commands: map[string]string{
+ "scoreboard players list": "Showing 1 tracked players on the scoreboard:Etho",
+ },
+ expected: []string{"Etho"},
+ },
+ {
+ name: "minecraft 1.12 two players",
+ commands: map[string]string{
+ "scoreboard players list": "Showing 2 tracked players on the scoreboard:Etho and torham",
+ },
+ expected: []string{"Etho", "torham"},
+ },
+ {
+ name: "minecraft 1.12 three players",
+ commands: map[string]string{
+ "scoreboard players list": "Showing 3 tracked players on the scoreboard:Etho, notch and torham",
+ },
+ expected: []string{"Etho", "notch", "torham"},
+ },
+ {
+ name: "minecraft 1.12 players space in username",
+ commands: map[string]string{
+ "scoreboard players list": "Showing 4 tracked players on the scoreboard:with space, Etho, notch and torham",
+ },
+ expected: []string{"with space", "Etho", "notch", "torham"},
+ },
+ {
+ name: "minecraft 1.12 players and in username",
+ commands: map[string]string{
+ "scoreboard players list": "Showing 5 tracked players on the scoreboard:left and right, with space,Etho, notch and torham",
+ },
+ expected: []string{"left and right", "with space", "Etho", "notch", "torham"},
+ },
+ {
+ name: "minecraft 1.13 no players",
+ commands: map[string]string{
+ "scoreboard players list": "There are no tracked entities",
+ },
+ expected: []string{},
+ },
+ {
+ name: "minecraft 1.13 single player",
+ commands: map[string]string{
+ "scoreboard players list": "There are 1 tracked entities: torham",
+ },
+ expected: []string{"torham"},
+ },
+ {
+ name: "minecraft 1.13 multiple player",
+ commands: map[string]string{
+ "scoreboard players list": "There are 3 tracked entities: Etho, notch, torham",
+ },
+ expected: []string{"Etho", "notch", "torham"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ connector := &MockConnector{
+ conn: &MockConnection{commands: tt.commands},
+ }
+
+ client, err := NewClient(connector)
+ require.NoError(t, err)
+
+ actual, err := client.Players()
+ require.NoError(t, err)
+
+ require.Equal(t, tt.expected, actual)
+ })
+ }
+}
+
+func TestClient_Scores(t *testing.T) {
+ tests := []struct {
+ name string
+ player string
+ commands map[string]string
+ expected []Score
+ }{
+ {
+ name: "minecraft 1.12 player with no scores",
+ player: "Etho",
+ commands: map[string]string{
+ "scoreboard players list Etho": "Player Etho has no scores recorded",
+ },
+ expected: []Score{},
+ },
+ {
+ name: "minecraft 1.12 player with one score",
+ player: "Etho",
+ commands: map[string]string{
+ "scoreboard players list Etho": "Showing 1 tracked objective(s) for Etho:- jump: 2 (jump)",
+ },
+ expected: []Score{
+ {Name: "jump", Value: 2},
+ },
+ },
+ {
+ name: "minecraft 1.12 player with many scores",
+ player: "Etho",
+ commands: map[string]string{
+ "scoreboard players list Etho": "Showing 3 tracked objective(s) for Etho:- hopper: 2 (hopper)- dropper: 2 (dropper)- redstone: 1 (redstone)",
+ },
+ expected: []Score{
+ {Name: "hopper", Value: 2},
+ {Name: "dropper", Value: 2},
+ {Name: "redstone", Value: 1},
+ },
+ },
+ {
+ name: "minecraft 1.13 player with no scores",
+ player: "Etho",
+ commands: map[string]string{
+ "scoreboard players list Etho": "Etho has no scores to show",
+ },
+ expected: []Score{},
+ },
+ {
+ name: "minecraft 1.13 player with one score",
+ player: "Etho",
+ commands: map[string]string{
+ "scoreboard players list Etho": "Etho has 1 scores:[jumps]: 1",
+ },
+ expected: []Score{
+ {Name: "jumps", Value: 1},
+ },
+ },
+ {
+ name: "minecraft 1.13 player with many scores",
+ player: "Etho",
+ commands: map[string]string{
+ "scoreboard players list Etho": "Etho has 3 scores:[hopper]: 2[dropper]: 2[redstone]: 1",
+ },
+ expected: []Score{
+ {Name: "hopper", Value: 2},
+ {Name: "dropper", Value: 2},
+ {Name: "redstone", Value: 1},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ connector := &MockConnector{
+ conn: &MockConnection{commands: tt.commands},
+ }
+
+ client, err := NewClient(connector)
+ require.NoError(t, err)
+
+ actual, err := client.Scores(tt.player)
+ require.NoError(t, err)
+
+ require.Equal(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go
index a57d75629d269..f9e49e6e62d4e 100644
--- a/plugins/inputs/minecraft/internal/rcon/rcon.go
+++ b/plugins/inputs/minecraft/internal/rcon/rcon.go
@@ -32,8 +32,8 @@ const (
// Rcon package errors.
var (
- ErrInvalidWrite = errors.New("Failed to write the payload corretly to remote connection.")
- ErrInvalidRead = errors.New("Failed to read the response corretly from remote connection.")
+ ErrInvalidWrite = errors.New("Failed to write the payload correctly to remote connection.")
+ ErrInvalidRead = errors.New("Failed to read the response correctly from remote connection.")
ErrInvalidChallenge = errors.New("Server failed to mirror request challenge.")
ErrUnauthorizedRequest = errors.New("Client not authorized to remote server.")
ErrFailedAuthorization = errors.New("Failed to authorize to the remote server.")
@@ -57,7 +57,7 @@ type Packet struct {
Body string // Body of packet.
}
-// Compile converts a packets header and body into its approriate
+// Compile converts a packets header and body into its appropriate
// byte array payload, returning an error if the binary packages
// Write method fails to write the header bytes in their little
// endian byte order.
@@ -112,7 +112,7 @@ func (c *Client) Execute(command string) (response *Packet, err error) {
// Sends accepts the commands type and its string to execute to the clients server,
// creating a packet with a random challenge id for the server to mirror,
-// and compiling its payload bytes in the appropriate order. The resonse is
+// and compiling its payload bytes in the appropriate order. The response is
// decompiled from its bytes into a Packet type for return. An error is returned
// if send fails.
func (c *Client) Send(typ int32, command string) (response *Packet, err error) {
@@ -152,7 +152,7 @@ func (c *Client) Send(typ int32, command string) (response *Packet, err error) {
}
if packet.Header.Type == Auth && header.Type == ResponseValue {
- // Discard, empty SERVERDATA_RESPOSE_VALUE from authorization.
+ // Discard, empty SERVERDATA_RESPONSE_VALUE from authorization.
c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize)))
// Reread the packet header.
diff --git a/plugins/inputs/minecraft/minecraft.go b/plugins/inputs/minecraft/minecraft.go
index 6debbd25bda82..0de79d94a3c77 100644
--- a/plugins/inputs/minecraft/minecraft.go
+++ b/plugins/inputs/minecraft/minecraft.go
@@ -1,95 +1,89 @@
package minecraft
import (
- "fmt"
- "regexp"
- "strconv"
-
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
const sampleConfig = `
- ## server address for minecraft
+ ## Address of the Minecraft server.
# server = "localhost"
- ## port for RCON
+
+ ## Server RCON Port.
# port = "25575"
- ## password RCON for mincraft server
- # password = ""
-`
-var (
- playerNameRegex = regexp.MustCompile(`for\s([^:]+):-`)
- scoreboardRegex = regexp.MustCompile(`(?U):\s(\d+)\s\((.*)\)`)
-)
+ ## Server RCON Password.
+ password = ""
-// Client is an interface for a client which gathers data from a minecraft server
+ ## Uncomment to remove deprecated metric components.
+ # tagdrop = ["server"]
+`
+
+// Client is a client for the Minecraft server.
type Client interface {
- Gather(producer RCONClientProducer) ([]string, error)
+ // Connect establishes a connection to the server.
+ Connect() error
+
+ // Players returns the players on the scoreboard.
+ Players() ([]string, error)
+
+ // Scores return the objective scores for a player.
+ Scores(player string) ([]Score, error)
}
-// Minecraft represents a connection to a minecraft server
+// Minecraft is the plugin type.
type Minecraft struct {
- Server string
- Port string
- Password string
- client Client
- clientSet bool
+ Server string `toml:"server"`
+ Port string `toml:"port"`
+ Password string `toml:"password"`
+
+ client Client
}
-// Description gives a brief description.
func (s *Minecraft) Description() string {
- return "Collects scores from a minecraft server's scoreboard using the RCON protocol"
+ return "Collects scores from a Minecraft server's scoreboard using the RCON protocol"
}
-// SampleConfig returns our sampleConfig.
func (s *Minecraft) SampleConfig() string {
return sampleConfig
}
-// Gather uses the RCON protocol to collect player and
-// scoreboard stats from a minecraft server.
-//var hasClient bool = false
func (s *Minecraft) Gather(acc telegraf.Accumulator) error {
- // can't simply compare s.client to nil, because comparing an interface
- // to nil often does not produce the desired result
- if !s.clientSet {
- var err error
- s.client, err = NewRCON(s.Server, s.Port, s.Password)
+ if s.client == nil {
+ connector, err := NewConnector(s.Server, s.Port, s.Password)
+ if err != nil {
+ return err
+ }
+
+ client, err := NewClient(connector)
if err != nil {
return err
}
- s.clientSet = true
- }
- // (*RCON).Gather() takes an RCONClientProducer for testing purposes
- d := defaultClientProducer{
- Server: s.Server,
- Port: s.Port,
+ s.client = client
}
- scores, err := s.client.Gather(d)
+ players, err := s.client.Players()
if err != nil {
return err
}
- for _, score := range scores {
- player, err := ParsePlayerName(score)
+ for _, player := range players {
+ scores, err := s.client.Scores(player)
if err != nil {
return err
}
+
tags := map[string]string{
"player": player,
"server": s.Server + ":" + s.Port,
+ "source": s.Server,
+ "port": s.Port,
}
- stats, err := ParseScoreboard(score)
- if err != nil {
- return err
- }
- var fields = make(map[string]interface{}, len(stats))
- for _, stat := range stats {
- fields[stat.Name] = stat.Value
+ var fields = make(map[string]interface{}, len(scores))
+ for _, score := range scores {
+ fields[score.Name] = score.Value
}
acc.AddFields("minecraft", fields, tags)
@@ -98,51 +92,6 @@ func (s *Minecraft) Gather(acc telegraf.Accumulator) error {
return nil
}
-// ParsePlayerName takes an input string from rcon, to parse
-// the player.
-func ParsePlayerName(input string) (string, error) {
- playerMatches := playerNameRegex.FindAllStringSubmatch(input, -1)
- if playerMatches == nil {
- return "", fmt.Errorf("no player was matched")
- }
- return playerMatches[0][1], nil
-}
-
-// Score is an individual tracked scoreboard stat.
-type Score struct {
- Name string
- Value int
-}
-
-// ParseScoreboard takes an input string from rcon, to parse
-// scoreboard stats.
-func ParseScoreboard(input string) ([]Score, error) {
- scoreMatches := scoreboardRegex.FindAllStringSubmatch(input, -1)
- if scoreMatches == nil {
- return nil, fmt.Errorf("No scores found")
- }
-
- var scores []Score
-
- for _, match := range scoreMatches {
- number := match[1]
- name := match[2]
- n, err := strconv.Atoi(number)
- // Not necessary in current state, because regex can only match integers,
- // maybe become necessary if regex is modified to match more types of
- // numbers
- if err != nil {
- return nil, fmt.Errorf("Failed to parse score")
- }
- s := Score{
- Name: name,
- Value: n,
- }
- scores = append(scores, s)
- }
- return scores, nil
-}
-
func init() {
inputs.Add("minecraft", func() telegraf.Input {
return &Minecraft{
diff --git a/plugins/inputs/minecraft/minecraft_test.go b/plugins/inputs/minecraft/minecraft_test.go
index c0a9e6cf53a43..487f7d58ab988 100644
--- a/plugins/inputs/minecraft/minecraft_test.go
+++ b/plugins/inputs/minecraft/minecraft_test.go
@@ -1,234 +1,124 @@
package minecraft
import (
- "fmt"
- "reflect"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
)
-// TestParsePlayerName tests different Minecraft RCON inputs for players
-func TestParsePlayerName(t *testing.T) {
- // Test a valid input string to ensure player is extracted
- input := "1 tracked objective(s) for divislight:- jumps: 178 (jumps)"
- got, err := ParsePlayerName(input)
- want := "divislight"
- if err != nil {
- t.Fatalf("player returned error. Error: %s\n", err)
- }
- if got != want {
- t.Errorf("got %s\nwant %s\n", got, want)
- }
-
- // Test an invalid input string to ensure error is returned
- input = ""
- got, err = ParsePlayerName(input)
- want = ""
- if err == nil {
- t.Fatal("Expected error when player not present. No error found.")
- }
- if got != want {
- t.Errorf("got %s\n want %s\n", got, want)
- }
-
- // Test an invalid input string to ensure error is returned
- input = "1 tracked objective(s) for 😂:- jumps: 178 (jumps)"
- got, err = ParsePlayerName(input)
- want = "😂"
- if err != nil {
- t.Fatalf("player returned error. Error: %s\n", err)
- }
- if got != want {
- t.Errorf("got %s\n want %s\n", got, want)
- }
+type MockClient struct {
+ ConnectF func() error
+ PlayersF func() ([]string, error)
+ ScoresF func(player string) ([]Score, error)
}
-// TestParseScoreboard tests different Minecraft RCON inputs for scoreboard stats.
-func TestParseScoreboard(t *testing.T) {
- // test a valid input string to ensure stats are parsed correctly.
- input := `1 tracked objective(s) for divislight:- jumps: 178 (jumps)- sword: 5 (sword)`
- got, err := ParseScoreboard(input)
- if err != nil {
- t.Fatal("Unexpected error")
- }
-
- want := []Score{
- {
- Name: "jumps",
- Value: 178,
- },
- {
- Name: "sword",
- Value: 5,
- },
- }
-
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Got: \n%#v\nWant: %#v", got, want)
- }
-
- // Tests a partial input string.
- input = `1 tracked objective(s) for divislight:- jumps: (jumps)- sword: 5 (sword)`
- got, err = ParseScoreboard(input)
-
- if err != nil {
- t.Fatal("Unexpected error")
- }
-
- want = []Score{
- {
- Name: "sword",
- Value: 5,
- },
- }
-
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Got: \n%#v\nWant:\n%#v", got, want)
- }
-
- // Tests an empty string.
- input = ``
- _, err = ParseScoreboard(input)
- if err == nil {
- t.Fatal("Expected input error, but error was nil")
- }
-
- // Tests when a number isn't an integer.
- input = `1 tracked objective(s) for divislight:- jumps: 178.5 (jumps)- sword: 5 (sword)`
- got, err = ParseScoreboard(input)
- if err != nil {
- t.Fatal("Unexpected error")
- }
-
- want = []Score{
- {
- Name: "sword",
- Value: 5,
- },
- }
+func (c *MockClient) Connect() error {
+ return c.ConnectF()
+}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Got: \n%#v\nWant: %#v", got, want)
- }
+func (c *MockClient) Players() ([]string, error) {
+ return c.PlayersF()
+}
- //Testing a real life data scenario with unicode characters
- input = `7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)`
- got, err = ParseScoreboard(input)
- if err != nil {
- t.Fatal("Unexpected error")
- }
+func (c *MockClient) Scores(player string) ([]Score, error) {
+ return c.ScoresF(player)
+}
- want = []Score{
- {
- Name: "total_kills",
- Value: 39,
- },
- {
- Name: "dalevel",
- Value: 37,
- },
- {
- Name: "lvl",
- Value: 37,
- },
- {
- Name: "jumps",
- Value: 1290,
- },
+func TestGather(t *testing.T) {
+ now := time.Unix(0, 0)
+
+ tests := []struct {
+ name string
+ client *MockClient
+ metrics []telegraf.Metric
+ err error
+ }{
{
- Name: "iron_pickaxe",
- Value: 284,
+ name: "no players",
+ client: &MockClient{
+ ConnectF: func() error {
+ return nil
+ },
+ PlayersF: func() ([]string, error) {
+ return []string{}, nil
+ },
+ },
+ metrics: []telegraf.Metric{},
},
{
- Name: "cow_kills",
- Value: 1,
+ name: "one player without scores",
+ client: &MockClient{
+ ConnectF: func() error {
+ return nil
+ },
+ PlayersF: func() ([]string, error) {
+ return []string{"Etho"}, nil
+ },
+ ScoresF: func(player string) ([]Score, error) {
+ switch player {
+ case "Etho":
+ return []Score{}, nil
+ default:
+ panic("unknown player")
+ }
+ },
+ },
+ metrics: []telegraf.Metric{},
},
{
- Name: "😂",
- Value: 37,
- },
- }
-
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Got: \n%#v\nWant: %#v", got, want)
- }
-
-}
-
-type MockClient struct {
- Result []string
- Err error
-}
-
-func (m *MockClient) Gather(d RCONClientProducer) ([]string, error) {
- return m.Result, m.Err
-}
-
-func TestGather(t *testing.T) {
- var acc testutil.Accumulator
- testConfig := Minecraft{
- Server: "biffsgang.net",
- Port: "25575",
- client: &MockClient{
- Result: []string{
- `1 tracked objective(s) for divislight:- jumps: 178 (jumps)`,
- `7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)`,
- `5 tracked objective(s) for torham:- total_kills: 29 (total_kills)- "howdy doody": 33 (dalevel)- howdy: 33 (lvl)- jumps: 263 (jumps)- "asdf": 33 (😂)`,
+ name: "one player with scores",
+ client: &MockClient{
+ ConnectF: func() error {
+ return nil
+ },
+ PlayersF: func() ([]string, error) {
+ return []string{"Etho"}, nil
+ },
+ ScoresF: func(player string) ([]Score, error) {
+ switch player {
+ case "Etho":
+ return []Score{{Name: "jumps", Value: 42}}, nil
+ default:
+ panic("unknown player")
+ }
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "minecraft",
+ map[string]string{
+ "player": "Etho",
+ "server": "example.org:25575",
+ "source": "example.org",
+ "port": "25575",
+ },
+ map[string]interface{}{
+ "jumps": 42,
+ },
+ now,
+ ),
},
- Err: nil,
},
- clientSet: true,
- }
-
- err := testConfig.Gather(&acc)
-
- if err != nil {
- t.Fatalf("gather returned error. Error: %s\n", err)
}
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ plugin := &Minecraft{
+ Server: "example.org",
+ Port: "25575",
+ Password: "xyzzy",
+ client: tt.client,
+ }
- if !testConfig.clientSet {
- t.Fatalf("clientSet should be true, client should be set")
- }
+ var acc testutil.Accumulator
+ acc.TimeFunc = func() time.Time { return now }
- tags := map[string]string{
- "player": "divislight",
- "server": "biffsgang.net:25575",
- }
+ err := plugin.Gather(&acc)
- assertContainsTaggedStat(t, &acc, "minecraft", "jumps", 178, tags)
- tags["player"] = "mauxlaim"
- assertContainsTaggedStat(t, &acc, "minecraft", "cow_kills", 1, tags)
- tags["player"] = "torham"
- assertContainsTaggedStat(t, &acc, "minecraft", "total_kills", 29, tags)
-
-}
-
-func assertContainsTaggedStat(
- t *testing.T,
- acc *testutil.Accumulator,
- measurement string,
- field string,
- expectedValue int,
- tags map[string]string,
-) {
- var actualValue int
- for _, pt := range acc.Metrics {
- if pt.Measurement == measurement && reflect.DeepEqual(pt.Tags, tags) {
- for fieldname, value := range pt.Fields {
- if fieldname == field {
- actualValue = value.(int)
- if value == expectedValue {
- return
- }
- t.Errorf("Expected value %d\n got value %d\n", expectedValue, value)
- }
- }
- }
+ require.Equal(t, tt.err, err)
+ testutil.RequireMetricsEqual(t, tt.metrics, acc.GetTelegrafMetrics())
+ })
}
- msg := fmt.Sprintf(
- "Could not find measurement \"%s\" with requested tags within %s, Actual: %d",
- measurement, field, actualValue)
- t.Fatal(msg)
-
}
diff --git a/plugins/inputs/minecraft/rcon.go b/plugins/inputs/minecraft/rcon.go
deleted file mode 100644
index f42fc8ba48e82..0000000000000
--- a/plugins/inputs/minecraft/rcon.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package minecraft
-
-import (
- "strconv"
- "strings"
-
- "github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon"
-)
-
-const (
- // NoMatches is a sentinel value returned when there are no statistics defined on the
- //minecraft server
- NoMatches = `All matches failed`
- // ScoreboardPlayerList is the command to see all player statistics
- ScoreboardPlayerList = `scoreboard players list *`
-)
-
-// RCONClient is a representation of RCON command authorizaiton and exectution
-type RCONClient interface {
- Authorize(password string) (*rcon.Packet, error)
- Execute(command string) (*rcon.Packet, error)
-}
-
-// RCON represents a RCON server connection
-type RCON struct {
- Server string
- Port string
- Password string
- client RCONClient
-}
-
-// RCONClientProducer is an interface which defines how a new client will be
-// produced in the event of a network disconnect. It exists mainly for
-// testing purposes
-type RCONClientProducer interface {
- newClient() (RCONClient, error)
-}
-
-type defaultClientProducer struct {
- Server string
- Port string
-}
-
-func (d defaultClientProducer) newClient() (RCONClient, error) {
- return newClient(d.Server, d.Port)
-}
-
-// NewRCON creates a new RCON
-func NewRCON(server, port, password string) (*RCON, error) {
- client, err := newClient(server, port)
- if err != nil {
- return nil, err
- }
-
- return &RCON{
- Server: server,
- Port: port,
- Password: password,
- client: client,
- }, nil
-}
-
-func newClient(server, port string) (*rcon.Client, error) {
- p, err := strconv.Atoi(port)
- if err != nil {
- return nil, err
- }
-
- client, err := rcon.NewClient(server, p)
-
- // If we've encountered any error, the contents of `client` could be corrupted,
- // so we must return nil, err
- if err != nil {
- return nil, err
- }
- return client, nil
-}
-
-// Gather receives all player scoreboard information and returns it per player.
-func (r *RCON) Gather(producer RCONClientProducer) ([]string, error) {
- if r.client == nil {
- var err error
- r.client, err = producer.newClient()
- if err != nil {
- return nil, err
- }
- }
-
- if _, err := r.client.Authorize(r.Password); err != nil {
- // Potentially a network problem where the client will need to be
- // re-initialized
- r.client = nil
- return nil, err
- }
-
- packet, err := r.client.Execute(ScoreboardPlayerList)
- if err != nil {
- // Potentially a network problem where the client will need to be
- // re-initialized
- r.client = nil
- return nil, err
- }
-
- if !strings.Contains(packet.Body, NoMatches) {
- players := strings.Split(packet.Body, "Showing")
- if len(players) > 1 {
- return players[1:], nil
- }
- }
-
- return []string{}, nil
-}
diff --git a/plugins/inputs/minecraft/rcon_disconnect_error_test.go b/plugins/inputs/minecraft/rcon_disconnect_error_test.go
deleted file mode 100644
index c89308e06fbee..0000000000000
--- a/plugins/inputs/minecraft/rcon_disconnect_error_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package minecraft
-
-import (
- "errors"
- "testing"
-)
-
-type MockRCONProducer struct {
- Err error
-}
-
-func (m *MockRCONProducer) newClient() (RCONClient, error) {
- return nil, m.Err
-}
-
-func TestRCONErrorHandling(t *testing.T) {
- m := &MockRCONProducer{
- Err: errors.New("Error: failed connection"),
- }
- c := &RCON{
- Server: "craftstuff.com",
- Port: "2222",
- Password: "pass",
- //Force fetching of new client
- client: nil,
- }
-
- _, err := c.Gather(m)
- if err == nil {
- t.Errorf("Error nil, unexpected result")
- }
-
- if c.client != nil {
- t.Fatal("c.client should be nil, unexpected result")
- }
-}
diff --git a/plugins/inputs/minecraft/rcon_test.go b/plugins/inputs/minecraft/rcon_test.go
deleted file mode 100644
index 1660b53f940b8..0000000000000
--- a/plugins/inputs/minecraft/rcon_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package minecraft
-
-import (
- "testing"
-
- "github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon"
-)
-
-type MockRCONClient struct {
- Result *rcon.Packet
- Err error
-}
-
-func (m *MockRCONClient) Authorize(password string) (*rcon.Packet, error) {
- return m.Result, m.Err
-}
-func (m *MockRCONClient) Execute(command string) (*rcon.Packet, error) {
- return m.Result, m.Err
-}
-
-// TestRCONGather test the RCON gather function
-func TestRCONGather(t *testing.T) {
- mock := &MockRCONClient{
- Result: &rcon.Packet{
- Body: `Showing 1 tracked objective(s) for divislight:- jumps: 178 (jumps)Showing 7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)Showing 5 tracked objective(s) for torham:- total_kills: 29 (total_kills)- "howdy doody": 33 (dalevel)- howdy: 33 (lvl)- jumps: 263 (jumps)- "asdf": 33 (😂)`,
- },
- Err: nil,
- }
-
- want := []string{
- ` 1 tracked objective(s) for divislight:- jumps: 178 (jumps)`,
- ` 7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)`,
- ` 5 tracked objective(s) for torham:- total_kills: 29 (total_kills)- "howdy doody": 33 (dalevel)- howdy: 33 (lvl)- jumps: 263 (jumps)- "asdf": 33 (😂)`,
- }
-
- client := &RCON{
- Server: "craftstuff.com",
- Port: "2222",
- Password: "pass",
- client: mock,
- }
-
- d := defaultClientProducer{}
- got, err := client.Gather(d)
- if err != nil {
- t.Fatalf("Gather returned an error. Error %s\n", err)
- }
- for i, s := range got {
- if want[i] != s {
- t.Fatalf("Got %s at index %d, want %s at index %d", s, i, want[i], i)
- }
- }
-
- client.client = &MockRCONClient{
- Result: &rcon.Packet{
- Body: "",
- },
- Err: nil,
- }
-
- got, err = client.Gather(defaultClientProducer{})
- if err != nil {
- t.Fatalf("Gather returned an error. Error %s\n", err)
- }
- if len(got) != 0 {
- t.Fatalf("Expected empty slice of length %d, got slice of length %d", 0, len(got))
- }
-}
diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md
new file mode 100644
index 0000000000000..3c568b5e6e5e7
--- /dev/null
+++ b/plugins/inputs/modbus/README.md
@@ -0,0 +1,135 @@
+# Modbus Input Plugin
+
+The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding
+Registers via Modbus TCP or Modbus RTU/ASCII.
+
+### Configuration
+
+```toml
+[[inputs.modbus]]
+ ## Connection Configuration
+ ##
+ ## The plugin supports connections to PLCs via MODBUS/TCP or
+ ## via serial line communication in binary (RTU) or readable (ASCII) encoding
+ ##
+ ## Device name
+ name = "Device"
+
+ ## Slave ID - addresses a MODBUS device on the bus
+ ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
+ slave_id = 1
+
+ ## Timeout for each request
+ timeout = "1s"
+
+ ## Maximum number of retries and the time to wait between retries
+ ## when a slave-device is busy.
+ # busy_retries = 0
+ # busy_retries_wait = "100ms"
+
+ # TCP - connect via Modbus/TCP
+ controller = "tcp://localhost:502"
+
+ ## Serial (RS485; RS232)
+ # controller = "file:///dev/ttyUSB0"
+ # baud_rate = 9600
+ # data_bits = 8
+ # parity = "N"
+ # stop_bits = 1
+ # transmission_mode = "RTU"
+
+
+ ## Measurements
+ ##
+
+ ## Digital Variables, Discrete Inputs and Coils
+ ## measurement - the (optional) measurement name, defaults to "modbus"
+ ## name - the variable name
+ ## address - variable address
+
+ discrete_inputs = [
+ { name = "start", address = [0]},
+ { name = "stop", address = [1]},
+ { name = "reset", address = [2]},
+ { name = "emergency_stop", address = [3]},
+ ]
+ coils = [
+ { name = "motor1_run", address = [0]},
+ { name = "motor1_jog", address = [1]},
+ { name = "motor1_stop", address = [2]},
+ ]
+
+ ## Analog Variables, Input Registers and Holding Registers
+ ## measurement - the (optional) measurement name, defaults to "modbus"
+ ## name - the variable name
+ ## byte_order - the ordering of bytes
+ ## |---AB, ABCD - Big Endian
+ ## |---BA, DCBA - Little Endian
+ ## |---BADC - Mid-Big Endian
+ ## |---CDAB - Mid-Little Endian
+ ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation)
+ ## FLOAT32 (deprecated), FIXED, UFIXED (fixed-point representation on input)
+ ## scale - the final numeric variable representation
+ ## address - variable address
+
+ holding_registers = [
+ { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]},
+ { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]},
+ { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]},
+ { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]},
+ { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]},
+ { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]},
+ ]
+ input_registers = [
+ { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
+ { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
+ { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+ ]
+```
+
+### Metrics
+
+Metric are custom and configured using the `discrete_inputs`, `coils`,
+`holding_register` and `input_registers` options.
+
+### Usage of `data_type`
+
+The field `data_type` defines the representation of the data value on input from the modbus registers.
+The input values are then converted from the given `data_type` to a type that is apropriate when
+sending the value to the output plugin. These output types are usually one of string,
+integer or floating-point-number. The size of the output type is assumed to be large enough
+for all supported input types. The mapping from the input type to the output type is fixed
+and cannot be configured.
+
+#### Integers: `INT16`, `UINT16`, `INT32`, `UINT32`, `INT64`, `UINT64`
+
+These types are used for integer input values. Select the one that matches your modbus data source.
+
+#### Floating Point: `FLOAT32-IEEE`
+
+Use this type if your modbus registers contain a value that is encoded in this format. This type
+always includes the sign and therefore there exists no variant.
+
+#### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`)
+
+These types are handled as an integer type on input, but are converted to floating point representation
+for further processing (e.g. scaling). Use one of these types when the input value is a decimal fixed point
+representation of a non-integer value.
+
+Select the type `UFIXED` when the input type is declared to hold unsigned integer values, which cannot
+be negative. The documentation of your modbus device should indicate this by a term like
+'uint16 containing fixed-point representation with N decimal places'.
+
+Select the type `FIXED` when the input type is declared to hold signed integer values. Your documentation
+of the modbus device should indicate this with a term like 'int32 containing fixed-point representation
+with N decimal places'.
+
+(FLOAT32 is deprecated and should not be used any more. UFIXED provides the same conversion
+from unsigned values).
+
+### Example Output
+
+```sh
+$ ./telegraf -config telegraf.conf -input-filter modbus -test
+modbus.InputRegisters,host=orangepizero Current=0,Energy=0,Frecuency=60,Power=0,PowerFactor=0,Voltage=123.9000015258789 1554079521000000000
+```
diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go
new file mode 100644
index 0000000000000..ec68890c5eb91
--- /dev/null
+++ b/plugins/inputs/modbus/modbus.go
@@ -0,0 +1,732 @@
+package modbus
+
+import (
+ "encoding/binary"
+ "fmt"
+ "log"
+ "math"
+ "net"
+ "net/url"
+ "sort"
+ "time"
+
+ mb "github.com/goburrow/modbus"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Modbus holds all data relevant to the plugin
+type Modbus struct {
+ Name string `toml:"name"`
+ Controller string `toml:"controller"`
+ TransmissionMode string `toml:"transmission_mode"`
+ BaudRate int `toml:"baud_rate"`
+ DataBits int `toml:"data_bits"`
+ Parity string `toml:"parity"`
+ StopBits int `toml:"stop_bits"`
+ SlaveID int `toml:"slave_id"`
+ Timeout internal.Duration `toml:"timeout"`
+ Retries int `toml:"busy_retries"`
+ RetriesWaitTime internal.Duration `toml:"busy_retries_wait"`
+ DiscreteInputs []fieldContainer `toml:"discrete_inputs"`
+ Coils []fieldContainer `toml:"coils"`
+ HoldingRegisters []fieldContainer `toml:"holding_registers"`
+ InputRegisters []fieldContainer `toml:"input_registers"`
+ registers []register
+ isConnected bool
+ tcpHandler *mb.TCPClientHandler
+ rtuHandler *mb.RTUClientHandler
+ asciiHandler *mb.ASCIIClientHandler
+ client mb.Client
+}
+
+type register struct {
+ Type string
+ RegistersRange []registerRange
+ Fields []fieldContainer
+}
+
+type fieldContainer struct {
+ Measurement string `toml:"measurement"`
+ Name string `toml:"name"`
+ ByteOrder string `toml:"byte_order"`
+ DataType string `toml:"data_type"`
+ Scale float64 `toml:"scale"`
+ Address []uint16 `toml:"address"`
+ value interface{}
+}
+
+type registerRange struct {
+ address uint16
+ length uint16
+}
+
+const (
+ cDiscreteInputs = "discrete_input"
+ cCoils = "coil"
+ cHoldingRegisters = "holding_register"
+ cInputRegisters = "input_register"
+)
+
+const description = `Retrieve data from MODBUS slave devices`
+const sampleConfig = `
+ ## Connection Configuration
+ ##
+ ## The plugin supports connections to PLCs via MODBUS/TCP or
+ ## via serial line communication in binary (RTU) or readable (ASCII) encoding
+ ##
+ ## Device name
+ name = "Device"
+
+ ## Slave ID - addresses a MODBUS device on the bus
+ ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
+ slave_id = 1
+
+ ## Timeout for each request
+ timeout = "1s"
+
+ ## Maximum number of retries and the time to wait between retries
+ ## when a slave-device is busy.
+ # busy_retries = 0
+ # busy_retries_wait = "100ms"
+
+ # TCP - connect via Modbus/TCP
+ controller = "tcp://localhost:502"
+
+ ## Serial (RS485; RS232)
+ # controller = "file:///dev/ttyUSB0"
+ # baud_rate = 9600
+ # data_bits = 8
+ # parity = "N"
+ # stop_bits = 1
+ # transmission_mode = "RTU"
+
+
+ ## Measurements
+ ##
+
+ ## Digital Variables, Discrete Inputs and Coils
+ ## measurement - the (optional) measurement name, defaults to "modbus"
+ ## name - the variable name
+ ## address - variable address
+
+ discrete_inputs = [
+ { name = "start", address = [0]},
+ { name = "stop", address = [1]},
+ { name = "reset", address = [2]},
+ { name = "emergency_stop", address = [3]},
+ ]
+ coils = [
+ { name = "motor1_run", address = [0]},
+ { name = "motor1_jog", address = [1]},
+ { name = "motor1_stop", address = [2]},
+ ]
+
+ ## Analog Variables, Input Registers and Holding Registers
+ ## measurement - the (optional) measurement name, defaults to "modbus"
+ ## name - the variable name
+ ## byte_order - the ordering of bytes
+ ## |---AB, ABCD - Big Endian
+ ## |---BA, DCBA - Little Endian
+ ## |---BADC - Mid-Big Endian
+ ## |---CDAB - Mid-Little Endian
+ ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation)
+ ## FLOAT32, FIXED, UFIXED (fixed-point representation on input)
+ ## scale - the final numeric variable representation
+ ## address - variable address
+
+ holding_registers = [
+ { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]},
+ { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]},
+ { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]},
+ { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]},
+ { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]},
+ { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]},
+ ]
+ input_registers = [
+ { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
+ { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
+ { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+ ]
+`
+
+// SampleConfig returns a basic configuration for the plugin
+func (m *Modbus) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description returns a short description of what the plugin does
+func (m *Modbus) Description() string {
+ return description
+}
+
+func (m *Modbus) Init() error {
+ //check device name
+ if m.Name == "" {
+ return fmt.Errorf("device name is empty")
+ }
+
+ if m.Retries < 0 {
+ return fmt.Errorf("retries cannot be negative")
+ }
+
+ err := m.InitRegister(m.DiscreteInputs, cDiscreteInputs)
+ if err != nil {
+ return err
+ }
+
+ err = m.InitRegister(m.Coils, cCoils)
+ if err != nil {
+ return err
+ }
+
+ err = m.InitRegister(m.HoldingRegisters, cHoldingRegisters)
+ if err != nil {
+ return err
+ }
+
+ err = m.InitRegister(m.InputRegisters, cInputRegisters)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Modbus) InitRegister(fields []fieldContainer, name string) error {
+ if len(fields) == 0 {
+ return nil
+ }
+
+ err := validateFieldContainers(fields, name)
+ if err != nil {
+ return err
+ }
+
+ addrs := []uint16{}
+ for _, field := range fields {
+ for _, a := range field.Address {
+ addrs = append(addrs, a)
+ }
+ }
+
+ addrs = removeDuplicates(addrs)
+ sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] })
+
+ ii := 0
+ var registersRange []registerRange
+
+ // Get range of consecutive integers
+ // [1, 2, 3, 5, 6, 10, 11, 12, 14]
+ // (1, 3) , (5, 2) , (10, 3), (14 , 1)
+ for range addrs {
+ if ii < len(addrs) {
+ start := addrs[ii]
+ end := start
+
+ for ii < len(addrs)-1 && addrs[ii+1]-addrs[ii] == 1 {
+ end = addrs[ii+1]
+ ii++
+ }
+ ii++
+ registersRange = append(registersRange, registerRange{start, end - start + 1})
+ }
+ }
+
+ m.registers = append(m.registers, register{name, registersRange, fields})
+
+ return nil
+}
+
+// Connect to a MODBUS Slave device via Modbus/[TCP|RTU|ASCII]
+func connect(m *Modbus) error {
+ u, err := url.Parse(m.Controller)
+ if err != nil {
+ return err
+ }
+
+ switch u.Scheme {
+ case "tcp":
+ var host, port string
+ host, port, err = net.SplitHostPort(u.Host)
+ if err != nil {
+ return err
+ }
+ m.tcpHandler = mb.NewTCPClientHandler(host + ":" + port)
+ m.tcpHandler.Timeout = m.Timeout.Duration
+ m.tcpHandler.SlaveId = byte(m.SlaveID)
+ m.client = mb.NewClient(m.tcpHandler)
+ err := m.tcpHandler.Connect()
+ if err != nil {
+ return err
+ }
+ m.isConnected = true
+ return nil
+ case "file":
+ if m.TransmissionMode == "RTU" {
+ m.rtuHandler = mb.NewRTUClientHandler(u.Path)
+ m.rtuHandler.Timeout = m.Timeout.Duration
+ m.rtuHandler.SlaveId = byte(m.SlaveID)
+ m.rtuHandler.BaudRate = m.BaudRate
+ m.rtuHandler.DataBits = m.DataBits
+ m.rtuHandler.Parity = m.Parity
+ m.rtuHandler.StopBits = m.StopBits
+ m.client = mb.NewClient(m.rtuHandler)
+ err := m.rtuHandler.Connect()
+ if err != nil {
+ return err
+ }
+ m.isConnected = true
+ return nil
+ } else if m.TransmissionMode == "ASCII" {
+ m.asciiHandler = mb.NewASCIIClientHandler(u.Path)
+ m.asciiHandler.Timeout = m.Timeout.Duration
+ m.asciiHandler.SlaveId = byte(m.SlaveID)
+ m.asciiHandler.BaudRate = m.BaudRate
+ m.asciiHandler.DataBits = m.DataBits
+ m.asciiHandler.Parity = m.Parity
+ m.asciiHandler.StopBits = m.StopBits
+ m.client = mb.NewClient(m.asciiHandler)
+ err := m.asciiHandler.Connect()
+ if err != nil {
+ return err
+ }
+ m.isConnected = true
+ return nil
+ } else {
+ return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode)
+ }
+ default:
+ return fmt.Errorf("invalid controller")
+ }
+}
+
+func disconnect(m *Modbus) error {
+ u, err := url.Parse(m.Controller)
+ if err != nil {
+ return err
+ }
+
+ switch u.Scheme {
+ case "tcp":
+ m.tcpHandler.Close()
+ return nil
+ case "file":
+ if m.TransmissionMode == "RTU" {
+ m.rtuHandler.Close()
+ return nil
+ } else if m.TransmissionMode == "ASCII" {
+ m.asciiHandler.Close()
+ return nil
+ } else {
+ return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode)
+ }
+ default:
+ return fmt.Errorf("invalid controller")
+ }
+}
+
+func validateFieldContainers(t []fieldContainer, n string) error {
+ nameEncountered := map[string]bool{}
+ for _, item := range t {
+ //check empty name
+ if item.Name == "" {
+ return fmt.Errorf("empty name in '%s'", n)
+ }
+
+ //search name duplicate
+ canonical_name := item.Measurement + "." + item.Name
+ if nameEncountered[canonical_name] {
+ return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name)
+ } else {
+ nameEncountered[canonical_name] = true
+ }
+
+ if n == cInputRegisters || n == cHoldingRegisters {
+ // search byte order
+ switch item.ByteOrder {
+ case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB":
+ break
+ default:
+ return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, n, item.Name)
+ }
+
+ // search data type
+ switch item.DataType {
+ case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT32", "FIXED", "UFIXED":
+ break
+ default:
+ return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, n, item.Name)
+ }
+
+ // check scale
+ if item.Scale == 0.0 {
+ return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, n, item.Name)
+ }
+ }
+
+ // check address
+ if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 {
+ return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name)
+ }
+
+ if n == cInputRegisters || n == cHoldingRegisters {
+ if 2*len(item.Address) != len(item.ByteOrder) {
+ return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, n, item.Name)
+ }
+
+ // search duplicated
+ if len(item.Address) > len(removeDuplicates(item.Address)) {
+ return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, n, item.Name)
+ }
+ } else if len(item.Address) != 1 {
+ return fmt.Errorf("invalid address'%v' length'%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name)
+ }
+ }
+ return nil
+}
+
+func removeDuplicates(elements []uint16) []uint16 {
+ encountered := map[uint16]bool{}
+ result := []uint16{}
+
+ for v := range elements {
+ if encountered[elements[v]] {
+ } else {
+ encountered[elements[v]] = true
+ result = append(result, elements[v])
+ }
+ }
+
+ return result
+}
+
+func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) {
+ if rt == cDiscreteInputs {
+ return m.client.ReadDiscreteInputs(uint16(rr.address), uint16(rr.length))
+ } else if rt == cCoils {
+ return m.client.ReadCoils(uint16(rr.address), uint16(rr.length))
+ } else if rt == cInputRegisters {
+ return m.client.ReadInputRegisters(uint16(rr.address), uint16(rr.length))
+ } else if rt == cHoldingRegisters {
+ return m.client.ReadHoldingRegisters(uint16(rr.address), uint16(rr.length))
+ } else {
+ return []byte{}, fmt.Errorf("not Valid function")
+ }
+}
+
+func (m *Modbus) getFields() error {
+ for _, register := range m.registers {
+ rawValues := make(map[uint16][]byte)
+ bitRawValues := make(map[uint16]uint16)
+ for _, rr := range register.RegistersRange {
+ address := rr.address
+ readValues, err := readRegisterValues(m, register.Type, rr)
+ if err != nil {
+ return err
+ }
+
+ // Raw Values
+ if register.Type == cDiscreteInputs || register.Type == cCoils {
+ for _, readValue := range readValues {
+ for bitPosition := 0; bitPosition < 8; bitPosition++ {
+ bitRawValues[address] = getBitValue(readValue, bitPosition)
+ address = address + 1
+ if address+1 > rr.length {
+ break
+ }
+ }
+ }
+ }
+
+ // Raw Values
+ if register.Type == cInputRegisters || register.Type == cHoldingRegisters {
+ batchSize := 2
+ for batchSize < len(readValues) {
+ rawValues[address] = readValues[0:batchSize:batchSize]
+ address = address + 1
+ readValues = readValues[batchSize:]
+ }
+
+ rawValues[address] = readValues[0:batchSize:batchSize]
+ }
+ }
+
+ if register.Type == cDiscreteInputs || register.Type == cCoils {
+ for i := 0; i < len(register.Fields); i++ {
+ register.Fields[i].value = bitRawValues[register.Fields[i].Address[0]]
+ }
+ }
+
+ if register.Type == cInputRegisters || register.Type == cHoldingRegisters {
+ for i := 0; i < len(register.Fields); i++ {
+ var values_t []byte
+
+ for j := 0; j < len(register.Fields[i].Address); j++ {
+ tempArray := rawValues[register.Fields[i].Address[j]]
+ for x := 0; x < len(tempArray); x++ {
+ values_t = append(values_t, tempArray[x])
+ }
+ }
+
+ register.Fields[i].value = convertDataType(register.Fields[i], values_t)
+ }
+
+ }
+ }
+
+ return nil
+}
+
+func getBitValue(n byte, pos int) uint16 {
+ return uint16(n >> uint(pos) & 0x01)
+}
+
+func convertDataType(t fieldContainer, bytes []byte) interface{} {
+ switch t.DataType {
+ case "UINT16":
+ e16 := convertEndianness16(t.ByteOrder, bytes)
+ return scaleUint16(t.Scale, e16)
+ case "INT16":
+ e16 := convertEndianness16(t.ByteOrder, bytes)
+ f16 := int16(e16)
+ return scaleInt16(t.Scale, f16)
+ case "UINT32":
+ e32 := convertEndianness32(t.ByteOrder, bytes)
+ return scaleUint32(t.Scale, e32)
+ case "INT32":
+ e32 := convertEndianness32(t.ByteOrder, bytes)
+ f32 := int32(e32)
+ return scaleInt32(t.Scale, f32)
+ case "UINT64":
+ e64 := convertEndianness64(t.ByteOrder, bytes)
+ f64 := format64(t.DataType, e64).(uint64)
+ return scaleUint64(t.Scale, f64)
+ case "INT64":
+ e64 := convertEndianness64(t.ByteOrder, bytes)
+ f64 := format64(t.DataType, e64).(int64)
+ return scaleInt64(t.Scale, f64)
+ case "FLOAT32-IEEE":
+ e32 := convertEndianness32(t.ByteOrder, bytes)
+ f32 := math.Float32frombits(e32)
+ return scaleFloat32(t.Scale, f32)
+ case "FIXED":
+ if len(bytes) == 2 {
+ e16 := convertEndianness16(t.ByteOrder, bytes)
+ f16 := int16(e16)
+ return scale16toFloat(t.Scale, f16)
+ } else if len(bytes) == 4 {
+ e32 := convertEndianness32(t.ByteOrder, bytes)
+ f32 := int32(e32)
+ return scale32toFloat(t.Scale, f32)
+ } else {
+ e64 := convertEndianness64(t.ByteOrder, bytes)
+ f64 := int64(e64)
+ return scale64toFloat(t.Scale, f64)
+ }
+ case "FLOAT32", "UFIXED":
+ if len(bytes) == 2 {
+ e16 := convertEndianness16(t.ByteOrder, bytes)
+ return scale16UtoFloat(t.Scale, e16)
+ } else if len(bytes) == 4 {
+ e32 := convertEndianness32(t.ByteOrder, bytes)
+ return scale32UtoFloat(t.Scale, e32)
+ } else {
+ e64 := convertEndianness64(t.ByteOrder, bytes)
+ return scale64UtoFloat(t.Scale, e64)
+ }
+ default:
+ return 0
+ }
+}
+
+func convertEndianness16(o string, b []byte) uint16 {
+ switch o {
+ case "AB":
+ return binary.BigEndian.Uint16(b)
+ case "BA":
+ return binary.LittleEndian.Uint16(b)
+ default:
+ return 0
+ }
+}
+
+func convertEndianness32(o string, b []byte) uint32 {
+ switch o {
+ case "ABCD":
+ return binary.BigEndian.Uint32(b)
+ case "DCBA":
+ return binary.LittleEndian.Uint32(b)
+ case "BADC":
+ return uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:]))
+ case "CDAB":
+ return uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:]))
+ default:
+ return 0
+ }
+}
+
+func convertEndianness64(o string, b []byte) uint64 {
+ switch o {
+ case "ABCDEFGH":
+ return binary.BigEndian.Uint64(b)
+ case "HGFEDCBA":
+ return binary.LittleEndian.Uint64(b)
+ case "BADCFEHG":
+ return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:]))
+ case "GHEFCDAB":
+ return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:]))
+ default:
+ return 0
+ }
+}
+
+func format16(f string, r uint16) interface{} {
+ switch f {
+ case "UINT16":
+ return r
+ case "INT16":
+ return int16(r)
+ default:
+ return r
+ }
+}
+
+func format32(f string, r uint32) interface{} {
+ switch f {
+ case "UINT32":
+ return r
+ case "INT32":
+ return int32(r)
+ case "FLOAT32-IEEE":
+ return math.Float32frombits(r)
+ default:
+ return r
+ }
+}
+
+func format64(f string, r uint64) interface{} {
+ switch f {
+ case "UINT64":
+ return r
+ case "INT64":
+ return int64(r)
+ default:
+ return r
+ }
+}
+
+func scale16toFloat(s float64, v int16) float64 {
+ return float64(v) * s
+}
+
+func scale32toFloat(s float64, v int32) float64 {
+ return float64(float64(v) * float64(s))
+}
+
+func scale64toFloat(s float64, v int64) float64 {
+ return float64(float64(v) * float64(s))
+}
+
+func scale16UtoFloat(s float64, v uint16) float64 {
+ return float64(v) * s
+}
+
+func scale32UtoFloat(s float64, v uint32) float64 {
+ return float64(float64(v) * float64(s))
+}
+
+func scale64UtoFloat(s float64, v uint64) float64 {
+ return float64(float64(v) * float64(s))
+}
+
+func scaleInt16(s float64, v int16) int16 {
+ return int16(float64(v) * s)
+}
+
+func scaleUint16(s float64, v uint16) uint16 {
+ return uint16(float64(v) * s)
+}
+
+func scaleUint32(s float64, v uint32) uint32 {
+ return uint32(float64(v) * float64(s))
+}
+
+func scaleInt32(s float64, v int32) int32 {
+ return int32(float64(v) * float64(s))
+}
+
+func scaleFloat32(s float64, v float32) float32 {
+ return float32(float64(v) * s)
+}
+
+func scaleUint64(s float64, v uint64) uint64 {
+ return uint64(float64(v) * float64(s))
+}
+
+func scaleInt64(s float64, v int64) int64 {
+ return int64(float64(v) * float64(s))
+}
+
+// Gather implements the telegraf plugin interface method for data accumulation
+func (m *Modbus) Gather(acc telegraf.Accumulator) error {
+ if !m.isConnected {
+ err := connect(m)
+ if err != nil {
+ m.isConnected = false
+ return err
+ }
+ }
+
+ timestamp := time.Now()
+ for retry := 0; retry <= m.Retries; retry += 1 {
+ timestamp = time.Now()
+ err := m.getFields()
+ if err != nil {
+ mberr, ok := err.(*mb.ModbusError)
+ if ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries {
+ log.Printf("I! [inputs.modbus] device busy! Retrying %d more time(s)...", m.Retries-retry)
+ time.Sleep(m.RetriesWaitTime.Duration)
+ continue
+ }
+ disconnect(m)
+ m.isConnected = false
+ return err
+ }
+ // Reading was successful, leave the retry loop
+ break
+ }
+
+ grouper := metric.NewSeriesGrouper()
+ for _, reg := range m.registers {
+ tags := map[string]string{
+ "name": m.Name,
+ "type": reg.Type,
+ }
+
+ for _, field := range reg.Fields {
+ // In case no measurement was specified we use "modbus" as default
+ measurement := "modbus"
+ if field.Measurement != "" {
+ measurement = field.Measurement
+ }
+
+ // Group the data by series
+ grouper.Add(measurement, tags, timestamp, field.Name, field.value)
+ }
+
+ // Add the metrics grouped by series to the accumulator
+ for _, metric := range grouper.Metrics() {
+ acc.AddMetric(metric)
+ }
+ }
+
+ return nil
+}
+
+// Add this plugin to telegraf
+func init() {
+ inputs.Add("modbus", func() telegraf.Input { return &Modbus{} })
+}
diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go
new file mode 100644
index 0000000000000..8c5241dc2aaee
--- /dev/null
+++ b/plugins/inputs/modbus/modbus_test.go
@@ -0,0 +1,724 @@
+package modbus
+
+import (
+ "testing"
+
+ m "github.com/goburrow/modbus"
+ "github.com/stretchr/testify/assert"
+ "github.com/tbrandon/mbserver"
+
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func TestCoils(t *testing.T) {
+ var coilTests = []struct {
+ name string
+ address uint16
+ quantity uint16
+ write []byte
+ read uint16
+ }{
+ {
+ name: "coil0_turn_off",
+ address: 0,
+ quantity: 1,
+ write: []byte{0x00},
+ read: 0,
+ },
+ {
+ name: "coil0_turn_on",
+ address: 0,
+ quantity: 1,
+ write: []byte{0x01},
+ read: 1,
+ },
+ {
+ name: "coil1_turn_on",
+ address: 1,
+ quantity: 1,
+ write: []byte{0x01},
+ read: 1,
+ },
+ {
+ name: "coil2_turn_on",
+ address: 2,
+ quantity: 1,
+ write: []byte{0x01},
+ read: 1,
+ },
+ {
+ name: "coil3_turn_on",
+ address: 3,
+ quantity: 1,
+ write: []byte{0x01},
+ read: 1,
+ },
+ {
+ name: "coil1_turn_off",
+ address: 1,
+ quantity: 1,
+ write: []byte{0x00},
+ read: 0,
+ },
+ {
+ name: "coil2_turn_off",
+ address: 2,
+ quantity: 1,
+ write: []byte{0x00},
+ read: 0,
+ },
+ {
+ name: "coil3_turn_off",
+ address: 3,
+ quantity: 1,
+ write: []byte{0x00},
+ read: 0,
+ },
+ }
+
+ serv := mbserver.NewServer()
+ err := serv.ListenTCP("localhost:1502")
+ defer serv.Close()
+ assert.NoError(t, err)
+
+ handler := m.NewTCPClientHandler("localhost:1502")
+ err = handler.Connect()
+ assert.NoError(t, err)
+ defer handler.Close()
+ client := m.NewClient(handler)
+
+ for _, ct := range coilTests {
+ t.Run(ct.name, func(t *testing.T) {
+ _, err = client.WriteMultipleCoils(ct.address, ct.quantity, ct.write)
+ assert.NoError(t, err)
+
+ modbus := Modbus{
+ Name: "TestCoils",
+ Controller: "tcp://localhost:1502",
+ SlaveID: 1,
+ Coils: []fieldContainer{
+ {
+ Name: ct.name,
+ Address: []uint16{ct.address},
+ },
+ },
+ }
+
+ err = modbus.Init()
+ assert.NoError(t, err)
+ var acc testutil.Accumulator
+ err = modbus.Gather(&acc)
+ assert.NoError(t, err)
+ assert.NotEmpty(t, modbus.registers)
+
+ for _, coil := range modbus.registers {
+ assert.Equal(t, ct.read, coil.Fields[0].value)
+ }
+ })
+ }
+}
+
+func TestHoldingRegisters(t *testing.T) {
+ var holdingRegisterTests = []struct {
+ name string
+ address []uint16
+ quantity uint16
+ byteOrder string
+ dataType string
+ scale float64
+ write []byte
+ read interface{}
+ }{
+ {
+ name: "register0_ab_float32",
+ address: []uint16{0},
+ quantity: 1,
+ byteOrder: "AB",
+ dataType: "FLOAT32",
+ scale: 0.1,
+ write: []byte{0x08, 0x98},
+ read: float64(220),
+ },
+ {
+ name: "register0_register1_ab_float32",
+ address: []uint16{0, 1},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "FLOAT32",
+ scale: 0.001,
+ write: []byte{0x00, 0x00, 0x03, 0xE8},
+ read: float64(1),
+ },
+ {
+ name: "register1_register2_abcd_float32",
+ address: []uint16{1, 2},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "FLOAT32",
+ scale: 0.1,
+ write: []byte{0x00, 0x00, 0x08, 0x98},
+ read: float64(220),
+ },
+ {
+ name: "register3_register4_abcd_float32",
+ address: []uint16{3, 4},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "FLOAT32",
+ scale: 0.1,
+ write: []byte{0x00, 0x00, 0x08, 0x98},
+ read: float64(220),
+ },
+ {
+ name: "register7_ab_float32",
+ address: []uint16{7},
+ quantity: 1,
+ byteOrder: "AB",
+ dataType: "FLOAT32",
+ scale: 0.1,
+ write: []byte{0x01, 0xF4},
+ read: float64(50),
+ },
+ {
+ name: "register0_ab_float32_msb",
+ address: []uint16{0},
+ quantity: 1,
+ byteOrder: "AB",
+ dataType: "FLOAT32",
+ scale: 0.1,
+ write: []byte{0x89, 0x65},
+ read: float64(3517.3),
+ },
+ {
+ name: "register0_register1_ab_float32_msb",
+ address: []uint16{0, 1},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "FLOAT32",
+ scale: 0.001,
+ write: []byte{0xFF, 0xFF, 0xFF, 0xFF},
+ read: float64(4294967.295),
+ },
+ {
+ name: "register5_to_register8_abcdefgh_float32",
+ address: []uint16{5, 6, 7, 8},
+ quantity: 4,
+ byteOrder: "ABCDEFGH",
+ dataType: "FLOAT32",
+ scale: 0.000001,
+ write: []byte{0x00, 0x00, 0x00, 0x62, 0xC6, 0xD1, 0xA9, 0xB2},
+ read: float64(424242.424242),
+ },
+ {
+ name: "register6_to_register9_hgfedcba_float32_msb",
+ address: []uint16{6, 7, 8, 9},
+ quantity: 4,
+ byteOrder: "HGFEDCBA",
+ dataType: "FLOAT32",
+ scale: 0.0000000001,
+ write: []byte{0xEA, 0x1E, 0x39, 0xEE, 0x8E, 0xA9, 0x54, 0xAB},
+ read: float64(1234567890.9876544),
+ },
+ {
+ name: "register0_ab_float",
+ address: []uint16{0},
+ quantity: 1,
+ byteOrder: "AB",
+ dataType: "FIXED",
+ scale: 0.1,
+ write: []byte{0xFF, 0xD6},
+ read: float64(-4.2),
+ },
+ {
+ name: "register1_ba_ufloat",
+ address: []uint16{1},
+ quantity: 1,
+ byteOrder: "BA",
+ dataType: "UFIXED",
+ scale: 0.1,
+ write: []byte{0xD8, 0xFF},
+ read: float64(6549.6),
+ },
+ {
+ name: "register4_register5_abcd_float",
+ address: []uint16{4, 5},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "FIXED",
+ scale: 0.1,
+ write: []byte{0xFF, 0xFF, 0xFF, 0xD6},
+ read: float64(-4.2),
+ },
+ {
+ name: "register5_register6_dcba_ufloat",
+ address: []uint16{5, 6},
+ quantity: 2,
+ byteOrder: "DCBA",
+ dataType: "UFIXED",
+ scale: 0.001,
+ write: []byte{0xD8, 0xFF, 0xFF, 0xFF},
+ read: float64(4294967.256),
+ },
+ {
+ name: "register5_to_register8_abcdefgh_float",
+ address: []uint16{5, 6, 7, 8},
+ quantity: 4,
+ byteOrder: "ABCDEFGH",
+ dataType: "FIXED",
+ scale: 0.000001,
+ write: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xD6},
+ read: float64(-0.000042),
+ },
+ {
+ name: "register6_to_register9_hgfedcba_ufloat",
+ address: []uint16{6, 7, 8, 9},
+ quantity: 4,
+ byteOrder: "HGFEDCBA",
+ dataType: "UFIXED",
+ scale: 0.000000001,
+ write: []byte{0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF},
+ read: float64(18441921395.520346504),
+ },
+ {
+ name: "register10_ab_uint16",
+ address: []uint16{10},
+ quantity: 1,
+ byteOrder: "AB",
+ dataType: "UINT16",
+ scale: 1,
+ write: []byte{0xAB, 0xCD},
+ read: uint16(43981),
+ },
+ {
+ name: "register10_ab_uint16-scale_.1",
+ address: []uint16{10},
+ quantity: 1,
+ byteOrder: "AB",
+ dataType: "UINT16",
+ scale: .1,
+ write: []byte{0xAB, 0xCD},
+ read: uint16(4398),
+ },
+ {
+ name: "register10_ab_uint16_scale_10",
+ address: []uint16{10},
+ quantity: 1,
+ byteOrder: "AB",
+ dataType: "UINT16",
+ scale: 10,
+ write: []byte{0x00, 0x2A},
+ read: uint16(420),
+ },
+ {
+ name: "register20_ba_uint16",
+ address: []uint16{20},
+ quantity: 1,
+ byteOrder: "BA",
+ dataType: "UINT16",
+ scale: 1,
+ write: []byte{0xAB, 0xCD},
+ read: uint16(52651),
+ },
+ {
+ name: "register30_ab_int16",
+ address: []uint16{20},
+ quantity: 1,
+ byteOrder: "AB",
+ dataType: "INT16",
+ scale: 1,
+ write: []byte{0xAB, 0xCD},
+ read: int16(-21555),
+ },
+ {
+ name: "register40_ba_int16",
+ address: []uint16{40},
+ quantity: 1,
+ byteOrder: "BA",
+ dataType: "INT16",
+ scale: 1,
+ write: []byte{0xAB, 0xCD},
+ read: int16(-12885),
+ },
+ {
+ name: "register50_register51_abcd_int32_scaled",
+ address: []uint16{50, 51},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "INT32",
+ scale: 10,
+ write: []byte{0x00, 0x00, 0xAB, 0xCD},
+ read: int32(439810),
+ },
+ {
+ name: "register50_register51_abcd_int32",
+ address: []uint16{50, 51},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "INT32",
+ scale: 1,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: int32(-1430532899),
+ },
+ {
+ name: "register60_register61_dcba_int32",
+ address: []uint16{60, 61},
+ quantity: 2,
+ byteOrder: "DCBA",
+ dataType: "INT32",
+ scale: 1,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: int32(-573785174),
+ },
+ {
+ name: "register70_register71_badc_int32",
+ address: []uint16{70, 71},
+ quantity: 2,
+ byteOrder: "BADC",
+ dataType: "INT32",
+ scale: 1,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: int32(-1146430004),
+ },
+ {
+ name: "register80_register81_cdab_int32",
+ address: []uint16{80, 81},
+ quantity: 2,
+ byteOrder: "CDAB",
+ dataType: "INT32",
+ scale: 1,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: int32(-857888069),
+ },
+ {
+ name: "register90_register91_abcd_uint32",
+ address: []uint16{90, 91},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "UINT32",
+ scale: 1,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: uint32(2864434397),
+ },
+ {
+ name: "register100_register101_dcba_uint32",
+ address: []uint16{100, 101},
+ quantity: 2,
+ byteOrder: "DCBA",
+ dataType: "UINT32",
+ scale: 1,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: uint32(3721182122),
+ },
+ {
+ name: "register110_register111_badc_uint32",
+ address: []uint16{110, 111},
+ quantity: 2,
+ byteOrder: "BADC",
+ dataType: "UINT32",
+ scale: 1,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: uint32(3148537292),
+ },
+ {
+ name: "register120_register121_cdab_uint32",
+ address: []uint16{120, 121},
+ quantity: 2,
+ byteOrder: "CDAB",
+ dataType: "UINT32",
+ scale: 1,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: uint32(3437079227),
+ },
+ {
+ name: "register130_register131_abcd_float32_ieee",
+ address: []uint16{130, 131},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "FLOAT32-IEEE",
+ scale: 1,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: float32(-3.3360025e-13),
+ },
+ {
+ name: "register130_register131_abcd_float32_ieee_scaled",
+ address: []uint16{130, 131},
+ quantity: 2,
+ byteOrder: "ABCD",
+ dataType: "FLOAT32-IEEE",
+ scale: 10,
+ write: []byte{0xAA, 0xBB, 0xCC, 0xDD},
+ read: float32(-3.3360025e-12),
+ },
+ {
+ name: "register140_to_register143_abcdefgh_int64_scaled",
+ address: []uint16{140, 141, 142, 143},
+ quantity: 4,
+ byteOrder: "ABCDEFGH",
+ dataType: "INT64",
+ scale: 10,
+ write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD},
+ read: int64(10995116717570),
+ },
+ {
+ name: "register140_to_register143_abcdefgh_int64",
+ address: []uint16{140, 141, 142, 143},
+ quantity: 4,
+ byteOrder: "ABCDEFGH",
+ dataType: "INT64",
+ scale: 1,
+ write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD},
+ read: int64(1099511671757),
+ },
+ {
+ name: "register150_to_register153_hgfedcba_int64",
+ address: []uint16{150, 151, 152, 153},
+ quantity: 4,
+ byteOrder: "HGFEDCBA",
+ dataType: "INT64",
+ scale: 1,
+ write: []byte{0x84, 0xF6, 0x45, 0xF9, 0xBC, 0xFE, 0xFF, 0xFF},
+ read: int64(-1387387292028),
+ },
+ {
+ name: "register160_to_register163_badcfehg_int64",
+ address: []uint16{160, 161, 162, 163},
+ quantity: 4,
+ byteOrder: "BADCFEHG",
+ dataType: "INT64",
+ scale: 1,
+ write: []byte{0xFF, 0xFF, 0xBC, 0xFE, 0x45, 0xF9, 0x84, 0xF6},
+ read: int64(-1387387292028),
+ },
+ {
+ name: "register170_to_register173_ghefcdab_int64",
+ address: []uint16{170, 171, 172, 173},
+ quantity: 4,
+ byteOrder: "GHEFCDAB",
+ dataType: "INT64",
+ scale: 1,
+ write: []byte{0xF6, 0x84, 0xF9, 0x45, 0xFE, 0xBC, 0xFF, 0xFF},
+ read: int64(-1387387292028),
+ },
+ {
+ name: "register180_to_register183_abcdefgh_uint64_scaled",
+ address: []uint16{180, 181, 182, 183},
+ quantity: 4,
+ byteOrder: "ABCDEFGH",
+ dataType: "UINT64",
+ scale: 10,
+ write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD},
+ read: uint64(10995116717570),
+ },
+ {
+ name: "register180_to_register183_abcdefgh_uint64",
+ address: []uint16{180, 181, 182, 183},
+ quantity: 4,
+ byteOrder: "ABCDEFGH",
+ dataType: "UINT64",
+ scale: 1,
+ write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD},
+ read: uint64(1099511671757),
+ },
+ {
+ name: "register190_to_register193_hgfedcba_uint64",
+ address: []uint16{190, 191, 192, 193},
+ quantity: 4,
+ byteOrder: "HGFEDCBA",
+ dataType: "UINT64",
+ scale: 1,
+ write: []byte{0x84, 0xF6, 0x45, 0xF9, 0xBC, 0xFE, 0xFF, 0xFF},
+ read: uint64(18446742686322259968),
+ },
+ {
+ name: "register200_to_register203_badcfehg_uint64",
+ address: []uint16{200, 201, 202, 203},
+ quantity: 4,
+ byteOrder: "BADCFEHG",
+ dataType: "UINT64",
+ scale: 1,
+ write: []byte{0xFF, 0xFF, 0xBC, 0xFE, 0x45, 0xF9, 0x84, 0xF6},
+ read: uint64(18446742686322259968),
+ },
+ {
+ name: "register210_to_register213_ghefcdab_uint64",
+ address: []uint16{210, 211, 212, 213},
+ quantity: 4,
+ byteOrder: "GHEFCDAB",
+ dataType: "UINT64",
+ scale: 1,
+ write: []byte{0xF6, 0x84, 0xF9, 0x45, 0xFE, 0xBC, 0xFF, 0xFF},
+ read: uint64(18446742686322259968),
+ },
+ }
+
+ serv := mbserver.NewServer()
+ err := serv.ListenTCP("localhost:1502")
+ defer serv.Close()
+ assert.NoError(t, err)
+
+ handler := m.NewTCPClientHandler("localhost:1502")
+ err = handler.Connect()
+ assert.NoError(t, err)
+ defer handler.Close()
+ client := m.NewClient(handler)
+
+ for _, hrt := range holdingRegisterTests {
+ t.Run(hrt.name, func(t *testing.T) {
+ _, err = client.WriteMultipleRegisters(hrt.address[0], hrt.quantity, hrt.write)
+ assert.NoError(t, err)
+
+ modbus := Modbus{
+ Name: "TestHoldingRegisters",
+ Controller: "tcp://localhost:1502",
+ SlaveID: 1,
+ HoldingRegisters: []fieldContainer{
+ {
+ Name: hrt.name,
+ ByteOrder: hrt.byteOrder,
+ DataType: hrt.dataType,
+ Scale: hrt.scale,
+ Address: hrt.address,
+ },
+ },
+ }
+
+ err = modbus.Init()
+ assert.NoError(t, err)
+ var acc testutil.Accumulator
+ modbus.Gather(&acc)
+ assert.NotEmpty(t, modbus.registers)
+
+ for _, coil := range modbus.registers {
+ assert.Equal(t, hrt.read, coil.Fields[0].value)
+ }
+ })
+ }
+}
+
+func TestRetrySuccessful(t *testing.T) {
+ retries := 0
+ maxretries := 2
+ value := 1
+
+ serv := mbserver.NewServer()
+ err := serv.ListenTCP("localhost:1502")
+ assert.NoError(t, err)
+ defer serv.Close()
+
+ // Make read on coil-registers fail for some trials by making the device
+ // to appear busy
+ serv.RegisterFunctionHandler(1,
+ func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) {
+ data := make([]byte, 2)
+ data[0] = byte(1)
+ data[1] = byte(value)
+
+ except := &mbserver.SlaveDeviceBusy
+ if retries >= maxretries {
+ except = &mbserver.Success
+ }
+ retries += 1
+
+ return data, except
+ })
+
+ t.Run("retry_success", func(t *testing.T) {
+ modbus := Modbus{
+ Name: "TestRetry",
+ Controller: "tcp://localhost:1502",
+ SlaveID: 1,
+ Retries: maxretries,
+ Coils: []fieldContainer{
+ {
+ Name: "retry_success",
+ Address: []uint16{0},
+ },
+ },
+ }
+
+ err = modbus.Init()
+ assert.NoError(t, err)
+ var acc testutil.Accumulator
+ err = modbus.Gather(&acc)
+ assert.NoError(t, err)
+ assert.NotEmpty(t, modbus.registers)
+
+ for _, coil := range modbus.registers {
+ assert.Equal(t, uint16(value), coil.Fields[0].value)
+ }
+ })
+}
+
+func TestRetryFail(t *testing.T) {
+ maxretries := 2
+
+ serv := mbserver.NewServer()
+ err := serv.ListenTCP("localhost:1502")
+ assert.NoError(t, err)
+ defer serv.Close()
+
+ // Make the read on coils fail with busy
+ serv.RegisterFunctionHandler(1,
+ func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) {
+ data := make([]byte, 2)
+ data[0] = byte(1)
+ data[1] = byte(0)
+
+ return data, &mbserver.SlaveDeviceBusy
+ })
+
+ t.Run("retry_fail", func(t *testing.T) {
+ modbus := Modbus{
+ Name: "TestRetryFail",
+ Controller: "tcp://localhost:1502",
+ SlaveID: 1,
+ Retries: maxretries,
+ Coils: []fieldContainer{
+ {
+ Name: "retry_fail",
+ Address: []uint16{0},
+ },
+ },
+ }
+
+ err = modbus.Init()
+ assert.NoError(t, err)
+ var acc testutil.Accumulator
+ err = modbus.Gather(&acc)
+ assert.Error(t, err)
+ })
+
+ // Make the read on coils fail with illegal function preventing retry
+ counter := 0
+ serv.RegisterFunctionHandler(1,
+ func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) {
+ counter += 1
+ data := make([]byte, 2)
+ data[0] = byte(1)
+ data[1] = byte(0)
+
+ return data, &mbserver.IllegalFunction
+ })
+
+ t.Run("retry_fail", func(t *testing.T) {
+ modbus := Modbus{
+ Name: "TestRetryFail",
+ Controller: "tcp://localhost:1502",
+ SlaveID: 1,
+ Retries: maxretries,
+ Coils: []fieldContainer{
+ {
+ Name: "retry_fail",
+ Address: []uint16{0},
+ },
+ },
+ }
+
+ err = modbus.Init()
+ assert.NoError(t, err)
+ var acc testutil.Accumulator
+ err = modbus.Gather(&acc)
+ assert.Error(t, err)
+ assert.Equal(t, counter, 1)
+ })
+}
diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md
index 07ab133d480ce..cce93dc07376a 100644
--- a/plugins/inputs/mongodb/README.md
+++ b/plugins/inputs/mongodb/README.md
@@ -11,9 +11,21 @@
## mongodb://10.10.3.33:18832,
servers = ["mongodb://127.0.0.1:27017"]
+ ## When true, collect cluster status.
+ ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
+ ## may have an impact on performance.
+ # gather_cluster_status = true
+
## When true, collect per database stats
# gather_perdb_stats = false
+ ## When true, collect per collection stats
+ # gather_col_stats = false
+
+ ## List of db where collections stats are collected
+ ## If empty, all db are concerned
+ # col_stats_dbs = ["local"]
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
@@ -39,58 +51,140 @@ Telegraf logs similar to:
Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 }
```
+Some permission related errors are logged at debug level, you can check these
+messages by setting `debug = true` in the agent section of the configuration or
+by running Telegraf with the `--debug` argument.
+
### Metrics:
- mongodb
- tags:
- hostname
+ - node_type
+ - rs_name
- fields:
- active_reads (integer)
- active_writes (integer)
+ - aggregate_command_failed (integer)
+ - aggregate_command_total (integer)
+ - assert_msg (integer)
+ - assert_regular (integer)
+ - assert_rollovers (integer)
+ - assert_user (integer)
+ - assert_warning (integer)
+ - available_reads (integer)
+ - available_writes (integer)
- commands (integer)
- - connections_current (integer)
- connections_available (integer)
+ - connections_current (integer)
- connections_total_created (integer)
- - cursor_timed_out_count (integer)
+ - count_command_failed (integer)
+ - count_command_total (integer)
- cursor_no_timeout_count (integer)
- cursor_pinned_count (integer)
+ - cursor_timed_out_count (integer)
- cursor_total_count (integer)
+ - delete_command_failed (integer)
+ - delete_command_total (integer)
- deletes (integer)
+ - distinct_command_failed (integer)
+ - distinct_command_total (integer)
- document_deleted (integer)
- document_inserted (integer)
- document_returned (integer)
- document_updated (integer)
+ - find_and_modify_command_failed (integer)
+ - find_and_modify_command_total (integer)
+ - find_command_failed (integer)
+ - find_command_total (integer)
- flushes (integer)
- flushes_total_time_ns (integer)
+ - get_more_command_failed (integer)
+ - get_more_command_total (integer)
- getmores (integer)
- - inserts (integer
+ - insert_command_failed (integer)
+ - insert_command_total (integer)
+ - inserts (integer)
- jumbo_chunks (integer)
+ - latency_commands_count (integer)
+ - latency_commands (integer)
+ - latency_reads_count (integer)
+ - latency_reads (integer)
+ - latency_writes_count (integer)
+ - latency_writes (integer)
- member_status (string)
- net_in_bytes_count (integer)
- net_out_bytes_count (integer)
- open_connections (integer)
+ - operation_scan_and_order (integer)
+ - operation_write_conflicts (integer)
+ - page_faults (integer)
- percent_cache_dirty (float)
- percent_cache_used (float)
- queries (integer)
- queued_reads (integer)
- queued_writes (integer)
+ - repl_apply_batches_num (integer)
+ - repl_apply_batches_total_millis (integer)
+ - repl_apply_ops (integer)
+ - repl_buffer_count (integer)
+ - repl_buffer_size_bytes (integer)
- repl_commands (integer)
- repl_deletes (integer)
+ - repl_executor_pool_in_progress_count (integer)
+ - repl_executor_queues_network_in_progress (integer)
+ - repl_executor_queues_sleepers (integer)
+ - repl_executor_unsignaled_events (integer)
- repl_getmores (integer)
- repl_inserts (integer)
- repl_lag (integer)
+ - repl_network_bytes (integer)
+ - repl_network_getmores_num (integer)
+ - repl_network_getmores_total_millis (integer)
+ - repl_network_ops (integer)
- repl_queries (integer)
- repl_updates (integer)
- repl_oplog_window_sec (integer)
+ - repl_state (integer)
- resident_megabytes (integer)
- state (string)
+ - storage_freelist_search_bucket_exhausted (integer)
+ - storage_freelist_search_requests (integer)
+ - storage_freelist_search_scanned (integer)
+ - tcmalloc_central_cache_free_bytes (integer)
+ - tcmalloc_current_allocated_bytes (integer)
+ - tcmalloc_current_total_thread_cache_bytes (integer)
+ - tcmalloc_heap_size (integer)
+ - tcmalloc_max_total_thread_cache_bytes (integer)
+ - tcmalloc_pageheap_commit_count (integer)
+ - tcmalloc_pageheap_committed_bytes (integer)
+ - tcmalloc_pageheap_decommit_count (integer)
+ - tcmalloc_pageheap_free_bytes (integer)
+ - tcmalloc_pageheap_reserve_count (integer)
+ - tcmalloc_pageheap_scavenge_count (integer)
+ - tcmalloc_pageheap_total_commit_bytes (integer)
+ - tcmalloc_pageheap_total_decommit_bytes (integer)
+ - tcmalloc_pageheap_total_reserve_bytes (integer)
+ - tcmalloc_pageheap_unmapped_bytes (integer)
+ - tcmalloc_spinlock_total_delay_ns (integer)
+ - tcmalloc_thread_cache_free_bytes (integer)
+ - tcmalloc_total_free_bytes (integer)
+ - tcmalloc_transfer_cache_free_bytes (integer)
- total_available (integer)
- total_created (integer)
+ - total_docs_scanned (integer)
- total_in_use (integer)
+ - total_keys_scanned (integer)
- total_refreshing (integer)
+ - total_tickets_reads (integer)
+ - total_tickets_writes (integer)
- ttl_deletes (integer)
- ttl_passes (integer)
+ - update_command_failed (integer)
+ - update_command_total (integer)
- updates (integer)
+ - uptime_ns (integer)
+ - version (string)
- vsize_megabytes (integer)
- wtcache_app_threads_page_read_count (integer)
- wtcache_app_threads_page_read_time (integer)
@@ -127,7 +221,7 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
- repl_inserts_per_sec (integer, deprecated in 1.10; use `repl_inserts`))
- repl_queries_per_sec (integer, deprecated in 1.10; use `repl_queries`))
- repl_updates_per_sec (integer, deprecated in 1.10; use `repl_updates`))
- - ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deltes`))
+ - ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deletes`))
- ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`))
- updates_per_sec (integer, deprecated in 1.10; use `updates`))
@@ -147,6 +241,20 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
- storage_size (integer)
- type (string)
+- mongodb_col_stats
+ - tags:
+ - hostname
+ - collection
+ - db_name
+ - fields:
+ - size (integer)
+ - avg_obj_size (integer)
+ - storage_size (integer)
+ - total_index_size (integer)
+ - ok (integer)
+ - count (integer)
+ - type (string)
+
- mongodb_shard_stats
- tags:
- hostname
@@ -158,8 +266,10 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
### Example Output:
```
-mongodb,hostname=127.0.0.1:27017 active_reads=0i,active_writes=0i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000
+mongodb,hostname=127.0.0.1:27017 active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmalloc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_total_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=1797400i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requested_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1586379818000000000
+mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,repl_state=2,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000
mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000
mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000
+mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000
mongodb_shard_stats,hostname=127.0.0.1:27017,in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000
```
diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go
index 895667dee9fd3..4ba54137383dd 100644
--- a/plugins/inputs/mongodb/mongodb.go
+++ b/plugins/inputs/mongodb/mongodb.go
@@ -4,7 +4,6 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
- "log"
"net"
"net/url"
"strings"
@@ -12,17 +11,22 @@ import (
"time"
"github.com/influxdata/telegraf"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"gopkg.in/mgo.v2"
)
type MongoDB struct {
- Servers []string
- Ssl Ssl
- mongos map[string]*Server
- GatherPerdbStats bool
+ Servers []string
+ Ssl Ssl
+ mongos map[string]*Server
+ GatherClusterStatus bool
+ GatherPerdbStats bool
+ GatherColStats bool
+ ColStatsDbs []string
tlsint.ClientConfig
+
+ Log telegraf.Logger
}
type Ssl struct {
@@ -38,9 +42,21 @@ var sampleConfig = `
## mongodb://10.10.3.33:18832,
servers = ["mongodb://127.0.0.1:27017"]
+ ## When true, collect cluster status
+ ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
+ ## may have an impact on performance.
+ # gather_cluster_status = true
+
## When true, collect per database stats
# gather_perdb_stats = false
+ ## When true, collect per collection stats
+ # gather_col_stats = false
+
+ ## List of db where collections stats are collected
+ ## If empty, all db are concerned
+ # col_stats_dbs = ["local"]
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
@@ -73,24 +89,27 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
// Preserve backwards compatibility for hostnames without a
// scheme, broken in go 1.8. Remove in Telegraf 2.0
serv = "mongodb://" + serv
- log.Printf("W! [inputs.mongodb] Using %q as connection URL; please update your configuration to use an URL", serv)
+ m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", serv)
m.Servers[i] = serv
}
u, err := url.Parse(serv)
if err != nil {
- acc.AddError(fmt.Errorf("Unable to parse address %q: %s", serv, err))
+ m.Log.Errorf("Unable to parse address %q: %s", serv, err.Error())
continue
}
if u.Host == "" {
- acc.AddError(fmt.Errorf("Unable to parse address %q", serv))
+ m.Log.Errorf("Unable to parse address %q", serv)
continue
}
wg.Add(1)
go func(srv *Server) {
defer wg.Done()
- acc.AddError(m.gatherServer(srv, acc))
+ err := m.gatherServer(srv, acc)
+ if err != nil {
+ m.Log.Errorf("Error in plugin: %v", err)
+ }
}(m.getMongoServer(u))
}
@@ -101,6 +120,7 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
func (m *MongoDB) getMongoServer(url *url.URL) *Server {
if _, ok := m.mongos[url.Host]; !ok {
m.mongos[url.Host] = &Server{
+ Log: m.Log,
Url: url,
}
}
@@ -117,8 +137,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
}
dialInfo, err := mgo.ParseURL(dialAddrs[0])
if err != nil {
- return fmt.Errorf("Unable to parse URL (%s), %s\n",
- dialAddrs[0], err.Error())
+ return fmt.Errorf("unable to parse URL %q: %s", dialAddrs[0], err.Error())
}
dialInfo.Direct = true
dialInfo.Timeout = 5 * time.Second
@@ -160,17 +179,21 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
sess, err := mgo.DialWithInfo(dialInfo)
if err != nil {
- return fmt.Errorf("Unable to connect to MongoDB, %s\n", err.Error())
+ return fmt.Errorf("unable to connect to MongoDB: %s", err.Error())
}
server.Session = sess
}
- return server.gatherData(acc, m.GatherPerdbStats)
+ return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs)
}
func init() {
inputs.Add("mongodb", func() telegraf.Input {
return &MongoDB{
- mongos: make(map[string]*Server),
+ mongos: make(map[string]*Server),
+ GatherClusterStatus: true,
+ GatherPerdbStats: false,
+ GatherColStats: false,
+ ColStatsDbs: []string{"local"},
}
})
}
diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go
index 5fa0c237df67b..6a2c0a86ebd12 100644
--- a/plugins/inputs/mongodb/mongodb_data.go
+++ b/plugins/inputs/mongodb/mongodb_data.go
@@ -13,6 +13,7 @@ type MongodbData struct {
Fields map[string]interface{}
Tags map[string]string
DbData []DbData
+ ColData []ColData
ShardHostData []DbData
}
@@ -21,6 +22,12 @@ type DbData struct {
Fields map[string]interface{}
}
+type ColData struct {
+ Name string
+ DbName string
+ Fields map[string]interface{}
+}
+
func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
return &MongodbData{
StatLine: statLine,
@@ -31,6 +38,7 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
}
var DefaultStats = map[string]string{
+ "uptime_ns": "UptimeNanos",
"inserts": "InsertCnt",
"inserts_per_sec": "Insert",
"queries": "QueryCnt",
@@ -52,6 +60,10 @@ var DefaultStats = map[string]string{
"queued_writes": "QueuedWriters",
"active_reads": "ActiveReaders",
"active_writes": "ActiveWriters",
+ "available_reads": "AvailableReaders",
+ "available_writes": "AvailableWriters",
+ "total_tickets_reads": "TotalTicketsReaders",
+ "total_tickets_writes": "TotalTicketsWriters",
"net_in_bytes_count": "NetInCnt",
"net_in_bytes": "NetIn",
"net_out_bytes_count": "NetOutCnt",
@@ -76,25 +88,80 @@ var DefaultStats = map[string]string{
"connections_current": "CurrentC",
"connections_available": "AvailableC",
"connections_total_created": "TotalCreatedC",
+ "operation_scan_and_order": "ScanAndOrderOp",
+ "operation_write_conflicts": "WriteConflictsOp",
+ "total_keys_scanned": "TotalKeysScanned",
+ "total_docs_scanned": "TotalObjectsScanned",
+}
+
+var DefaultAssertsStats = map[string]string{
+ "assert_regular": "Regular",
+ "assert_warning": "Warning",
+ "assert_msg": "Msg",
+ "assert_user": "User",
+ "assert_rollovers": "Rollovers",
+}
+
+var DefaultCommandsStats = map[string]string{
+ "aggregate_command_total": "AggregateCommandTotal",
+ "aggregate_command_failed": "AggregateCommandFailed",
+ "count_command_total": "CountCommandTotal",
+ "count_command_failed": "CountCommandFailed",
+ "delete_command_total": "DeleteCommandTotal",
+ "delete_command_failed": "DeleteCommandFailed",
+ "distinct_command_total": "DistinctCommandTotal",
+ "distinct_command_failed": "DistinctCommandFailed",
+ "find_command_total": "FindCommandTotal",
+ "find_command_failed": "FindCommandFailed",
+ "find_and_modify_command_total": "FindAndModifyCommandTotal",
+ "find_and_modify_command_failed": "FindAndModifyCommandFailed",
+ "get_more_command_total": "GetMoreCommandTotal",
+ "get_more_command_failed": "GetMoreCommandFailed",
+ "insert_command_total": "InsertCommandTotal",
+ "insert_command_failed": "InsertCommandFailed",
+ "update_command_total": "UpdateCommandTotal",
+ "update_command_failed": "UpdateCommandFailed",
+}
+
+var DefaultLatencyStats = map[string]string{
+ "latency_writes_count": "WriteOpsCnt",
+ "latency_writes": "WriteLatency",
+ "latency_reads_count": "ReadOpsCnt",
+ "latency_reads": "ReadLatency",
+ "latency_commands_count": "CommandOpsCnt",
+ "latency_commands": "CommandLatency",
}
var DefaultReplStats = map[string]string{
- "repl_inserts": "InsertRCnt",
- "repl_inserts_per_sec": "InsertR",
- "repl_queries": "QueryRCnt",
- "repl_queries_per_sec": "QueryR",
- "repl_updates": "UpdateRCnt",
- "repl_updates_per_sec": "UpdateR",
- "repl_deletes": "DeleteRCnt",
- "repl_deletes_per_sec": "DeleteR",
- "repl_getmores": "GetMoreRCnt",
- "repl_getmores_per_sec": "GetMoreR",
- "repl_commands": "CommandRCnt",
- "repl_commands_per_sec": "CommandR",
- "member_status": "NodeType",
- "state": "NodeState",
- "repl_lag": "ReplLag",
- "repl_oplog_window_sec": "OplogTimeDiff",
+ "repl_inserts": "InsertRCnt",
+ "repl_inserts_per_sec": "InsertR",
+ "repl_queries": "QueryRCnt",
+ "repl_queries_per_sec": "QueryR",
+ "repl_updates": "UpdateRCnt",
+ "repl_updates_per_sec": "UpdateR",
+ "repl_deletes": "DeleteRCnt",
+ "repl_deletes_per_sec": "DeleteR",
+ "repl_getmores": "GetMoreRCnt",
+ "repl_getmores_per_sec": "GetMoreR",
+ "repl_commands": "CommandRCnt",
+ "repl_commands_per_sec": "CommandR",
+ "member_status": "NodeType",
+ "state": "NodeState",
+ "repl_state": "NodeStateInt",
+ "repl_lag": "ReplLag",
+ "repl_network_bytes": "ReplNetworkBytes",
+ "repl_network_getmores_num": "ReplNetworkGetmoresNum",
+ "repl_network_getmores_total_millis": "ReplNetworkGetmoresTotalMillis",
+ "repl_network_ops": "ReplNetworkOps",
+ "repl_buffer_count": "ReplBufferCount",
+ "repl_buffer_size_bytes": "ReplBufferSizeBytes",
+ "repl_apply_batches_num": "ReplApplyBatchesNum",
+ "repl_apply_batches_total_millis": "ReplApplyBatchesTotalMillis",
+ "repl_apply_ops": "ReplApplyOps",
+ "repl_executor_pool_in_progress_count": "ReplExecutorPoolInProgressCount",
+ "repl_executor_queues_network_in_progress": "ReplExecutorQueuesNetworkInProgress",
+ "repl_executor_queues_sleepers": "ReplExecutorQueuesSleepers",
+ "repl_executor_unsignaled_events": "ReplExecutorUnsignaledEvents",
}
var DefaultClusterStats = map[string]string{
@@ -139,6 +206,7 @@ var WiredTigerExtStats = map[string]string{
"wtcache_pages_evicted_by_app_thread": "PagesEvictedByAppThread",
"wtcache_pages_queued_for_eviction": "PagesQueuedForEviction",
"wtcache_pages_read_into": "PagesReadIntoCache",
+ "wtcache_pages_written_from": "PagesWrittenFromCache",
"wtcache_pages_requested_from": "PagesRequestedFromCache",
"wtcache_server_evicting_pages": "ServerEvictingPages",
"wtcache_worker_thread_evictingpages": "WorkerThreadEvictingPages",
@@ -147,6 +215,34 @@ var WiredTigerExtStats = map[string]string{
"wtcache_unmodified_pages_evicted": "UnmodifiedPagesEvicted",
}
+var DefaultTCMallocStats = map[string]string{
+ "tcmalloc_current_allocated_bytes": "TCMallocCurrentAllocatedBytes",
+ "tcmalloc_heap_size": "TCMallocHeapSize",
+ "tcmalloc_central_cache_free_bytes": "TCMallocCentralCacheFreeBytes",
+ "tcmalloc_current_total_thread_cache_bytes": "TCMallocCurrentTotalThreadCacheBytes",
+ "tcmalloc_max_total_thread_cache_bytes": "TCMallocMaxTotalThreadCacheBytes",
+ "tcmalloc_total_free_bytes": "TCMallocTotalFreeBytes",
+ "tcmalloc_transfer_cache_free_bytes": "TCMallocTransferCacheFreeBytes",
+ "tcmalloc_thread_cache_free_bytes": "TCMallocThreadCacheFreeBytes",
+ "tcmalloc_spinlock_total_delay_ns": "TCMallocSpinLockTotalDelayNanos",
+ "tcmalloc_pageheap_free_bytes": "TCMallocPageheapFreeBytes",
+ "tcmalloc_pageheap_unmapped_bytes": "TCMallocPageheapUnmappedBytes",
+ "tcmalloc_pageheap_committed_bytes": "TCMallocPageheapComittedBytes",
+ "tcmalloc_pageheap_scavenge_count": "TCMallocPageheapScavengeCount",
+ "tcmalloc_pageheap_commit_count": "TCMallocPageheapCommitCount",
+ "tcmalloc_pageheap_total_commit_bytes": "TCMallocPageheapTotalCommitBytes",
+ "tcmalloc_pageheap_decommit_count": "TCMallocPageheapDecommitCount",
+ "tcmalloc_pageheap_total_decommit_bytes": "TCMallocPageheapTotalDecommitBytes",
+ "tcmalloc_pageheap_reserve_count": "TCMallocPageheapReserveCount",
+ "tcmalloc_pageheap_total_reserve_bytes": "TCMallocPageheapTotalReserveBytes",
+}
+
+var DefaultStorageStats = map[string]string{
+ "storage_freelist_search_bucket_exhausted": "StorageFreelistSearchBucketExhausted",
+ "storage_freelist_search_requests": "StorageFreelistSearchRequests",
+ "storage_freelist_search_scanned": "StorageFreelistSearchScanned",
+}
+
var DbDataStats = map[string]string{
"collections": "Collections",
"objects": "Objects",
@@ -159,6 +255,15 @@ var DbDataStats = map[string]string{
"ok": "Ok",
}
+var ColDataStats = map[string]string{
+ "count": "Count",
+ "size": "Size",
+ "avg_obj_size": "AvgObjSize",
+ "storage_size": "StorageSize",
+ "total_index_size": "TotalIndexSize",
+ "ok": "Ok",
+}
+
func (d *MongodbData) AddDbStats() {
for _, dbstat := range d.StatLine.DbStatsLines {
dbStatLine := reflect.ValueOf(&dbstat).Elem()
@@ -175,6 +280,23 @@ func (d *MongodbData) AddDbStats() {
}
}
+func (d *MongodbData) AddColStats() {
+ for _, colstat := range d.StatLine.ColStatsLines {
+ colStatLine := reflect.ValueOf(&colstat).Elem()
+ newColData := &ColData{
+ Name: colstat.Name,
+ DbName: colstat.DbName,
+ Fields: make(map[string]interface{}),
+ }
+ newColData.Fields["type"] = "col_stat"
+ for key, value := range ColDataStats {
+ val := colStatLine.FieldByName(value).Interface()
+ newColData.Fields[key] = val
+ }
+ d.ColData = append(d.ColData, *newColData)
+ }
+}
+
func (d *MongodbData) AddShardHostStats() {
for host, hostStat := range d.StatLine.ShardHostStatsLines {
hostStatLine := reflect.ValueOf(&hostStat).Elem()
@@ -196,9 +318,32 @@ func (d *MongodbData) AddDefaultStats() {
d.addStat(statLine, DefaultStats)
if d.StatLine.NodeType != "" {
d.addStat(statLine, DefaultReplStats)
+ d.Tags["node_type"] = d.StatLine.NodeType
+ }
+
+ if d.StatLine.ReadLatency > 0 {
+ d.addStat(statLine, DefaultLatencyStats)
+ }
+
+ if d.StatLine.ReplSetName != "" {
+ d.Tags["rs_name"] = d.StatLine.ReplSetName
+ }
+
+ if d.StatLine.OplogStats != nil {
+ d.add("repl_oplog_window_sec", d.StatLine.OplogStats.TimeDiff)
}
+
+ if d.StatLine.Version != "" {
+ d.add("version", d.StatLine.Version)
+ }
+
+ d.addStat(statLine, DefaultAssertsStats)
d.addStat(statLine, DefaultClusterStats)
+ d.addStat(statLine, DefaultCommandsStats)
d.addStat(statLine, DefaultShardStats)
+ d.addStat(statLine, DefaultStorageStats)
+ d.addStat(statLine, DefaultTCMallocStats)
+
if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" {
d.addStat(statLine, MmapStats)
} else if d.StatLine.StorageEngine == "wiredTiger" {
@@ -209,6 +354,7 @@ func (d *MongodbData) AddDefaultStats() {
d.add(key, floatVal)
}
d.addStat(statLine, WiredTigerExtStats)
+ d.add("page_faults", d.StatLine.FaultsCnt)
}
}
@@ -242,6 +388,17 @@ func (d *MongodbData) flush(acc telegraf.Accumulator) {
)
db.Fields = make(map[string]interface{})
}
+ for _, col := range d.ColData {
+ d.Tags["collection"] = col.Name
+ d.Tags["db_name"] = col.DbName
+ acc.AddFields(
+ "mongodb_col_stats",
+ col.Fields,
+ d.Tags,
+ d.StatLine.Time,
+ )
+ col.Fields = make(map[string]interface{})
+ }
for _, host := range d.ShardHostData {
d.Tags["hostname"] = host.Name
acc.AddFields(
diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go
index da50bdc9e47f4..4a1730211b594 100644
--- a/plugins/inputs/mongodb/mongodb_data_test.go
+++ b/plugins/inputs/mongodb/mongodb_data_test.go
@@ -14,39 +14,48 @@ var tags = make(map[string]string)
func TestAddNonReplStats(t *testing.T) {
d := NewMongodbData(
&StatLine{
- StorageEngine: "",
- Time: time.Now(),
- Insert: 0,
- Query: 0,
- Update: 0,
- UpdateCnt: 0,
- Delete: 0,
- GetMore: 0,
- Command: 0,
- Flushes: 0,
- FlushesCnt: 0,
- Virtual: 0,
- Resident: 0,
- QueuedReaders: 0,
- QueuedWriters: 0,
- ActiveReaders: 0,
- ActiveWriters: 0,
- NetIn: 0,
- NetOut: 0,
- NumConnections: 0,
- Passes: 0,
- DeletedDocuments: 0,
- TimedOutC: 0,
- NoTimeoutC: 0,
- PinnedC: 0,
- TotalC: 0,
- DeletedD: 0,
- InsertedD: 0,
- ReturnedD: 0,
- UpdatedD: 0,
- CurrentC: 0,
- AvailableC: 0,
- TotalCreatedC: 0,
+ StorageEngine: "",
+ Time: time.Now(),
+ UptimeNanos: 0,
+ Insert: 0,
+ Query: 0,
+ Update: 0,
+ UpdateCnt: 0,
+ Delete: 0,
+ GetMore: 0,
+ Command: 0,
+ Flushes: 0,
+ FlushesCnt: 0,
+ Virtual: 0,
+ Resident: 0,
+ QueuedReaders: 0,
+ QueuedWriters: 0,
+ ActiveReaders: 0,
+ ActiveWriters: 0,
+ AvailableReaders: 0,
+ AvailableWriters: 0,
+ TotalTicketsReaders: 0,
+ TotalTicketsWriters: 0,
+ NetIn: 0,
+ NetOut: 0,
+ NumConnections: 0,
+ Passes: 0,
+ DeletedDocuments: 0,
+ TimedOutC: 0,
+ NoTimeoutC: 0,
+ PinnedC: 0,
+ TotalC: 0,
+ DeletedD: 0,
+ InsertedD: 0,
+ ReturnedD: 0,
+ UpdatedD: 0,
+ CurrentC: 0,
+ AvailableC: 0,
+ TotalCreatedC: 0,
+ ScanAndOrderOp: 0,
+ WriteConflictsOp: 0,
+ TotalKeysScanned: 0,
+ TotalObjectsScanned: 0,
},
tags,
)
@@ -97,8 +106,10 @@ func TestAddWiredTigerStats(t *testing.T) {
BytesReadInto: 0,
PagesEvictedByAppThread: 0,
PagesQueuedForEviction: 0,
+ PagesWrittenFromCache: 1247,
ServerEvictingPages: 0,
WorkerThreadEvictingPages: 0,
+ FaultsCnt: 204,
},
tags,
)
@@ -115,6 +126,8 @@ func TestAddWiredTigerStats(t *testing.T) {
for key := range WiredTigerExtStats {
assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key)
}
+
+ assert.True(t, acc.HasInt64Field("mongodb", "page_faults"))
}
func TestAddShardStats(t *testing.T) {
@@ -138,6 +151,142 @@ func TestAddShardStats(t *testing.T) {
}
}
+func TestAddLatencyStats(t *testing.T) {
+ d := NewMongodbData(
+ &StatLine{
+ CommandOpsCnt: 73,
+ CommandLatency: 364,
+ ReadOpsCnt: 113,
+ ReadLatency: 201,
+ WriteOpsCnt: 7,
+ WriteLatency: 55,
+ },
+ tags,
+ )
+
+ var acc testutil.Accumulator
+
+ d.AddDefaultStats()
+ d.flush(&acc)
+
+ for key := range DefaultLatencyStats {
+ assert.True(t, acc.HasInt64Field("mongodb", key))
+ }
+}
+
+func TestAddAssertsStats(t *testing.T) {
+ d := NewMongodbData(
+ &StatLine{
+ Regular: 3,
+ Warning: 9,
+ Msg: 2,
+ User: 34,
+ Rollovers: 0,
+ },
+ tags,
+ )
+
+ var acc testutil.Accumulator
+
+ d.AddDefaultStats()
+ d.flush(&acc)
+
+ for key := range DefaultAssertsStats {
+ assert.True(t, acc.HasInt64Field("mongodb", key))
+ }
+}
+
+func TestAddCommandsStats(t *testing.T) {
+ d := NewMongodbData(
+ &StatLine{
+ AggregateCommandTotal: 12,
+ AggregateCommandFailed: 2,
+ CountCommandTotal: 18,
+ CountCommandFailed: 5,
+ DeleteCommandTotal: 73,
+ DeleteCommandFailed: 364,
+ DistinctCommandTotal: 87,
+ DistinctCommandFailed: 19,
+ FindCommandTotal: 113,
+ FindCommandFailed: 201,
+ FindAndModifyCommandTotal: 7,
+ FindAndModifyCommandFailed: 55,
+ GetMoreCommandTotal: 4,
+ GetMoreCommandFailed: 55,
+ InsertCommandTotal: 34,
+ InsertCommandFailed: 65,
+ UpdateCommandTotal: 23,
+ UpdateCommandFailed: 6,
+ },
+ tags,
+ )
+
+ var acc testutil.Accumulator
+
+ d.AddDefaultStats()
+ d.flush(&acc)
+
+ for key := range DefaultCommandsStats {
+ assert.True(t, acc.HasInt64Field("mongodb", key))
+ }
+}
+
+func TestAddTCMallocStats(t *testing.T) {
+ d := NewMongodbData(
+ &StatLine{
+ TCMallocCurrentAllocatedBytes: 5877253096,
+ TCMallocHeapSize: 8067108864,
+ TCMallocPageheapFreeBytes: 1054994432,
+ TCMallocPageheapUnmappedBytes: 677859328,
+ TCMallocMaxTotalThreadCacheBytes: 1073741824,
+ TCMallocCurrentTotalThreadCacheBytes: 80405312,
+ TCMallocTotalFreeBytes: 457002008,
+ TCMallocCentralCacheFreeBytes: 375131800,
+ TCMallocTransferCacheFreeBytes: 1464896,
+ TCMallocThreadCacheFreeBytes: 80405312,
+ TCMallocPageheapComittedBytes: 7389249536,
+ TCMallocPageheapScavengeCount: 396394,
+ TCMallocPageheapCommitCount: 641765,
+ TCMallocPageheapTotalCommitBytes: 102248751104,
+ TCMallocPageheapDecommitCount: 396394,
+ TCMallocPageheapTotalDecommitBytes: 94859501568,
+ TCMallocPageheapReserveCount: 6179,
+ TCMallocPageheapTotalReserveBytes: 8067108864,
+ TCMallocSpinLockTotalDelayNanos: 2344453860,
+ },
+ tags,
+ )
+
+ var acc testutil.Accumulator
+
+ d.AddDefaultStats()
+ d.flush(&acc)
+
+ for key := range DefaultTCMallocStats {
+ assert.True(t, acc.HasInt64Field("mongodb", key))
+ }
+}
+
+func TestAddStorageStats(t *testing.T) {
+ d := NewMongodbData(
+ &StatLine{
+ StorageFreelistSearchBucketExhausted: 0,
+ StorageFreelistSearchRequests: 0,
+ StorageFreelistSearchScanned: 0,
+ },
+ tags,
+ )
+
+ var acc testutil.Accumulator
+
+ d.AddDefaultStats()
+ d.flush(&acc)
+
+ for key := range DefaultStorageStats {
+ assert.True(t, acc.HasInt64Field("mongodb", key))
+ }
+}
+
func TestAddShardHostStats(t *testing.T) {
expectedHosts := []string{"hostA", "hostB"}
hostStatLines := map[string]ShardHostStatLine{}
@@ -184,83 +333,155 @@ func TestStateTag(t *testing.T) {
Query: 0,
NodeType: "PRI",
NodeState: "PRIMARY",
+ ReplSetName: "rs1",
+ Version: "3.6.17",
},
tags,
)
stateTags := make(map[string]string)
+ stateTags["node_type"] = "PRI"
+ stateTags["rs_name"] = "rs1"
var acc testutil.Accumulator
d.AddDefaultStats()
d.flush(&acc)
fields := map[string]interface{}{
- "active_reads": int64(0),
- "active_writes": int64(0),
- "commands": int64(0),
- "commands_per_sec": int64(0),
- "deletes": int64(0),
- "deletes_per_sec": int64(0),
- "flushes": int64(0),
- "flushes_per_sec": int64(0),
- "flushes_total_time_ns": int64(0),
- "getmores": int64(0),
- "getmores_per_sec": int64(0),
- "inserts": int64(0),
- "inserts_per_sec": int64(0),
- "member_status": "PRI",
- "state": "PRIMARY",
- "net_in_bytes_count": int64(0),
- "net_in_bytes": int64(0),
- "net_out_bytes_count": int64(0),
- "net_out_bytes": int64(0),
- "open_connections": int64(0),
- "queries": int64(0),
- "queries_per_sec": int64(0),
- "queued_reads": int64(0),
- "queued_writes": int64(0),
- "repl_commands": int64(0),
- "repl_commands_per_sec": int64(0),
- "repl_deletes": int64(0),
- "repl_deletes_per_sec": int64(0),
- "repl_getmores": int64(0),
- "repl_getmores_per_sec": int64(0),
- "repl_inserts": int64(0),
- "repl_inserts_per_sec": int64(0),
- "repl_queries": int64(0),
- "repl_queries_per_sec": int64(0),
- "repl_updates": int64(0),
- "repl_updates_per_sec": int64(0),
- "repl_lag": int64(0),
- "repl_oplog_window_sec": int64(0),
- "resident_megabytes": int64(0),
- "updates": int64(0),
- "updates_per_sec": int64(0),
- "vsize_megabytes": int64(0),
- "ttl_deletes": int64(0),
- "ttl_deletes_per_sec": int64(0),
- "ttl_passes": int64(0),
- "ttl_passes_per_sec": int64(0),
- "jumbo_chunks": int64(0),
- "total_in_use": int64(0),
- "total_available": int64(0),
- "total_created": int64(0),
- "total_refreshing": int64(0),
- "cursor_timed_out": int64(0),
- "cursor_timed_out_count": int64(0),
- "cursor_no_timeout": int64(0),
- "cursor_no_timeout_count": int64(0),
- "cursor_pinned": int64(0),
- "cursor_pinned_count": int64(0),
- "cursor_total": int64(0),
- "cursor_total_count": int64(0),
- "document_deleted": int64(0),
- "document_inserted": int64(0),
- "document_returned": int64(0),
- "document_updated": int64(0),
- "connections_current": int64(0),
- "connections_available": int64(0),
- "connections_total_created": int64(0),
+ "active_reads": int64(0),
+ "active_writes": int64(0),
+ "aggregate_command_failed": int64(0),
+ "aggregate_command_total": int64(0),
+ "assert_msg": int64(0),
+ "assert_regular": int64(0),
+ "assert_rollovers": int64(0),
+ "assert_user": int64(0),
+ "assert_warning": int64(0),
+ "available_reads": int64(0),
+ "available_writes": int64(0),
+ "commands": int64(0),
+ "commands_per_sec": int64(0),
+ "connections_available": int64(0),
+ "connections_current": int64(0),
+ "connections_total_created": int64(0),
+ "count_command_failed": int64(0),
+ "count_command_total": int64(0),
+ "cursor_no_timeout": int64(0),
+ "cursor_no_timeout_count": int64(0),
+ "cursor_pinned": int64(0),
+ "cursor_pinned_count": int64(0),
+ "cursor_timed_out": int64(0),
+ "cursor_timed_out_count": int64(0),
+ "cursor_total": int64(0),
+ "cursor_total_count": int64(0),
+ "delete_command_failed": int64(0),
+ "delete_command_total": int64(0),
+ "deletes": int64(0),
+ "deletes_per_sec": int64(0),
+ "distinct_command_failed": int64(0),
+ "distinct_command_total": int64(0),
+ "document_deleted": int64(0),
+ "document_inserted": int64(0),
+ "document_returned": int64(0),
+ "document_updated": int64(0),
+ "find_and_modify_command_failed": int64(0),
+ "find_and_modify_command_total": int64(0),
+ "find_command_failed": int64(0),
+ "find_command_total": int64(0),
+ "flushes": int64(0),
+ "flushes_per_sec": int64(0),
+ "flushes_total_time_ns": int64(0),
+ "get_more_command_failed": int64(0),
+ "get_more_command_total": int64(0),
+ "getmores": int64(0),
+ "getmores_per_sec": int64(0),
+ "insert_command_failed": int64(0),
+ "insert_command_total": int64(0),
+ "inserts": int64(0),
+ "inserts_per_sec": int64(0),
+ "jumbo_chunks": int64(0),
+ "member_status": "PRI",
+ "net_in_bytes": int64(0),
+ "net_in_bytes_count": int64(0),
+ "net_out_bytes": int64(0),
+ "net_out_bytes_count": int64(0),
+ "open_connections": int64(0),
+ "operation_scan_and_order": int64(0),
+ "operation_write_conflicts": int64(0),
+ "queries": int64(0),
+ "queries_per_sec": int64(0),
+ "queued_reads": int64(0),
+ "queued_writes": int64(0),
+ "repl_apply_batches_num": int64(0),
+ "repl_apply_batches_total_millis": int64(0),
+ "repl_apply_ops": int64(0),
+ "repl_buffer_count": int64(0),
+ "repl_buffer_size_bytes": int64(0),
+ "repl_commands": int64(0),
+ "repl_commands_per_sec": int64(0),
+ "repl_deletes": int64(0),
+ "repl_deletes_per_sec": int64(0),
+ "repl_executor_pool_in_progress_count": int64(0),
+ "repl_executor_queues_network_in_progress": int64(0),
+ "repl_executor_queues_sleepers": int64(0),
+ "repl_executor_unsignaled_events": int64(0),
+ "repl_getmores": int64(0),
+ "repl_getmores_per_sec": int64(0),
+ "repl_inserts": int64(0),
+ "repl_inserts_per_sec": int64(0),
+ "repl_lag": int64(0),
+ "repl_network_bytes": int64(0),
+ "repl_network_getmores_num": int64(0),
+ "repl_network_getmores_total_millis": int64(0),
+ "repl_network_ops": int64(0),
+ "repl_queries": int64(0),
+ "repl_queries_per_sec": int64(0),
+ "repl_updates": int64(0),
+ "repl_updates_per_sec": int64(0),
+ "repl_state": int64(0),
+ "resident_megabytes": int64(0),
+ "state": "PRIMARY",
+ "storage_freelist_search_bucket_exhausted": int64(0),
+ "storage_freelist_search_requests": int64(0),
+ "storage_freelist_search_scanned": int64(0),
+ "tcmalloc_central_cache_free_bytes": int64(0),
+ "tcmalloc_current_allocated_bytes": int64(0),
+ "tcmalloc_current_total_thread_cache_bytes": int64(0),
+ "tcmalloc_heap_size": int64(0),
+ "tcmalloc_max_total_thread_cache_bytes": int64(0),
+ "tcmalloc_pageheap_commit_count": int64(0),
+ "tcmalloc_pageheap_committed_bytes": int64(0),
+ "tcmalloc_pageheap_decommit_count": int64(0),
+ "tcmalloc_pageheap_free_bytes": int64(0),
+ "tcmalloc_pageheap_reserve_count": int64(0),
+ "tcmalloc_pageheap_scavenge_count": int64(0),
+ "tcmalloc_pageheap_total_commit_bytes": int64(0),
+ "tcmalloc_pageheap_total_decommit_bytes": int64(0),
+ "tcmalloc_pageheap_total_reserve_bytes": int64(0),
+ "tcmalloc_pageheap_unmapped_bytes": int64(0),
+ "tcmalloc_spinlock_total_delay_ns": int64(0),
+ "tcmalloc_thread_cache_free_bytes": int64(0),
+ "tcmalloc_total_free_bytes": int64(0),
+ "tcmalloc_transfer_cache_free_bytes": int64(0),
+ "total_available": int64(0),
+ "total_created": int64(0),
+ "total_docs_scanned": int64(0),
+ "total_in_use": int64(0),
+ "total_keys_scanned": int64(0),
+ "total_refreshing": int64(0),
+ "total_tickets_reads": int64(0),
+ "total_tickets_writes": int64(0),
+ "ttl_deletes": int64(0),
+ "ttl_deletes_per_sec": int64(0),
+ "ttl_passes": int64(0),
+ "ttl_passes_per_sec": int64(0),
+ "update_command_failed": int64(0),
+ "update_command_total": int64(0),
+ "updates": int64(0),
+ "updates_per_sec": int64(0),
+ "uptime_ns": int64(0),
+ "version": "3.6.17",
+ "vsize_megabytes": int64(0),
}
acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags)
}
diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go
index 6ab236b5829e1..5af48c10a6f9b 100644
--- a/plugins/inputs/mongodb/mongodb_server.go
+++ b/plugins/inputs/mongodb/mongodb_server.go
@@ -1,7 +1,7 @@
package mongodb
import (
- "log"
+ "fmt"
"net/url"
"strings"
"time"
@@ -15,6 +15,8 @@ type Server struct {
Url *url.URL
Session *mgo.Session
lastResult *MongoStatus
+
+ Log telegraf.Logger
}
func (s *Server) getDefaultTags() map[string]string {
@@ -31,49 +33,16 @@ func IsAuthorization(err error) bool {
return strings.Contains(err.Error(), "not authorized")
}
-func (s *Server) gatherOplogStats() *OplogStats {
- stats := &OplogStats{}
- localdb := s.Session.DB("local")
-
- op_first := oplogEntry{}
- op_last := oplogEntry{}
- query := bson.M{"ts": bson.M{"$exists": true}}
-
- for _, collection_name := range []string{"oplog.rs", "oplog.$main"} {
- if err := localdb.C(collection_name).Find(query).Sort("$natural").Limit(1).One(&op_first); err != nil {
- if err == mgo.ErrNotFound {
- continue
- }
- if IsAuthorization(err) {
- log.Println("D! Error getting first oplog entry (" + err.Error() + ")")
- } else {
- log.Println("E! Error getting first oplog entry (" + err.Error() + ")")
- }
- return stats
- }
- if err := localdb.C(collection_name).Find(query).Sort("-$natural").Limit(1).One(&op_last); err != nil {
- if err == mgo.ErrNotFound || IsAuthorization(err) {
- continue
- }
- if IsAuthorization(err) {
- log.Println("D! Error getting first oplog entry (" + err.Error() + ")")
- } else {
- log.Println("E! Error getting first oplog entry (" + err.Error() + ")")
- }
- return stats
- }
+func (s *Server) authLog(err error) {
+ if IsAuthorization(err) {
+ s.Log.Debug(err.Error())
+ } else {
+ s.Log.Error(err.Error())
}
-
- op_first_time := time.Unix(int64(op_first.Timestamp>>32), 0)
- op_last_time := time.Unix(int64(op_last.Timestamp>>32), 0)
- stats.TimeDiff = int64(op_last_time.Sub(op_first_time).Seconds())
- return stats
}
-func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error {
- s.Session.SetMode(mgo.Eventual, true)
- s.Session.SetSocketTimeout(0)
- result_server := &ServerStatus{}
+func (s *Server) gatherServerStatus() (*ServerStatus, error) {
+ serverStatus := &ServerStatus{}
err := s.Session.DB("admin").Run(bson.D{
{
Name: "serverStatus",
@@ -83,83 +52,221 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
Name: "recordStats",
Value: 0,
},
- }, result_server)
+ }, serverStatus)
if err != nil {
- return err
+ return nil, err
}
- result_repl := &ReplSetStatus{}
- // ignore error because it simply indicates that the db is not a member
- // in a replica set, which is fine.
- _ = s.Session.DB("admin").Run(bson.D{
+ return serverStatus, nil
+}
+
+func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) {
+ replSetStatus := &ReplSetStatus{}
+ err := s.Session.DB("admin").Run(bson.D{
{
Name: "replSetGetStatus",
Value: 1,
},
- }, result_repl)
-
- jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()
+ }, replSetStatus)
+ if err != nil {
+ return nil, err
+ }
+ return replSetStatus, nil
+}
- result_cluster := &ClusterStatus{
- JumboChunksCount: int64(jumbo_chunks),
+func (s *Server) gatherClusterStatus() (*ClusterStatus, error) {
+ chunkCount, err := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()
+ if err != nil {
+ return nil, err
}
- resultShards := &ShardStats{}
- err = s.Session.DB("admin").Run(bson.D{
+ return &ClusterStatus{
+ JumboChunksCount: int64(chunkCount),
+ }, nil
+}
+
+func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) {
+ shardStats := &ShardStats{}
+ err := s.Session.DB("admin").Run(bson.D{
{
Name: "shardConnPoolStats",
Value: 1,
},
- }, &resultShards)
+ }, &shardStats)
+ if err != nil {
+ return nil, err
+ }
+ return shardStats, nil
+}
+
+func (s *Server) gatherDBStats(name string) (*Db, error) {
+ stats := &DbStatsData{}
+ err := s.Session.DB(name).Run(bson.D{
+ {
+ Name: "dbStats",
+ Value: 1,
+ },
+ }, stats)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Db{
+ Name: name,
+ DbStatsData: stats,
+ }, nil
+}
+
+func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) {
+ query := bson.M{"ts": bson.M{"$exists": true}}
+
+ var first oplogEntry
+ err := s.Session.DB("local").C(collection).Find(query).Sort("$natural").Limit(1).One(&first)
+ if err != nil {
+ return nil, err
+ }
+
+ var last oplogEntry
+ err = s.Session.DB("local").C(collection).Find(query).Sort("-$natural").Limit(1).One(&last)
+ if err != nil {
+ return nil, err
+ }
+
+ firstTime := time.Unix(int64(first.Timestamp>>32), 0)
+ lastTime := time.Unix(int64(last.Timestamp>>32), 0)
+ stats := &OplogStats{
+ TimeDiff: int64(lastTime.Sub(firstTime).Seconds()),
+ }
+ return stats, nil
+}
+
+// The "oplog.rs" collection is stored on all replica set members.
+//
+// The "oplog.$main" collection is created on the master node of a
+// master-slave replicated deployment. As of MongoDB 3.2, master-slave
+// replication has been deprecated.
+func (s *Server) gatherOplogStats() (*OplogStats, error) {
+ stats, err := s.getOplogReplLag("oplog.rs")
+ if err == nil {
+ return stats, nil
+ }
+
+ return s.getOplogReplLag("oplog.$main")
+}
+
+func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) {
+ names, err := s.Session.DatabaseNames()
if err != nil {
- if IsAuthorization(err) {
- log.Println("D! Error getting database shard stats (" + err.Error() + ")")
- } else {
- log.Println("E! Error getting database shard stats (" + err.Error() + ")")
+ return nil, err
+ }
+
+ results := &ColStats{}
+ for _, dbName := range names {
+ if stringInSlice(dbName, colStatsDbs) || len(colStatsDbs) == 0 {
+ var colls []string
+ colls, err = s.Session.DB(dbName).CollectionNames()
+ if err != nil {
+ s.Log.Errorf("Error getting collection names: %s", err.Error())
+ continue
+ }
+ for _, colName := range colls {
+ colStatLine := &ColStatsData{}
+ err = s.Session.DB(dbName).Run(bson.D{
+ {
+ Name: "collStats",
+ Value: colName,
+ },
+ }, colStatLine)
+ if err != nil {
+ s.authLog(fmt.Errorf("error getting col stats from %q: %v", colName, err))
+ continue
+ }
+ collection := &Collection{
+ Name: colName,
+ DbName: dbName,
+ ColStatsData: colStatLine,
+ }
+ results.Collections = append(results.Collections, *collection)
+ }
}
}
+ return results, nil
+}
+
+func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error {
+ s.Session.SetMode(mgo.Eventual, true)
+ s.Session.SetSocketTimeout(0)
+
+ serverStatus, err := s.gatherServerStatus()
+ if err != nil {
+ return err
+ }
- oplogStats := s.gatherOplogStats()
+ // Get replica set status, an error indicates that the server is not a
+ // member of a replica set.
+ replSetStatus, err := s.gatherReplSetStatus()
+ if err != nil {
+ s.Log.Debugf("Unable to gather replica set status: %s", err.Error())
+ }
- result_db_stats := &DbStats{}
- if gatherDbStats == true {
- names := []string{}
- names, err = s.Session.DatabaseNames()
+ // Gather the oplog if we are a member of a replica set. Non-replica set
+ // members do not have the oplog collections.
+ var oplogStats *OplogStats
+ if replSetStatus != nil {
+ oplogStats, err = s.gatherOplogStats()
if err != nil {
- log.Println("E! Error getting database names (" + err.Error() + ")")
+ s.authLog(fmt.Errorf("Unable to get oplog stats: %v", err))
}
- for _, db_name := range names {
- db_stat_line := &DbStatsData{}
- err = s.Session.DB(db_name).Run(bson.D{
- {
- Name: "dbStats",
- Value: 1,
- },
- }, db_stat_line)
+ }
+
+ var clusterStatus *ClusterStatus
+ if gatherClusterStatus {
+ status, err := s.gatherClusterStatus()
+ if err != nil {
+ s.Log.Debugf("Unable to gather cluster status: %s", err.Error())
+ }
+ clusterStatus = status
+ }
+
+ shardStats, err := s.gatherShardConnPoolStats()
+ if err != nil {
+ s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error()))
+ }
+
+ var collectionStats *ColStats
+ if gatherColStats {
+ stats, err := s.gatherCollectionStats(colStatsDbs)
+ if err != nil {
+ return err
+ }
+ collectionStats = stats
+ }
+
+ dbStats := &DbStats{}
+ if gatherDbStats {
+ names, err := s.Session.DatabaseNames()
+ if err != nil {
+ return err
+ }
+
+ for _, name := range names {
+ db, err := s.gatherDBStats(name)
if err != nil {
- log.Println("E! Error getting db stats from " + db_name + "(" + err.Error() + ")")
+ s.Log.Debugf("Error getting db stats from %q: %s", name, err.Error())
}
- db := &Db{
- Name: db_name,
- DbStatsData: db_stat_line,
- }
-
- result_db_stats.Dbs = append(result_db_stats.Dbs, *db)
+ dbStats.Dbs = append(dbStats.Dbs, *db)
}
}
result := &MongoStatus{
- ServerStatus: result_server,
- ReplSetStatus: result_repl,
- ClusterStatus: result_cluster,
- DbStats: result_db_stats,
- ShardStats: resultShards,
+ ServerStatus: serverStatus,
+ ReplSetStatus: replSetStatus,
+ ClusterStatus: clusterStatus,
+ DbStats: dbStats,
+ ColStats: collectionStats,
+ ShardStats: shardStats,
OplogStats: oplogStats,
}
- defer func() {
- s.lastResult = result
- }()
-
result.SampleTime = time.Now()
if s.lastResult != nil && result != nil {
duration := result.SampleTime.Sub(s.lastResult.SampleTime)
@@ -173,8 +280,20 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
)
data.AddDefaultStats()
data.AddDbStats()
+ data.AddColStats()
data.AddShardHostStats()
data.flush(acc)
}
+
+ s.lastResult = result
return nil
}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go
index b763631ca74af..ee96d5f8b3ad1 100644
--- a/plugins/inputs/mongodb/mongostat.go
+++ b/plugins/inputs/mongodb/mongostat.go
@@ -1,7 +1,7 @@
/***
The code contained here came from https://github.com/mongodb/mongo-tools/blob/master/mongostat/stat_types.go
and contains modifications so that no other dependency from that project is needed. Other modifications included
-removing uneccessary code specific to formatting the output and determine the current state of the database. It
+removing unnecessary code specific to formatting the output and determine the current state of the database. It
is licensed under Apache Version 2.0, http://www.apache.org/licenses/LICENSE-2.0.html
***/
@@ -34,6 +34,7 @@ type MongoStatus struct {
ReplSetStatus *ReplSetStatus
ClusterStatus *ClusterStatus
DbStats *DbStats
+ ColStats *ColStats
ShardStats *ShardStats
OplogStats *OplogStats
}
@@ -47,7 +48,7 @@ type ServerStatus struct {
UptimeMillis int64 `bson:"uptimeMillis"`
UptimeEstimate int64 `bson:"uptimeEstimate"`
LocalTime time.Time `bson:"localTime"`
- Asserts map[string]int64 `bson:"asserts"`
+ Asserts *AssertsStats `bson:"asserts"`
BackgroundFlushing *FlushStats `bson:"backgroundFlushing"`
ExtraInfo *ExtraInfo `bson:"extra_info"`
Connections *ConnectionStats `bson:"connections"`
@@ -57,6 +58,7 @@ type ServerStatus struct {
Network *NetworkStats `bson:"network"`
Opcounters *OpcountStats `bson:"opcounters"`
OpcountersRepl *OpcountStats `bson:"opcountersRepl"`
+ OpLatencies *OpLatenciesStats `bson:"opLatencies"`
RecordStats *DBRecordStats `bson:"recordStats"`
Mem *MemStats `bson:"mem"`
Repl *ReplStatus `bson:"repl"`
@@ -64,6 +66,7 @@ type ServerStatus struct {
StorageEngine map[string]string `bson:"storageEngine"`
WiredTiger *WiredTiger `bson:"wiredTiger"`
Metrics *MetricsStats `bson:"metrics"`
+ TCMallocStats *TCMallocStats `bson:"tcmalloc"`
}
// DbStats stores stats from all dbs
@@ -92,6 +95,26 @@ type DbStatsData struct {
GleStats interface{} `bson:"gleStats"`
}
+type ColStats struct {
+ Collections []Collection
+}
+
+type Collection struct {
+ Name string
+ DbName string
+ ColStatsData *ColStatsData
+}
+
+type ColStatsData struct {
+ Collection string `bson:"ns"`
+ Count int64 `bson:"count"`
+ Size int64 `bson:"size"`
+ AvgObjSize float64 `bson:"avgObjSize"`
+ StorageSize int64 `bson:"storageSize"`
+ TotalIndexSize int64 `bson:"totalIndexSize"`
+ Ok int64 `bson:"ok"`
+}
+
// ClusterStatus stores information related to the whole cluster
type ClusterStatus struct {
JumboChunksCount int64
@@ -152,7 +175,18 @@ type ConcurrentTransactions struct {
}
type ConcurrentTransStats struct {
- Out int64 `bson:"out"`
+ Out int64 `bson:"out"`
+ Available int64 `bson:"available"`
+ TotalTickets int64 `bson:"totalTickets"`
+}
+
+// AssertsStats stores information related to assertions raised since the MongoDB process started
+type AssertsStats struct {
+ Regular int64 `bson:"regular"`
+ Warning int64 `bson:"warning"`
+ Msg int64 `bson:"msg"`
+ User int64 `bson:"user"`
+ Rollovers int64 `bson:"rollovers"`
}
// CacheStats stores cache statistics for WiredTiger.
@@ -169,6 +203,7 @@ type CacheStats struct {
PagesEvictedByAppThread int64 `bson:"pages evicted by application threads"`
PagesQueuedForEviction int64 `bson:"pages queued for eviction"`
PagesReadIntoCache int64 `bson:"pages read into cache"`
+ PagesWrittenFromCache int64 `bson:"pages written from cache"`
PagesRequestedFromCache int64 `bson:"pages requested from the cache"`
ServerEvictingPages int64 `bson:"eviction server evicting pages"`
WorkerThreadEvictingPages int64 `bson:"eviction worker thread evicting pages"`
@@ -231,7 +266,7 @@ type FlushStats struct {
type ConnectionStats struct {
Current int64 `bson:"current"`
Available int64 `bson:"available"`
- TotalCreated int64 `bson:"total_created"`
+ TotalCreated int64 `bson:"totalCreated"`
}
// DurTiming stores information related to journaling.
@@ -283,7 +318,7 @@ type NetworkStats struct {
NumRequests int64 `bson:"numRequests"`
}
-// OpcountStats stores information related to comamnds and basic CRUD operations.
+// OpcountStats stores information related to commands and basic CRUD operations.
type OpcountStats struct {
Insert int64 `bson:"insert"`
Query int64 `bson:"query"`
@@ -293,11 +328,29 @@ type OpcountStats struct {
Command int64 `bson:"command"`
}
+// OpLatenciesStats stores information related to operation latencies for the database as a whole
+type OpLatenciesStats struct {
+ Reads *LatencyStats `bson:"reads"`
+ Writes *LatencyStats `bson:"writes"`
+ Commands *LatencyStats `bson:"commands"`
+}
+
+// LatencyStats lists total latency in microseconds and count of operations, enabling you to obtain an average
+type LatencyStats struct {
+ Latency int64 `bson:"latency"`
+ Ops int64 `bson:"ops"`
+}
+
// MetricsStats stores information related to metrics
type MetricsStats struct {
- TTL *TTLStats `bson:"ttl"`
- Cursor *CursorStats `bson:"cursor"`
- Document *DocumentStats `bson:"document"`
+ TTL *TTLStats `bson:"ttl"`
+ Cursor *CursorStats `bson:"cursor"`
+ Document *DocumentStats `bson:"document"`
+ Commands *CommandsStats `bson:"commands"`
+ Operation *OperationStats `bson:"operation"`
+ QueryExecutor *QueryExecutorStats `bson:"queryExecutor"`
+ Repl *ReplStats `bson:"repl"`
+ Storage *StorageStats `bson:"storage"`
}
// TTLStats stores information related to documents with a ttl index.
@@ -320,6 +373,24 @@ type DocumentStats struct {
Updated int64 `bson:"updated"`
}
+// CommandsStats stores information related to document metrics.
+type CommandsStats struct {
+ Aggregate *CommandsStatsValue `bson:"aggregate"`
+ Count *CommandsStatsValue `bson:"count"`
+ Delete *CommandsStatsValue `bson:"delete"`
+ Distinct *CommandsStatsValue `bson:"distinct"`
+ Find *CommandsStatsValue `bson:"find"`
+ FindAndModify *CommandsStatsValue `bson:"findAndModify"`
+ GetMore *CommandsStatsValue `bson:"getMore"`
+ Insert *CommandsStatsValue `bson:"insert"`
+ Update *CommandsStatsValue `bson:"update"`
+}
+
+type CommandsStatsValue struct {
+ Failed int64 `bson:"failed"`
+ Total int64 `bson:"total"`
+}
+
// OpenCursorStats stores information related to open cursor metrics
type OpenCursorStats struct {
NoTimeout int64 `bson:"noTimeout"`
@@ -327,6 +398,59 @@ type OpenCursorStats struct {
Total int64 `bson:"total"`
}
+// OperationStats stores information related to query operations
+// using special operation types
+type OperationStats struct {
+ ScanAndOrder int64 `bson:"scanAndOrder"`
+ WriteConflicts int64 `bson:"writeConflicts"`
+}
+
+// QueryExecutorStats stores information related to query execution
+type QueryExecutorStats struct {
+ Scanned int64 `bson:"scanned"`
+ ScannedObjects int64 `bson:"scannedObjects"`
+}
+
+// ReplStats stores information related to replication process
+type ReplStats struct {
+ Apply *ReplApplyStats `bson:"apply"`
+ Buffer *ReplBufferStats `bson:"buffer"`
+ Executor *ReplExecutorStats `bson:"executor,omitempty"`
+ Network *ReplNetworkStats `bson:"network"`
+}
+
+// ReplApplyStats stores information related to oplog application process
+type ReplApplyStats struct {
+ Batches *BasicStats `bson:"batches"`
+ Ops int64 `bson:"ops"`
+}
+
+// ReplBufferStats stores information related to oplog buffer
+type ReplBufferStats struct {
+ Count int64 `bson:"count"`
+ SizeBytes int64 `bson:"sizeBytes"`
+}
+
+// ReplExecutorStats stores information related to replication executor
+type ReplExecutorStats struct {
+ Pool map[string]int64 `bson:"pool"`
+ Queues map[string]int64 `bson:"queues"`
+ UnsignaledEvents int64 `bson:"unsignaledEvents"`
+}
+
+// ReplNetworkStats stores information related to network usage by replication process
+type ReplNetworkStats struct {
+ Bytes int64 `bson:"bytes"`
+ GetMores *BasicStats `bson:"getmores"`
+ Ops int64 `bson:"ops"`
+}
+
+// BasicStats stores information about an operation
+type BasicStats struct {
+ Num int64 `bson:"num"`
+ TotalMillis int64 `bson:"totalMillis"`
+}
+
// ReadWriteLockTimes stores time spent holding read/write locks.
type ReadWriteLockTimes struct {
Read int64 `bson:"R"`
@@ -353,6 +477,46 @@ type ExtraInfo struct {
PageFaults *int64 `bson:"page_faults"`
}
+// TCMallocStats stores information related to TCMalloc memory allocator metrics
+type TCMallocStats struct {
+ Generic *GenericTCMAllocStats `bson:"generic"`
+ TCMalloc *DetailedTCMallocStats `bson:"tcmalloc"`
+}
+
+// GenericTCMAllocStats stores generic TCMalloc memory allocator metrics
+type GenericTCMAllocStats struct {
+ CurrentAllocatedBytes int64 `bson:"current_allocated_bytes"`
+ HeapSize int64 `bson:"heap_size"`
+}
+
+// DetailedTCMallocStats stores detailed TCMalloc memory allocator metrics
+type DetailedTCMallocStats struct {
+ PageheapFreeBytes int64 `bson:"pageheap_free_bytes"`
+ PageheapUnmappedBytes int64 `bson:"pageheap_unmapped_bytes"`
+ MaxTotalThreadCacheBytes int64 `bson:"max_total_thread_cache_bytes"`
+ CurrentTotalThreadCacheBytes int64 `bson:"current_total_thread_cache_bytes"`
+ TotalFreeBytes int64 `bson:"total_free_bytes"`
+ CentralCacheFreeBytes int64 `bson:"central_cache_free_bytes"`
+ TransferCacheFreeBytes int64 `bson:"transfer_cache_free_bytes"`
+ ThreadCacheFreeBytes int64 `bson:"thread_cache_free_bytes"`
+ PageheapComittedBytes int64 `bson:"pageheap_committed_bytes"`
+ PageheapScavengeCount int64 `bson:"pageheap_scavenge_count"`
+ PageheapCommitCount int64 `bson:"pageheap_commit_count"`
+ PageheapTotalCommitBytes int64 `bson:"pageheap_total_commit_bytes"`
+ PageheapDecommitCount int64 `bson:"pageheap_decommit_count"`
+ PageheapTotalDecommitBytes int64 `bson:"pageheap_total_decommit_bytes"`
+ PageheapReserveCount int64 `bson:"pageheap_reserve_count"`
+ PageheapTotalReserveBytes int64 `bson:"pageheap_total_reserve_bytes"`
+ SpinLockTotalDelayNanos int64 `bson:"spinlock_total_delay_ns"`
+}
+
+// StorageStats stores information related to record allocations
+type StorageStats struct {
+ FreelistSearchBucketExhausted int64 `bson:"freelist.search.bucketExhausted"`
+ FreelistSearchRequests int64 `bson:"freelist.search.requests"`
+ FreelistSearchScanned int64 `bson:"freelist.search.scanned"`
+}
+
// StatHeader describes a single column for mongostat's terminal output,
// its formatting, and in which modes it should be displayed.
type StatHeader struct {
@@ -455,6 +619,9 @@ type StatLine struct {
Error error
IsMongos bool
Host string
+ Version string
+
+ UptimeNanos int64
// The time at which this StatLine was generated.
Time time.Time
@@ -470,6 +637,21 @@ type StatLine struct {
GetMore, GetMoreCnt int64
Command, CommandCnt int64
+ // Asserts fields
+ Regular int64
+ Warning int64
+ Msg int64
+ User int64
+ Rollovers int64
+
+ // OpLatency fields
+ WriteOpsCnt int64
+ WriteLatency int64
+ ReadOpsCnt int64
+ ReadLatency int64
+ CommandOpsCnt int64
+ CommandLatency int64
+
// TTL fields
Passes, PassesCnt int64
DeletedDocuments, DeletedDocumentsCnt int64
@@ -483,6 +665,23 @@ type StatLine struct {
// Document fields
DeletedD, InsertedD, ReturnedD, UpdatedD int64
+ //Commands fields
+ AggregateCommandTotal, AggregateCommandFailed int64
+ CountCommandTotal, CountCommandFailed int64
+ DeleteCommandTotal, DeleteCommandFailed int64
+ DistinctCommandTotal, DistinctCommandFailed int64
+ FindCommandTotal, FindCommandFailed int64
+ FindAndModifyCommandTotal, FindAndModifyCommandFailed int64
+ GetMoreCommandTotal, GetMoreCommandFailed int64
+ InsertCommandTotal, InsertCommandFailed int64
+ UpdateCommandTotal, UpdateCommandFailed int64
+
+ // Operation fields
+ ScanAndOrderOp, WriteConflictsOp int64
+
+ // Query Executor fields
+ TotalKeysScanned, TotalObjectsScanned int64
+
// Connection fields
CurrentC, AvailableC, TotalCreatedC int64
@@ -493,7 +692,7 @@ type StatLine struct {
CacheDirtyPercent float64
CacheUsedPercent float64
- // Cache ultilization extended (wiredtiger only)
+ // Cache utilization extended (wiredtiger only)
TrackedDirtyBytes int64
CurrentCachedBytes int64
MaxBytesConfigured int64
@@ -505,6 +704,7 @@ type StatLine struct {
PagesEvictedByAppThread int64
PagesQueuedForEviction int64
PagesReadIntoCache int64
+ PagesWrittenFromCache int64
PagesRequestedFromCache int64
ServerEvictingPages int64
WorkerThreadEvictingPages int64
@@ -513,27 +713,45 @@ type StatLine struct {
UnmodifiedPagesEvicted int64
// Replicated Opcounter fields
- InsertR, InsertRCnt int64
- QueryR, QueryRCnt int64
- UpdateR, UpdateRCnt int64
- DeleteR, DeleteRCnt int64
- GetMoreR, GetMoreRCnt int64
- CommandR, CommandRCnt int64
- ReplLag int64
- OplogTimeDiff int64
- Flushes, FlushesCnt int64
- FlushesTotalTime int64
- Mapped, Virtual, Resident, NonMapped int64
- Faults, FaultsCnt int64
- HighestLocked *LockStatus
- QueuedReaders, QueuedWriters int64
- ActiveReaders, ActiveWriters int64
- NetIn, NetInCnt int64
- NetOut, NetOutCnt int64
- NumConnections int64
- ReplSetName string
- NodeType string
- NodeState string
+ InsertR, InsertRCnt int64
+ QueryR, QueryRCnt int64
+ UpdateR, UpdateRCnt int64
+ DeleteR, DeleteRCnt int64
+ GetMoreR, GetMoreRCnt int64
+ CommandR, CommandRCnt int64
+ ReplLag int64
+ OplogStats *OplogStats
+ Flushes, FlushesCnt int64
+ FlushesTotalTime int64
+ Mapped, Virtual, Resident, NonMapped int64
+ Faults, FaultsCnt int64
+ HighestLocked *LockStatus
+ QueuedReaders, QueuedWriters int64
+ ActiveReaders, ActiveWriters int64
+ AvailableReaders, AvailableWriters int64
+ TotalTicketsReaders, TotalTicketsWriters int64
+ NetIn, NetInCnt int64
+ NetOut, NetOutCnt int64
+ NumConnections int64
+ ReplSetName string
+ NodeType string
+ NodeState string
+ NodeStateInt int64
+
+ // Replicated Metrics fields
+ ReplNetworkBytes int64
+ ReplNetworkGetmoresNum int64
+ ReplNetworkGetmoresTotalMillis int64
+ ReplNetworkOps int64
+ ReplBufferCount int64
+ ReplBufferSizeBytes int64
+ ReplApplyBatchesNum int64
+ ReplApplyBatchesTotalMillis int64
+ ReplApplyOps int64
+ ReplExecutorPoolInProgressCount int64
+ ReplExecutorQueuesNetworkInProgress int64
+ ReplExecutorQueuesSleepers int64
+ ReplExecutorUnsignaledEvents int64
// Cluster fields
JumboChunksCount int64
@@ -541,11 +759,40 @@ type StatLine struct {
// DB stats field
DbStatsLines []DbStatLine
+ // Col Stats field
+ ColStatsLines []ColStatLine
+
// Shard stats
TotalInUse, TotalAvailable, TotalCreated, TotalRefreshing int64
// Shard Hosts stats field
ShardHostStatsLines map[string]ShardHostStatLine
+
+ // TCMalloc stats field
+ TCMallocCurrentAllocatedBytes int64
+ TCMallocHeapSize int64
+ TCMallocCentralCacheFreeBytes int64
+ TCMallocCurrentTotalThreadCacheBytes int64
+ TCMallocMaxTotalThreadCacheBytes int64
+ TCMallocTotalFreeBytes int64
+ TCMallocTransferCacheFreeBytes int64
+ TCMallocThreadCacheFreeBytes int64
+ TCMallocSpinLockTotalDelayNanos int64
+ TCMallocPageheapFreeBytes int64
+ TCMallocPageheapUnmappedBytes int64
+ TCMallocPageheapComittedBytes int64
+ TCMallocPageheapScavengeCount int64
+ TCMallocPageheapCommitCount int64
+ TCMallocPageheapTotalCommitBytes int64
+ TCMallocPageheapDecommitCount int64
+ TCMallocPageheapTotalDecommitBytes int64
+ TCMallocPageheapReserveCount int64
+ TCMallocPageheapTotalReserveBytes int64
+
+ // Storage stats field
+ StorageFreelistSearchBucketExhausted int64
+ StorageFreelistSearchRequests int64
+ StorageFreelistSearchScanned int64
}
type DbStatLine struct {
@@ -560,6 +807,16 @@ type DbStatLine struct {
IndexSize int64
Ok int64
}
+type ColStatLine struct {
+ Name string
+ DbName string
+ Count int64
+ Size int64
+ AvgObjSize float64
+ StorageSize int64
+ TotalIndexSize int64
+ Ok int64
+}
type ShardHostStatLine struct {
InUse int64
@@ -618,6 +875,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
returnVal := &StatLine{
Key: key,
Host: newStat.Host,
+ Version: newStat.Version,
Mapped: -1,
Virtual: -1,
Resident: -1,
@@ -625,6 +883,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
Faults: -1,
}
+ returnVal.UptimeNanos = 1000 * 1000 * newStat.UptimeMillis
+
// set connection info
returnVal.CurrentC = newStat.Connections.Current
returnVal.AvailableC = newStat.Connections.Available
@@ -646,6 +906,56 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
returnVal.Command, returnVal.CommandCnt = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
}
+ if newStat.OpLatencies != nil {
+ if newStat.OpLatencies.Reads != nil {
+ returnVal.ReadOpsCnt = newStat.OpLatencies.Reads.Ops
+ returnVal.ReadLatency = newStat.OpLatencies.Reads.Latency
+ }
+ if newStat.OpLatencies.Writes != nil {
+ returnVal.WriteOpsCnt = newStat.OpLatencies.Writes.Ops
+ returnVal.WriteLatency = newStat.OpLatencies.Writes.Latency
+ }
+ if newStat.OpLatencies.Commands != nil {
+ returnVal.CommandOpsCnt = newStat.OpLatencies.Commands.Ops
+ returnVal.CommandLatency = newStat.OpLatencies.Commands.Latency
+ }
+ }
+
+ if newStat.Asserts != nil {
+ returnVal.Regular = newStat.Asserts.Regular
+ returnVal.Warning = newStat.Asserts.Warning
+ returnVal.Msg = newStat.Asserts.Msg
+ returnVal.User = newStat.Asserts.User
+ returnVal.Rollovers = newStat.Asserts.Rollovers
+ }
+
+ if newStat.TCMallocStats != nil {
+ if newStat.TCMallocStats.Generic != nil {
+ returnVal.TCMallocCurrentAllocatedBytes = newStat.TCMallocStats.Generic.CurrentAllocatedBytes
+ returnVal.TCMallocHeapSize = newStat.TCMallocStats.Generic.HeapSize
+ }
+ if newStat.TCMallocStats.TCMalloc != nil {
+ returnVal.TCMallocCentralCacheFreeBytes = newStat.TCMallocStats.TCMalloc.CentralCacheFreeBytes
+ returnVal.TCMallocCurrentTotalThreadCacheBytes = newStat.TCMallocStats.TCMalloc.CurrentTotalThreadCacheBytes
+ returnVal.TCMallocMaxTotalThreadCacheBytes = newStat.TCMallocStats.TCMalloc.MaxTotalThreadCacheBytes
+ returnVal.TCMallocTransferCacheFreeBytes = newStat.TCMallocStats.TCMalloc.TransferCacheFreeBytes
+ returnVal.TCMallocThreadCacheFreeBytes = newStat.TCMallocStats.TCMalloc.ThreadCacheFreeBytes
+ returnVal.TCMallocTotalFreeBytes = newStat.TCMallocStats.TCMalloc.TotalFreeBytes
+ returnVal.TCMallocSpinLockTotalDelayNanos = newStat.TCMallocStats.TCMalloc.SpinLockTotalDelayNanos
+
+ returnVal.TCMallocPageheapFreeBytes = newStat.TCMallocStats.TCMalloc.PageheapFreeBytes
+ returnVal.TCMallocPageheapUnmappedBytes = newStat.TCMallocStats.TCMalloc.PageheapUnmappedBytes
+ returnVal.TCMallocPageheapComittedBytes = newStat.TCMallocStats.TCMalloc.PageheapComittedBytes
+ returnVal.TCMallocPageheapScavengeCount = newStat.TCMallocStats.TCMalloc.PageheapScavengeCount
+ returnVal.TCMallocPageheapCommitCount = newStat.TCMallocStats.TCMalloc.PageheapCommitCount
+ returnVal.TCMallocPageheapTotalCommitBytes = newStat.TCMallocStats.TCMalloc.PageheapTotalCommitBytes
+ returnVal.TCMallocPageheapDecommitCount = newStat.TCMallocStats.TCMalloc.PageheapDecommitCount
+ returnVal.TCMallocPageheapTotalDecommitBytes = newStat.TCMallocStats.TCMalloc.PageheapTotalDecommitBytes
+ returnVal.TCMallocPageheapReserveCount = newStat.TCMallocStats.TCMalloc.PageheapReserveCount
+ returnVal.TCMallocPageheapTotalReserveBytes = newStat.TCMallocStats.TCMalloc.PageheapTotalReserveBytes
+ }
+ }
+
if newStat.Metrics != nil && oldStat.Metrics != nil {
if newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil {
returnVal.Passes, returnVal.PassesCnt = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
@@ -665,6 +975,85 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
returnVal.ReturnedD = newStat.Metrics.Document.Returned
returnVal.UpdatedD = newStat.Metrics.Document.Updated
}
+
+ if newStat.Metrics.Commands != nil {
+ if newStat.Metrics.Commands.Aggregate != nil {
+ returnVal.AggregateCommandTotal = newStat.Metrics.Commands.Aggregate.Total
+ returnVal.AggregateCommandFailed = newStat.Metrics.Commands.Aggregate.Failed
+ }
+ if newStat.Metrics.Commands.Count != nil {
+ returnVal.CountCommandTotal = newStat.Metrics.Commands.Count.Total
+ returnVal.CountCommandFailed = newStat.Metrics.Commands.Count.Failed
+ }
+ if newStat.Metrics.Commands.Delete != nil {
+ returnVal.DeleteCommandTotal = newStat.Metrics.Commands.Delete.Total
+ returnVal.DeleteCommandFailed = newStat.Metrics.Commands.Delete.Failed
+ }
+ if newStat.Metrics.Commands.Distinct != nil {
+ returnVal.DistinctCommandTotal = newStat.Metrics.Commands.Distinct.Total
+ returnVal.DistinctCommandFailed = newStat.Metrics.Commands.Distinct.Failed
+ }
+ if newStat.Metrics.Commands.Find != nil {
+ returnVal.FindCommandTotal = newStat.Metrics.Commands.Find.Total
+ returnVal.FindCommandFailed = newStat.Metrics.Commands.Find.Failed
+ }
+ if newStat.Metrics.Commands.FindAndModify != nil {
+ returnVal.FindAndModifyCommandTotal = newStat.Metrics.Commands.FindAndModify.Total
+ returnVal.FindAndModifyCommandFailed = newStat.Metrics.Commands.FindAndModify.Failed
+ }
+ if newStat.Metrics.Commands.GetMore != nil {
+ returnVal.GetMoreCommandTotal = newStat.Metrics.Commands.GetMore.Total
+ returnVal.GetMoreCommandFailed = newStat.Metrics.Commands.GetMore.Failed
+ }
+ if newStat.Metrics.Commands.Insert != nil {
+ returnVal.InsertCommandTotal = newStat.Metrics.Commands.Insert.Total
+ returnVal.InsertCommandFailed = newStat.Metrics.Commands.Insert.Failed
+ }
+ if newStat.Metrics.Commands.Update != nil {
+ returnVal.UpdateCommandTotal = newStat.Metrics.Commands.Update.Total
+ returnVal.UpdateCommandFailed = newStat.Metrics.Commands.Update.Failed
+ }
+ }
+
+ if newStat.Metrics.Operation != nil {
+ returnVal.ScanAndOrderOp = newStat.Metrics.Operation.ScanAndOrder
+ returnVal.WriteConflictsOp = newStat.Metrics.Operation.WriteConflicts
+ }
+
+ if newStat.Metrics.QueryExecutor != nil {
+ returnVal.TotalKeysScanned = newStat.Metrics.QueryExecutor.Scanned
+ returnVal.TotalObjectsScanned = newStat.Metrics.QueryExecutor.ScannedObjects
+ }
+
+ if newStat.Metrics.Repl != nil {
+ if newStat.Metrics.Repl.Apply != nil {
+ returnVal.ReplApplyBatchesNum = newStat.Metrics.Repl.Apply.Batches.Num
+ returnVal.ReplApplyBatchesTotalMillis = newStat.Metrics.Repl.Apply.Batches.TotalMillis
+ returnVal.ReplApplyOps = newStat.Metrics.Repl.Apply.Ops
+ }
+ if newStat.Metrics.Repl.Buffer != nil {
+ returnVal.ReplBufferCount = newStat.Metrics.Repl.Buffer.Count
+ returnVal.ReplBufferSizeBytes = newStat.Metrics.Repl.Buffer.SizeBytes
+ }
+ if newStat.Metrics.Repl.Executor != nil {
+ returnVal.ReplExecutorPoolInProgressCount = newStat.Metrics.Repl.Executor.Pool["inProgressCount"]
+ returnVal.ReplExecutorQueuesNetworkInProgress = newStat.Metrics.Repl.Executor.Queues["networkInProgress"]
+ returnVal.ReplExecutorQueuesSleepers = newStat.Metrics.Repl.Executor.Queues["sleepers"]
+ returnVal.ReplExecutorUnsignaledEvents = newStat.Metrics.Repl.Executor.UnsignaledEvents
+ }
+ if newStat.Metrics.Repl.Network != nil {
+ returnVal.ReplNetworkBytes = newStat.Metrics.Repl.Network.Bytes
+ returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num
+ returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis
+ returnVal.ReplNetworkOps = newStat.Metrics.Repl.Network.Ops
+ }
+ }
+
+ if newStat.Metrics.Storage != nil {
+ returnVal.StorageFreelistSearchBucketExhausted = newStat.Metrics.Storage.FreelistSearchBucketExhausted
+ returnVal.StorageFreelistSearchRequests = newStat.Metrics.Storage.FreelistSearchRequests
+ returnVal.StorageFreelistSearchScanned = newStat.Metrics.Storage.FreelistSearchScanned
+ }
}
if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil {
@@ -693,6 +1082,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
returnVal.PagesEvictedByAppThread = newStat.WiredTiger.Cache.PagesEvictedByAppThread
returnVal.PagesQueuedForEviction = newStat.WiredTiger.Cache.PagesQueuedForEviction
returnVal.PagesReadIntoCache = newStat.WiredTiger.Cache.PagesReadIntoCache
+ returnVal.PagesWrittenFromCache = newStat.WiredTiger.Cache.PagesWrittenFromCache
returnVal.PagesRequestedFromCache = newStat.WiredTiger.Cache.PagesRequestedFromCache
returnVal.ServerEvictingPages = newStat.WiredTiger.Cache.ServerEvictingPages
returnVal.WorkerThreadEvictingPages = newStat.WiredTiger.Cache.WorkerThreadEvictingPages
@@ -841,6 +1231,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
if hasWT {
returnVal.ActiveReaders = newStat.WiredTiger.Concurrent.Read.Out
returnVal.ActiveWriters = newStat.WiredTiger.Concurrent.Write.Out
+ returnVal.AvailableReaders = newStat.WiredTiger.Concurrent.Read.Available
+ returnVal.AvailableWriters = newStat.WiredTiger.Concurrent.Write.Available
+ returnVal.TotalTicketsReaders = newStat.WiredTiger.Concurrent.Read.TotalTickets
+ returnVal.TotalTicketsWriters = newStat.WiredTiger.Concurrent.Write.TotalTickets
} else if newStat.GlobalLock.ActiveClients != nil {
returnVal.ActiveReaders = newStat.GlobalLock.ActiveClients.Readers
returnVal.ActiveWriters = newStat.GlobalLock.ActiveClients.Writers
@@ -856,84 +1250,119 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
returnVal.NumConnections = newStat.Connections.Current
}
- newReplStat := *newMongo.ReplSetStatus
-
- if newReplStat.Members != nil {
- myName := newStat.Repl.Me
- // Find the master and myself
- master := ReplSetMember{}
- me := ReplSetMember{}
- for _, member := range newReplStat.Members {
- if member.Name == myName {
- // Store my state string
- returnVal.NodeState = member.StateStr
- if member.State == 1 {
- // I'm the master
+ if newMongo.ReplSetStatus != nil {
+ newReplStat := *newMongo.ReplSetStatus
+
+ if newReplStat.Members != nil {
+ myName := newStat.Repl.Me
+ // Find the master and myself
+ master := ReplSetMember{}
+ me := ReplSetMember{}
+ for _, member := range newReplStat.Members {
+ if member.Name == myName {
+ // Store my state string
+ returnVal.NodeState = member.StateStr
+ // Store my state integer
+ returnVal.NodeStateInt = member.State
+
+ if member.State == 1 {
+ // I'm the master
+ returnVal.ReplLag = 0
+ break
+ } else {
+ // I'm secondary
+ me = member
+ }
+ } else if member.State == 1 {
+ // Master found
+ master = member
+ }
+ }
+
+ if me.State == 2 {
+ // OptimeDate.Unix() type is int64
+ lag := master.OptimeDate.Unix() - me.OptimeDate.Unix()
+ if lag < 0 {
returnVal.ReplLag = 0
- break
} else {
- // I'm secondary
- me = member
+ returnVal.ReplLag = lag
}
- } else if member.State == 1 {
- // Master found
- master = member
}
}
+ }
- if me.State == 2 {
- // OptimeDate.Unix() type is int64
- lag := master.OptimeDate.Unix() - me.OptimeDate.Unix()
- if lag < 0 {
- returnVal.ReplLag = 0
- } else {
- returnVal.ReplLag = lag
- }
- }
+ if newMongo.ClusterStatus != nil {
+ newClusterStat := *newMongo.ClusterStatus
+ returnVal.JumboChunksCount = newClusterStat.JumboChunksCount
}
- newClusterStat := *newMongo.ClusterStatus
- returnVal.JumboChunksCount = newClusterStat.JumboChunksCount
- returnVal.OplogTimeDiff = newMongo.OplogStats.TimeDiff
+ if newMongo.OplogStats != nil {
+ returnVal.OplogStats = newMongo.OplogStats
+ }
- newDbStats := *newMongo.DbStats
- for _, db := range newDbStats.Dbs {
- dbStatsData := db.DbStatsData
- // mongos doesn't have the db key, so setting the db name
- if dbStatsData.Db == "" {
- dbStatsData.Db = db.Name
+ if newMongo.DbStats != nil {
+ newDbStats := *newMongo.DbStats
+ for _, db := range newDbStats.Dbs {
+ dbStatsData := db.DbStatsData
+ // mongos doesn't have the db key, so setting the db name
+ if dbStatsData.Db == "" {
+ dbStatsData.Db = db.Name
+ }
+ dbStatLine := &DbStatLine{
+ Name: dbStatsData.Db,
+ Collections: dbStatsData.Collections,
+ Objects: dbStatsData.Objects,
+ AvgObjSize: dbStatsData.AvgObjSize,
+ DataSize: dbStatsData.DataSize,
+ StorageSize: dbStatsData.StorageSize,
+ NumExtents: dbStatsData.NumExtents,
+ Indexes: dbStatsData.Indexes,
+ IndexSize: dbStatsData.IndexSize,
+ Ok: dbStatsData.Ok,
+ }
+ returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine)
}
- dbStatLine := &DbStatLine{
- Name: dbStatsData.Db,
- Collections: dbStatsData.Collections,
- Objects: dbStatsData.Objects,
- AvgObjSize: dbStatsData.AvgObjSize,
- DataSize: dbStatsData.DataSize,
- StorageSize: dbStatsData.StorageSize,
- NumExtents: dbStatsData.NumExtents,
- Indexes: dbStatsData.Indexes,
- IndexSize: dbStatsData.IndexSize,
- Ok: dbStatsData.Ok,
+ }
+
+ if newMongo.ColStats != nil {
+ for _, col := range newMongo.ColStats.Collections {
+ colStatsData := col.ColStatsData
+ // mongos doesn't have the db key, so setting the db name
+ if colStatsData.Collection == "" {
+ colStatsData.Collection = col.Name
+ }
+ colStatLine := &ColStatLine{
+ Name: colStatsData.Collection,
+ DbName: col.DbName,
+ Count: colStatsData.Count,
+ Size: colStatsData.Size,
+ AvgObjSize: colStatsData.AvgObjSize,
+ StorageSize: colStatsData.StorageSize,
+ TotalIndexSize: colStatsData.TotalIndexSize,
+ Ok: colStatsData.Ok,
+ }
+ returnVal.ColStatsLines = append(returnVal.ColStatsLines, *colStatLine)
}
- returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine)
}
// Set shard stats
- newShardStats := *newMongo.ShardStats
- returnVal.TotalInUse = newShardStats.TotalInUse
- returnVal.TotalAvailable = newShardStats.TotalAvailable
- returnVal.TotalCreated = newShardStats.TotalCreated
- returnVal.TotalRefreshing = newShardStats.TotalRefreshing
- returnVal.ShardHostStatsLines = map[string]ShardHostStatLine{}
- for host, stats := range newShardStats.Hosts {
- shardStatLine := &ShardHostStatLine{
- InUse: stats.InUse,
- Available: stats.Available,
- Created: stats.Created,
- Refreshing: stats.Refreshing,
- }
+ if newMongo.ShardStats != nil {
+ newShardStats := *newMongo.ShardStats
+ returnVal.TotalInUse = newShardStats.TotalInUse
+ returnVal.TotalAvailable = newShardStats.TotalAvailable
+ returnVal.TotalCreated = newShardStats.TotalCreated
+ returnVal.TotalRefreshing = newShardStats.TotalRefreshing
+ returnVal.ShardHostStatsLines = map[string]ShardHostStatLine{}
+ for host, stats := range newShardStats.Hosts {
+ shardStatLine := &ShardHostStatLine{
+ InUse: stats.InUse,
+ Available: stats.Available,
+ Created: stats.Created,
+ Refreshing: stats.Refreshing,
+ }
- returnVal.ShardHostStatsLines[host] = *shardStatLine
+ returnVal.ShardHostStatsLines[host] = *shardStatLine
+ }
}
return returnVal
diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go
new file mode 100644
index 0000000000000..5506602a9e692
--- /dev/null
+++ b/plugins/inputs/mongodb/mongostat_test.go
@@ -0,0 +1,205 @@
+package mongodb
+
+import (
+ "testing"
+ //"time"
+
+ //"github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestLatencyStats(t *testing.T) {
+
+ sl := NewStatLine(
+ MongoStatus{
+ ServerStatus: &ServerStatus{
+ Connections: &ConnectionStats{},
+ Mem: &MemStats{
+ Bits: 0,
+ Resident: 0,
+ Virtual: 0,
+ Supported: false,
+ Mapped: 0,
+ MappedWithJournal: 0,
+ },
+ },
+ },
+ MongoStatus{
+ ServerStatus: &ServerStatus{
+ Connections: &ConnectionStats{},
+ Mem: &MemStats{
+ Bits: 0,
+ Resident: 0,
+ Virtual: 0,
+ Supported: false,
+ Mapped: 0,
+ MappedWithJournal: 0,
+ },
+ OpLatencies: &OpLatenciesStats{
+ Reads: &LatencyStats{
+ Ops: 0,
+ Latency: 0,
+ },
+ Writes: &LatencyStats{
+ Ops: 0,
+ Latency: 0,
+ },
+ Commands: &LatencyStats{
+ Ops: 0,
+ Latency: 0,
+ },
+ },
+ },
+ },
+ "foo",
+ true,
+ 60,
+ )
+
+ assert.Equal(t, sl.CommandLatency, int64(0))
+ assert.Equal(t, sl.ReadLatency, int64(0))
+ assert.Equal(t, sl.WriteLatency, int64(0))
+ assert.Equal(t, sl.CommandOpsCnt, int64(0))
+ assert.Equal(t, sl.ReadOpsCnt, int64(0))
+ assert.Equal(t, sl.WriteOpsCnt, int64(0))
+}
+
+func TestLatencyStatsDiffZero(t *testing.T) {
+
+ sl := NewStatLine(
+ MongoStatus{
+ ServerStatus: &ServerStatus{
+ Connections: &ConnectionStats{},
+ Mem: &MemStats{
+ Bits: 0,
+ Resident: 0,
+ Virtual: 0,
+ Supported: false,
+ Mapped: 0,
+ MappedWithJournal: 0,
+ },
+ OpLatencies: &OpLatenciesStats{
+ Reads: &LatencyStats{
+ Ops: 0,
+ Latency: 0,
+ },
+ Writes: &LatencyStats{
+ Ops: 0,
+ Latency: 0,
+ },
+ Commands: &LatencyStats{
+ Ops: 0,
+ Latency: 0,
+ },
+ },
+ },
+ },
+ MongoStatus{
+ ServerStatus: &ServerStatus{
+ Connections: &ConnectionStats{},
+ Mem: &MemStats{
+ Bits: 0,
+ Resident: 0,
+ Virtual: 0,
+ Supported: false,
+ Mapped: 0,
+ MappedWithJournal: 0,
+ },
+ OpLatencies: &OpLatenciesStats{
+ Reads: &LatencyStats{
+ Ops: 0,
+ Latency: 0,
+ },
+ Writes: &LatencyStats{
+ Ops: 0,
+ Latency: 0,
+ },
+ Commands: &LatencyStats{
+ Ops: 0,
+ Latency: 0,
+ },
+ },
+ },
+ },
+ "foo",
+ true,
+ 60,
+ )
+
+ assert.Equal(t, sl.CommandLatency, int64(0))
+ assert.Equal(t, sl.ReadLatency, int64(0))
+ assert.Equal(t, sl.WriteLatency, int64(0))
+ assert.Equal(t, sl.CommandOpsCnt, int64(0))
+ assert.Equal(t, sl.ReadOpsCnt, int64(0))
+ assert.Equal(t, sl.WriteOpsCnt, int64(0))
+}
+
+func TestLatencyStatsDiff(t *testing.T) {
+
+ sl := NewStatLine(
+ MongoStatus{
+ ServerStatus: &ServerStatus{
+ Connections: &ConnectionStats{},
+ Mem: &MemStats{
+ Bits: 0,
+ Resident: 0,
+ Virtual: 0,
+ Supported: false,
+ Mapped: 0,
+ MappedWithJournal: 0,
+ },
+ OpLatencies: &OpLatenciesStats{
+ Reads: &LatencyStats{
+ Ops: 4189041956,
+ Latency: 2255922322753,
+ },
+ Writes: &LatencyStats{
+ Ops: 1691019457,
+ Latency: 494478256915,
+ },
+ Commands: &LatencyStats{
+ Ops: 1019150402,
+ Latency: 59177710371,
+ },
+ },
+ },
+ },
+ MongoStatus{
+ ServerStatus: &ServerStatus{
+ Connections: &ConnectionStats{},
+ Mem: &MemStats{
+ Bits: 0,
+ Resident: 0,
+ Virtual: 0,
+ Supported: false,
+ Mapped: 0,
+ MappedWithJournal: 0,
+ },
+ OpLatencies: &OpLatenciesStats{
+ Reads: &LatencyStats{
+ Ops: 4189049884,
+ Latency: 2255946760057,
+ },
+ Writes: &LatencyStats{
+ Ops: 1691021287,
+ Latency: 494479456987,
+ },
+ Commands: &LatencyStats{
+ Ops: 1019152861,
+ Latency: 59177981552,
+ },
+ },
+ },
+ },
+ "foo",
+ true,
+ 60,
+ )
+
+ assert.Equal(t, sl.CommandLatency, int64(59177981552))
+ assert.Equal(t, sl.ReadLatency, int64(2255946760057))
+ assert.Equal(t, sl.WriteLatency, int64(494479456987))
+ assert.Equal(t, sl.CommandOpsCnt, int64(1019152861))
+ assert.Equal(t, sl.ReadOpsCnt, int64(4189049884))
+ assert.Equal(t, sl.WriteOpsCnt, int64(1691021287))
+}
diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md
new file mode 100644
index 0000000000000..be116394d6609
--- /dev/null
+++ b/plugins/inputs/monit/README.md
@@ -0,0 +1,235 @@
+# Monit Input Plugin
+
+The `monit` plugin gathers metrics and status information about local processes,
+remote hosts, file, file systems, directories and network interfaces managed
+and watched over by [Monit][monit].
+
+The use this plugin you should first enable the [HTTPD TCP port][httpd] in
+Monit.
+
+Minimum Version of Monit tested with is 5.16.
+
+[monit]: https://mmonit.com/
+[httpd]: https://mmonit.com/monit/documentation/monit.html#TCP-PORT
+
+### Configuration
+
+```toml
+[[inputs.monit]]
+ ## Monit HTTPD address
+ address = "http://127.0.0.1:2812"
+
+ ## Username and Password for Monit
+ # username = ""
+ # password = ""
+
+ ## Amount of time allowed to complete the HTTP request
+ # timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+```
+
+### Metrics
+
+- monit_filesystem
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+ - mode
+ - block_percent
+ - block_usage
+ - block_total
+ - inode_percent
+ - inode_usage
+ - inode_total
+
++ monit_directory
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+ - permissions
+
+- monit_file
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+ - size
+ - permissions
+
++ monit_process
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+ - cpu_percent
+ - cpu_percent_total
+ - mem_kb
+ - mem_kb_total
+ - mem_percent
+ - mem_percent_total
+ - pid
+ - parent_pid
+ - threads
+ - children
+
+- monit_remote_host
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+ - hostname
+ - port_number
+ - request
+ - protocol
+ - type
+
++ monit_system
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+ - cpu_system
+ - cpu_user
+ - cpu_wait
+ - cpu_load_avg_1m
+ - cpu_load_avg_5m
+ - cpu_load_avg_15m
+ - mem_kb
+ - mem_percent
+ - swap_kb
+ - swap_percent
+
+- monit_fifo
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+ - permissions
+
++ monit_program
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+
+- monit_network
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+
++ monit_program
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+
+- monit_network
+ - tags:
+ - address
+ - version
+ - service
+ - platform_name
+ - status
+ - monitoring_status
+ - monitoring_mode
+ - fields:
+ - status_code
+ - monitoring_status_code
+ - monitoring_mode_code
+
+### Example Output
+```
+monit_file,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog_pid,source=xyzzy.local,status=running,version=5.20.0 mode=644i,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,size=3i,status_code=0i 1579735047000000000
+monit_process,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog,source=xyzzy.local,status=running,version=5.20.0 children=0i,cpu_percent=0,cpu_percent_total=0,mem_kb=3148i,mem_kb_total=3148i,mem_percent=0.2,mem_percent_total=0.2,monitoring_mode_code=0i,monitoring_status_code=1i,parent_pid=1i,pending_action_code=0i,pid=318i,status_code=0i,threads=4i 1579735047000000000
+monit_program,monitoring_mode=active,monitoring_status=initializing,pending_action=none,platform_name=Linux,service=echo,source=xyzzy.local,status=running,version=5.20.0 monitoring_mode_code=0i,monitoring_status_code=2i,pending_action_code=0i,program_started=0i,program_status=0i,status_code=0i 1579735047000000000
+monit_system,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=debian-stretch-monit.virt,source=xyzzy.local,status=running,version=5.20.0 cpu_load_avg_15m=0,cpu_load_avg_1m=0,cpu_load_avg_5m=0,cpu_system=0,cpu_user=0,cpu_wait=0,mem_kb=42852i,mem_percent=2.1,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,status_code=0i,swap_kb=0,swap_percent=0 1579735047000000000
+```
diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go
new file mode 100644
index 0000000000000..a17042bf5e3a9
--- /dev/null
+++ b/plugins/inputs/monit/monit.go
@@ -0,0 +1,409 @@
+package monit
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "golang.org/x/net/html/charset"
+)
+
+const (
+ fileSystem string = "0"
+ directory = "1"
+ file = "2"
+ process = "3"
+ remoteHost = "4"
+ system = "5"
+ fifo = "6"
+ program = "7"
+ network = "8"
+)
+
+var pendingActions = []string{"ignore", "alert", "restart", "stop", "exec", "unmonitor", "start", "monitor"}
+
+type Status struct {
+ Server Server `xml:"server"`
+ Platform Platform `xml:"platform"`
+ Services []Service `xml:"service"`
+}
+
+type Server struct {
+ ID string `xml:"id"`
+ Version string `xml:"version"`
+ Uptime int64 `xml:"uptime"`
+ Poll int `xml:"poll"`
+ LocalHostname string `xml:"localhostname"`
+ StartDelay int `xml:"startdelay"`
+ ControlFile string `xml:"controlfile"`
+}
+
+type Platform struct {
+ Name string `xml:"name"`
+ Release string `xml:"release"`
+ Version string `xml:"version"`
+ Machine string `xml:"machine"`
+ CPU int `xml:"cpu"`
+ Memory int `xml:"memory"`
+ Swap int `xml:"swap"`
+}
+
+type Service struct {
+ Type string `xml:"type,attr"`
+ Name string `xml:"name"`
+ Status int `xml:"status"`
+ MonitoringStatus int `xml:"monitor"`
+ MonitorMode int `xml:"monitormode"`
+ PendingAction int `xml:"pendingaction"`
+ Memory Memory `xml:"memory"`
+ CPU CPU `xml:"cpu"`
+ System System `xml:"system"`
+ Size int64 `xml:"size"`
+ Mode int `xml:"mode"`
+ Program Program `xml:"program"`
+ Block Block `xml:"block"`
+ Inode Inode `xml:"inode"`
+ Pid int64 `xml:"pid"`
+ ParentPid int64 `xml:"ppid"`
+ Threads int `xml:"threads"`
+ Children int `xml:"children"`
+ Port Port `xml:"port"`
+ Link Link `xml:"link"`
+}
+
+type Link struct {
+ State int `xml:"state"`
+ Speed int64 `xml:"speed"`
+ Duplex int `xml:"duplex"`
+ Download Download `xml:"download"`
+ Upload Upload `xml:"upload"`
+}
+
+type Download struct {
+ Packets struct {
+ Now int64 `xml:"now"`
+ Total int64 `xml:"total"`
+ } `xml:"packets"`
+ Bytes struct {
+ Now int64 `xml:"now"`
+ Total int64 `xml:"total"`
+ } `xml:"bytes"`
+ Errors struct {
+ Now int64 `xml:"now"`
+ Total int64 `xml:"total"`
+ } `xml:"errors"`
+}
+
+type Upload struct {
+ Packets struct {
+ Now int64 `xml:"now"`
+ Total int64 `xml:"total"`
+ } `xml:"packets"`
+ Bytes struct {
+ Now int64 `xml:"now"`
+ Total int64 `xml:"total"`
+ } `xml:"bytes"`
+ Errors struct {
+ Now int64 `xml:"now"`
+ Total int64 `xml:"total"`
+ } `xml:"errors"`
+}
+
+type Port struct {
+ Hostname string `xml:"hostname"`
+ PortNumber int64 `xml:"portnumber"`
+ Request string `xml:"request"`
+ Protocol string `xml:"protocol"`
+ Type string `xml:"type"`
+}
+
+type Block struct {
+ Percent float64 `xml:"percent"`
+ Usage float64 `xml:"usage"`
+ Total float64 `xml:"total"`
+}
+
+type Inode struct {
+ Percent float64 `xml:"percent"`
+ Usage float64 `xml:"usage"`
+ Total float64 `xml:"total"`
+}
+
+type Program struct {
+ Started int64 `xml:"started"`
+ Status int `xml:"status"`
+}
+
+type Memory struct {
+ Percent float64 `xml:"percent"`
+ PercentTotal float64 `xml:"percenttotal"`
+ Kilobyte int64 `xml:"kilobyte"`
+ KilobyteTotal int64 `xml:"kilobytetotal"`
+}
+
+type CPU struct {
+ Percent float64 `xml:"percent"`
+ PercentTotal float64 `xml:"percenttotal"`
+}
+
+type System struct {
+ Load struct {
+ Avg01 float64 `xml:"avg01"`
+ Avg05 float64 `xml:"avg05"`
+ Avg15 float64 `xml:"avg15"`
+ } `xml:"load"`
+ CPU struct {
+ User float64 `xml:"user"`
+ System float64 `xml:"system"`
+ Wait float64 `xml:"wait"`
+ } `xml:"cpu"`
+ Memory struct {
+ Percent float64 `xml:"percent"`
+ Kilobyte int64 `xml:"kilobyte"`
+ } `xml:"memory"`
+ Swap struct {
+ Percent float64 `xml:"percent"`
+ Kilobyte float64 `xml:"kilobyte"`
+ } `xml:"swap"`
+}
+
+type Monit struct {
+ Address string `toml:"address"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ client http.Client
+ tls.ClientConfig
+ Timeout internal.Duration `toml:"timeout"`
+}
+
+type Messagebody struct {
+ Metrics []string `json:"metrics"`
+}
+
+func (m *Monit) Description() string {
+ return "Read metrics and status information about processes managed by Monit"
+}
+
+var sampleConfig = `
+ ## Monit HTTPD address
+ address = "http://127.0.0.1:2812"
+
+ ## Username and Password for Monit
+ # username = ""
+ # password = ""
+
+ ## Amount of time allowed to complete the HTTP request
+ # timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+`
+
+func (m *Monit) SampleConfig() string {
+ return sampleConfig
+}
+
+func (m *Monit) Init() error {
+ tlsCfg, err := m.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ m.client = http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ Proxy: http.ProxyFromEnvironment,
+ },
+ Timeout: m.Timeout.Duration,
+ }
+ return nil
+}
+
+func (m *Monit) Gather(acc telegraf.Accumulator) error {
+
+ req, err := http.NewRequest("GET", fmt.Sprintf("%s/_status?format=xml", m.Address), nil)
+ if err != nil {
+ return err
+ }
+ if len(m.Username) > 0 || len(m.Password) > 0 {
+ req.SetBasicAuth(m.Username, m.Password)
+ }
+
+ resp, err := m.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == 200 {
+
+ var status Status
+ decoder := xml.NewDecoder(resp.Body)
+ decoder.CharsetReader = charset.NewReaderLabel
+ if err := decoder.Decode(&status); err != nil {
+ return fmt.Errorf("error parsing input: %v", err)
+ }
+
+ tags := map[string]string{
+ "version": status.Server.Version,
+ "source": status.Server.LocalHostname,
+ "platform_name": status.Platform.Name,
+ }
+
+ for _, service := range status.Services {
+ fields := make(map[string]interface{})
+ tags["status"] = serviceStatus(service)
+ fields["status_code"] = service.Status
+ tags["pending_action"] = pendingAction(service)
+ fields["pending_action_code"] = service.PendingAction
+ tags["monitoring_status"] = monitoringStatus(service)
+ fields["monitoring_status_code"] = service.MonitoringStatus
+ tags["monitoring_mode"] = monitoringMode(service)
+ fields["monitoring_mode_code"] = service.MonitorMode
+ tags["service"] = service.Name
+ if service.Type == fileSystem {
+ fields["mode"] = service.Mode
+ fields["block_percent"] = service.Block.Percent
+ fields["block_usage"] = service.Block.Usage
+ fields["block_total"] = service.Block.Total
+ fields["inode_percent"] = service.Inode.Percent
+ fields["inode_usage"] = service.Inode.Usage
+ fields["inode_total"] = service.Inode.Total
+ acc.AddFields("monit_filesystem", fields, tags)
+ } else if service.Type == directory {
+ fields["mode"] = service.Mode
+ acc.AddFields("monit_directory", fields, tags)
+ } else if service.Type == file {
+ fields["size"] = service.Size
+ fields["mode"] = service.Mode
+ acc.AddFields("monit_file", fields, tags)
+ } else if service.Type == process {
+ fields["cpu_percent"] = service.CPU.Percent
+ fields["cpu_percent_total"] = service.CPU.PercentTotal
+ fields["mem_kb"] = service.Memory.Kilobyte
+ fields["mem_kb_total"] = service.Memory.KilobyteTotal
+ fields["mem_percent"] = service.Memory.Percent
+ fields["mem_percent_total"] = service.Memory.PercentTotal
+ fields["pid"] = service.Pid
+ fields["parent_pid"] = service.ParentPid
+ fields["threads"] = service.Threads
+ fields["children"] = service.Children
+ acc.AddFields("monit_process", fields, tags)
+ } else if service.Type == remoteHost {
+ fields["remote_hostname"] = service.Port.Hostname
+ fields["port_number"] = service.Port.PortNumber
+ fields["request"] = service.Port.Request
+ fields["protocol"] = service.Port.Protocol
+ fields["type"] = service.Port.Type
+ acc.AddFields("monit_remote_host", fields, tags)
+ } else if service.Type == system {
+ fields["cpu_system"] = service.System.CPU.System
+ fields["cpu_user"] = service.System.CPU.User
+ fields["cpu_wait"] = service.System.CPU.Wait
+ fields["cpu_load_avg_1m"] = service.System.Load.Avg01
+ fields["cpu_load_avg_5m"] = service.System.Load.Avg05
+ fields["cpu_load_avg_15m"] = service.System.Load.Avg15
+ fields["mem_kb"] = service.System.Memory.Kilobyte
+ fields["mem_percent"] = service.System.Memory.Percent
+ fields["swap_kb"] = service.System.Swap.Kilobyte
+ fields["swap_percent"] = service.System.Swap.Percent
+ acc.AddFields("monit_system", fields, tags)
+ } else if service.Type == fifo {
+ fields["mode"] = service.Mode
+ acc.AddFields("monit_fifo", fields, tags)
+ } else if service.Type == program {
+ fields["program_started"] = service.Program.Started * 10000000
+ fields["program_status"] = service.Program.Status
+ acc.AddFields("monit_program", fields, tags)
+ } else if service.Type == network {
+ fields["link_state"] = service.Link.State
+ fields["link_speed"] = service.Link.Speed
+ fields["link_mode"] = linkMode(service)
+ fields["download_packets_now"] = service.Link.Download.Packets.Now
+ fields["download_packets_total"] = service.Link.Download.Packets.Total
+ fields["download_bytes_now"] = service.Link.Download.Bytes.Now
+ fields["download_bytes_total"] = service.Link.Download.Bytes.Total
+ fields["download_errors_now"] = service.Link.Download.Errors.Now
+ fields["download_errors_total"] = service.Link.Download.Errors.Total
+ fields["upload_packets_now"] = service.Link.Upload.Packets.Now
+ fields["upload_packets_total"] = service.Link.Upload.Packets.Total
+ fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now
+ fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total
+ fields["upload_errors_now"] = service.Link.Upload.Errors.Now
+ fields["upload_errors_total"] = service.Link.Upload.Errors.Total
+ acc.AddFields("monit_network", fields, tags)
+ }
+ }
+ } else {
+ return fmt.Errorf("received status code %d (%s), expected 200",
+ resp.StatusCode,
+ http.StatusText(resp.StatusCode))
+
+ }
+ return nil
+}
+
+func linkMode(s Service) string {
+ if s.Link.Duplex == 1 {
+ return "duplex"
+ } else if s.Link.Duplex == 0 {
+ return "simplex"
+ } else {
+ return "unknown"
+ }
+}
+
+func serviceStatus(s Service) string {
+ if s.Status == 0 {
+ return "running"
+ } else {
+ return "failure"
+ }
+}
+
+func pendingAction(s Service) string {
+ if s.PendingAction > 0 {
+ if s.PendingAction >= len(pendingActions) {
+ return "unknown"
+ }
+ return pendingActions[s.PendingAction-1]
+ } else {
+ return "none"
+ }
+}
+
+func monitoringMode(s Service) string {
+ switch s.MonitorMode {
+ case 0:
+ return "active"
+ case 1:
+ return "passive"
+ }
+ return "unknown"
+}
+
+func monitoringStatus(s Service) string {
+ switch s.MonitoringStatus {
+ case 1:
+ return "monitored"
+ case 2:
+ return "initializing"
+ case 4:
+ return "waiting"
+ }
+ return "not_monitored"
+}
+
+func init() {
+ inputs.Add("monit", func() telegraf.Input {
+ return &Monit{}
+ })
+}
diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go
new file mode 100644
index 0000000000000..1d95b45a51bc5
--- /dev/null
+++ b/plugins/inputs/monit/monit_test.go
@@ -0,0 +1,704 @@
+package monit
+
+import (
+ "errors"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type transportMock struct {
+}
+
+func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
+ errorString := "Get http://127.0.0.1:2812/_status?format=xml: " +
+ "read tcp 192.168.10.2:55610->127.0.0.1:2812: " +
+ "read: connection reset by peer"
+ return nil, errors.New(errorString)
+}
+
+func TestServiceType(t *testing.T) {
+ tests := []struct {
+ name string
+ filename string
+ expected []telegraf.Metric
+ }{
+ {
+ name: "check filesystem service type",
+ filename: "testdata/response_servicetype_0.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_filesystem",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "mode": 555,
+ "block_percent": 29.5,
+ "block_usage": 4424.0,
+ "block_total": 14990.0,
+ "inode_percent": 0.8,
+ "inode_usage": 59674.0,
+ "inode_total": 7680000.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check directory service type",
+ filename: "testdata/response_servicetype_1.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_directory",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "mode": 755,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check file service type",
+ filename: "testdata/response_servicetype_2.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_file",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "mode": 644,
+ "size": 1565,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check process service type",
+ filename: "testdata/response_servicetype_3.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_process",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "cpu_percent": 0.0,
+ "cpu_percent_total": 0.0,
+ "mem_kb": 22892,
+ "mem_kb_total": 22892,
+ "mem_percent": 0.1,
+ "mem_percent_total": 0.1,
+ "pid": 5959,
+ "parent_pid": 1,
+ "threads": 31,
+ "children": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check remote host service type",
+ filename: "testdata/response_servicetype_4.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_remote_host",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "remote_hostname": "192.168.1.10",
+ "port_number": 2812,
+ "request": "",
+ "protocol": "DEFAULT",
+ "type": "TCP",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check system service type",
+ filename: "testdata/response_servicetype_5.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_system",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "cpu_system": 0.1,
+ "cpu_user": 0.0,
+ "cpu_wait": 0.0,
+ "cpu_load_avg_1m": 0.00,
+ "cpu_load_avg_5m": 0.00,
+ "cpu_load_avg_15m": 0.00,
+ "mem_kb": 259668,
+ "mem_percent": 1.5,
+ "swap_kb": 0.0,
+ "swap_percent": 0.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check fifo service type",
+ filename: "testdata/response_servicetype_6.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_fifo",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "mode": 664,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check program service type",
+ filename: "testdata/response_servicetype_7.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_program",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "program_status": 0,
+ "program_started": int64(15728504980000000),
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check network service type",
+ filename: "testdata/response_servicetype_8.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_network",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "link_speed": 1000000000,
+ "link_mode": "duplex",
+ "link_state": 1,
+ "download_packets_now": 0,
+ "download_packets_total": 15243,
+ "download_bytes_now": 0,
+ "download_bytes_total": 5506778,
+ "download_errors_now": 0,
+ "download_errors_total": 0,
+ "upload_packets_now": 0,
+ "upload_packets_total": 8822,
+ "upload_bytes_now": 0,
+ "upload_bytes_total": 1287240,
+ "upload_errors_now": 0,
+ "upload_errors_total": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/_status":
+ http.ServeFile(w, r, tt.filename)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ defer ts.Close()
+
+ plugin := &Monit{
+ Address: ts.URL,
+ }
+
+ plugin.Init()
+
+ var acc testutil.Accumulator
+ err := plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime())
+ })
+ }
+}
+
+func TestMonitFailure(t *testing.T) {
+ tests := []struct {
+ name string
+ filename string
+ expected []telegraf.Metric
+ }{
+ {
+ name: "check monit failure status",
+ filename: "testdata/response_servicetype_8_failure.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_network",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "failure",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 8388608,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "link_speed": -1,
+ "link_mode": "unknown",
+ "link_state": 0,
+ "download_packets_now": 0,
+ "download_packets_total": 0,
+ "download_bytes_now": 0,
+ "download_bytes_total": 0,
+ "download_errors_now": 0,
+ "download_errors_total": 0,
+ "upload_packets_now": 0,
+ "upload_packets_total": 0,
+ "upload_bytes_now": 0,
+ "upload_bytes_total": 0,
+ "upload_errors_now": 0,
+ "upload_errors_total": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check passive mode",
+ filename: "testdata/response_servicetype_8_passivemode.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_network",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "passive",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 1,
+ "pending_action_code": 0,
+ "link_speed": 1000000000,
+ "link_mode": "duplex",
+ "link_state": 1,
+ "download_packets_now": 0,
+ "download_packets_total": 15243,
+ "download_bytes_now": 0,
+ "download_bytes_total": 5506778,
+ "download_errors_now": 0,
+ "download_errors_total": 0,
+ "upload_packets_now": 0,
+ "upload_packets_total": 8822,
+ "upload_bytes_now": 0,
+ "upload_bytes_total": 1287240,
+ "upload_errors_now": 0,
+ "upload_errors_total": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check initializing status",
+ filename: "testdata/response_servicetype_8_initializingmode.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_network",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "initializing",
+ "monitoring_mode": "active",
+ "pending_action": "none",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 2,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 0,
+ "link_speed": 1000000000,
+ "link_mode": "duplex",
+ "link_state": 1,
+ "download_packets_now": 0,
+ "download_packets_total": 15243,
+ "download_bytes_now": 0,
+ "download_bytes_total": 5506778,
+ "download_errors_now": 0,
+ "download_errors_total": 0,
+ "upload_packets_now": 0,
+ "upload_packets_total": 8822,
+ "upload_bytes_now": 0,
+ "upload_bytes_total": 1287240,
+ "upload_errors_now": 0,
+ "upload_errors_total": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "check pending action",
+ filename: "testdata/response_servicetype_8_pendingaction.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "monit_network",
+ map[string]string{
+ "version": "5.17.1",
+ "source": "localhost",
+ "platform_name": "Linux",
+ "service": "test",
+ "status": "running",
+ "monitoring_status": "monitored",
+ "monitoring_mode": "active",
+ "pending_action": "exec",
+ },
+ map[string]interface{}{
+ "status_code": 0,
+ "monitoring_status_code": 1,
+ "monitoring_mode_code": 0,
+ "pending_action_code": 5,
+ "link_speed": 1000000000,
+ "link_mode": "duplex",
+ "link_state": 1,
+ "download_packets_now": 0,
+ "download_packets_total": 15243,
+ "download_bytes_now": 0,
+ "download_bytes_total": 5506778,
+ "download_errors_now": 0,
+ "download_errors_total": 0,
+ "upload_packets_now": 0,
+ "upload_packets_total": 8822,
+ "upload_bytes_now": 0,
+ "upload_bytes_total": 1287240,
+ "upload_errors_now": 0,
+ "upload_errors_total": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/_status":
+ http.ServeFile(w, r, tt.filename)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ defer ts.Close()
+
+ plugin := &Monit{
+ Address: ts.URL,
+ }
+
+ plugin.Init()
+
+ var acc testutil.Accumulator
+ err := plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime())
+ })
+ }
+}
+
+func checkAuth(r *http.Request, username, password string) bool {
+ user, pass, ok := r.BasicAuth()
+ if !ok {
+ return false
+ }
+ return user == username && pass == password
+}
+
+func TestAllowHosts(t *testing.T) {
+
+ r := &Monit{
+ Address: "http://127.0.0.1:2812",
+ Username: "test",
+ Password: "test",
+ }
+
+ var acc testutil.Accumulator
+
+ r.client.Transport = &transportMock{}
+
+ err := r.Gather(&acc)
+
+ if assert.Error(t, err) {
+ assert.Contains(t, err.Error(), "read: connection reset by peer")
+ }
+}
+
+func TestConnection(t *testing.T) {
+
+ r := &Monit{
+ Address: "http://127.0.0.1:2812",
+ Username: "test",
+ Password: "test",
+ }
+
+ var acc testutil.Accumulator
+
+ r.Init()
+
+ err := r.Gather(&acc)
+
+ if assert.Error(t, err) {
+ assert.Contains(t, err.Error(), "connect: connection refused")
+ }
+}
+
+func TestInvalidUsernameOrPassword(t *testing.T) {
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ if !checkAuth(r, "testing", "testing") {
+ http.Error(w, "Unauthorized.", 401)
+ return
+ }
+
+ switch r.URL.Path {
+ case "/_status":
+ http.ServeFile(w, r, "testdata/response_servicetype_0.xml")
+ default:
+ panic("Cannot handle request")
+ }
+ }))
+
+ defer ts.Close()
+
+ r := &Monit{
+ Address: ts.URL,
+ Username: "test",
+ Password: "test",
+ }
+
+ var acc testutil.Accumulator
+
+ r.Init()
+
+ err := r.Gather(&acc)
+
+ assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200")
+}
+
+func TestNoUsernameOrPasswordConfiguration(t *testing.T) {
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ if !checkAuth(r, "testing", "testing") {
+ http.Error(w, "Unauthorized.", 401)
+ return
+ }
+
+ switch r.URL.Path {
+ case "/_status":
+ http.ServeFile(w, r, "testdata/response_servicetype_0.xml")
+ default:
+ panic("Cannot handle request")
+ }
+ }))
+
+ defer ts.Close()
+
+ r := &Monit{
+ Address: ts.URL,
+ }
+
+ var acc testutil.Accumulator
+
+ r.Init()
+
+ err := r.Gather(&acc)
+
+ assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200")
+}
+
+func TestInvalidXMLAndInvalidTypes(t *testing.T) {
+
+ tests := []struct {
+ name string
+ filename string
+ }{
+ {
+ name: "check filesystem service type",
+ filename: "testdata/response_invalidxml_1.xml",
+ },
+ {
+ name: "check filesystem service type",
+ filename: "testdata/response_invalidxml_2.xml",
+ },
+ {
+ name: "check filesystem service type",
+ filename: "testdata/response_invalidxml_3.xml",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/_status":
+ http.ServeFile(w, r, tt.filename)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ defer ts.Close()
+
+ plugin := &Monit{
+ Address: ts.URL,
+ }
+
+ plugin.Init()
+
+ var acc testutil.Accumulator
+ err := plugin.Gather(&acc)
+
+ if assert.Error(t, err) {
+ assert.Contains(t, err.Error(), "error parsing input:")
+ }
+ })
+ }
+}
diff --git a/plugins/inputs/monit/testdata/response_invalidxml_1.xml b/plugins/inputs/monit/testdata/response_invalidxml_1.xml
new file mode 100644
index 0000000000000..8f1dcbaa0075b
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_invalidxml_1.xml
@@ -0,0 +1,51 @@
+
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572850498
+ 709694
+ 0
+ 0
+ 1
+ 0
+ 0
+ 555
+ 0
+ 0
+ 4096
+
+ 29.5
+ 4424.0
+ 14990.0
+
+ 0.8
+ 59674
+ 7680000
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_invalidxml_2.xml b/plugins/inputs/monit/testdata/response_invalidxml_2.xml
new file mode 100644
index 0000000000000..aab7bc87c1435
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_invalidxml_2.xml
@@ -0,0 +1,52 @@
+
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572850498
+ 709694
+ 0.0
+ 0
+ 1
+ 0
+ 0
+ 555
+ 0
+ 0
+ 4096
+
+ 29.5
+ 4424.0
+ 14990.0
+
+
+ 0.8
+ 59674
+ 7680000
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_invalidxml_3.xml b/plugins/inputs/monit/testdata/response_invalidxml_3.xml
new file mode 100644
index 0000000000000..9fd7ed31db1e1
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_invalidxml_3.xml
@@ -0,0 +1,52 @@
+
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572850498
+ 709694
+ 0
+ 0
+ 1
+ 0
+ 0
+ 555
+ 0
+ 0
+ 4096
+
+ 29.5
+ 4424.0
+ 14990.0
+
+
+ 0.8
+ 59674
+ 7680000
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_0.xml b/plugins/inputs/monit/testdata/response_servicetype_0.xml
new file mode 100644
index 0000000000000..beaeb2003c55e
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_0.xml
@@ -0,0 +1,51 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572850498
+ 709694
+ 0
+ 0
+ 1
+ 0
+ 0
+ 555
+ 0
+ 0
+ 4096
+
+ 29.5
+ 4424.0
+ 14990.0
+
+
+ 0.8
+ 59674
+ 7680000
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_1.xml b/plugins/inputs/monit/testdata/response_servicetype_1.xml
new file mode 100644
index 0000000000000..86f02f1422a8f
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_1.xml
@@ -0,0 +1,41 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572850342
+ 546082
+ 0
+ 0
+ 1
+ 0
+ 0
+ 755
+ 0
+ 0
+ 1572272434
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_2.xml b/plugins/inputs/monit/testdata/response_servicetype_2.xml
new file mode 100644
index 0000000000000..709368007363a
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_2.xml
@@ -0,0 +1,42 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1476628305
+ 302669
+ 0
+ 0
+ 1
+ 0
+ 0
+ 644
+ 1000
+ 1000
+ 1476518441
+ 1565
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_3.xml b/plugins/inputs/monit/testdata/response_servicetype_3.xml
new file mode 100644
index 0000000000000..14a603dc359f1
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_3.xml
@@ -0,0 +1,52 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1476628305
+ 302552
+ 0
+ 0
+ 1
+ 0
+ 0
+ 5959
+ 1
+ 109870
+ 0
+ 31
+
+ 0.1
+ 0.1
+ 22892
+ 22892
+
+
+ 0.0
+ 0.0
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_4.xml b/plugins/inputs/monit/testdata/response_servicetype_4.xml
new file mode 100644
index 0000000000000..d7064e2f764e2
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_4.xml
@@ -0,0 +1,45 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572862451
+ 947671
+ 0
+ 0
+ 1
+ 0
+ 0
+
+ 192.168.1.10
+ 2812
+
+ DEFAULT
+ TCP
+ 0.000145
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_5.xml b/plugins/inputs/monit/testdata/response_servicetype_5.xml
new file mode 100644
index 0000000000000..d0ee2cfcaf46d
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_5.xml
@@ -0,0 +1,57 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1476628305
+ 302682
+ 0
+ 0
+ 1
+ 0
+ 0
+
+
+ 0.00
+ 0.00
+ 0.00
+
+
+ 0.0
+ 0.1
+ 0.0
+
+
+ 1.5
+ 259668
+
+
+ 0.0
+ 0
+
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_6.xml b/plugins/inputs/monit/testdata/response_servicetype_6.xml
new file mode 100644
index 0000000000000..5acabe2dad483
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_6.xml
@@ -0,0 +1,41 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572862451
+ 947495
+ 0
+ 0
+ 1
+ 0
+ 0
+ 664
+ 1000
+ 1000
+ 1572271731
+
+
\ No newline at end of file
diff --git a/plugins/inputs/monit/testdata/response_servicetype_7.xml b/plugins/inputs/monit/testdata/response_servicetype_7.xml
new file mode 100644
index 0000000000000..fbda56c5c0d15
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_7.xml
@@ -0,0 +1,42 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572850498
+ 710675
+ 0
+ 0
+ 1
+ 0
+ 0
+
+ 1572850498
+ 0
+ Stats health check successful.
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8.xml b/plugins/inputs/monit/testdata/response_servicetype_8.xml
new file mode 100644
index 0000000000000..12623a9d4d3ef
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_8.xml
@@ -0,0 +1,70 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572869770
+ 807562
+ 0
+ 0
+ 1
+ 0
+ 0
+
+ 1
+ 1000000000
+ 1
+
+
+ 0
+ 15243
+
+
+ 0
+ 5506778
+
+
+ 0
+ 0
+
+
+
+
+ 0
+ 8822
+
+
+ 0
+ 1287240
+
+
+ 0
+ 0
+
+
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_failure.xml b/plugins/inputs/monit/testdata/response_servicetype_8_failure.xml
new file mode 100644
index 0000000000000..d68419d5952f9
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_8_failure.xml
@@ -0,0 +1,70 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572869770
+ 807562
+ 8388608
+ 0
+ 1
+ 0
+ 0
+
+ 0
+ -1
+ -1
+
+
+ 0
+ 0
+
+
+ 0
+ 0
+
+
+ 0
+ 0
+
+
+
+
+ 0
+ 0
+
+
+ 0
+ 0
+
+
+ 0
+ 0
+
+
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml b/plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml
new file mode 100644
index 0000000000000..357f66f3b825f
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml
@@ -0,0 +1,70 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572869770
+ 807562
+ 0
+ 0
+ 2
+ 0
+ 0
+
+ 1
+ 1000000000
+ 1
+
+
+ 0
+ 15243
+
+
+ 0
+ 5506778
+
+
+ 0
+ 0
+
+
+
+
+ 0
+ 8822
+
+
+ 0
+ 1287240
+
+
+ 0
+ 0
+
+
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml b/plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml
new file mode 100644
index 0000000000000..a4d9595ae2acb
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml
@@ -0,0 +1,70 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572869770
+ 807562
+ 0
+ 0
+ 1
+ 1
+ 0
+
+ 1
+ 1000000000
+ 1
+
+
+ 0
+ 15243
+
+
+ 0
+ 5506778
+
+
+ 0
+ 0
+
+
+
+
+ 0
+ 8822
+
+
+ 0
+ 1287240
+
+
+ 0
+ 0
+
+
+
+
+
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml b/plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml
new file mode 100644
index 0000000000000..df19a64285812
--- /dev/null
+++ b/plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml
@@ -0,0 +1,70 @@
+
+
+
+ 0ed39c522be4c3971541412c43141613
+ 1476518435
+ 5.17.1
+ 109878
+ 10
+ 0
+ localhost
+ /var/vcap/bosh/etc/monitrc
+
+ 127.0.0.1
+ 2822
+ 0
+
+
+
+ Linux
+ 4.15.0-65-generic
+ #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019
+ x86_64
+ 8
+ 16432272
+ 16432268
+
+
+ test
+ 1572869770
+ 807562
+ 0
+ 0
+ 1
+ 0
+ 5
+
+ 1
+ 1000000000
+ 1
+
+
+ 0
+ 15243
+
+
+ 0
+ 5506778
+
+
+ 0
+ 0
+
+
+
+
+ 0
+ 8822
+
+
+ 0
+ 1287240
+
+
+ 0
+ 0
+
+
+
+
+
diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md
index da3ce43f53665..a9e8236ee0cf5 100644
--- a/plugins/inputs/mqtt_consumer/README.md
+++ b/plugins/inputs/mqtt_consumer/README.md
@@ -3,13 +3,27 @@
The [MQTT][mqtt] consumer plugin reads from the specified MQTT topics
and creates metrics using one of the supported [input data formats][].
-### Configuration:
+### Configuration
```toml
[[inputs.mqtt_consumer]]
- ## MQTT broker URLs to be used. The format should be scheme://host:port,
- ## schema can be tcp, ssl, or ws.
- servers = ["tcp://localhost:1883"]
+ ## Broker URLs for the MQTT server or cluster. To connect to multiple
+ ## clusters or standalone servers, use a seperate plugin instance.
+ ## example: servers = ["tcp://localhost:1883"]
+ ## servers = ["ssl://localhost:1883"]
+ ## servers = ["ws://localhost:1883"]
+ servers = ["tcp://127.0.0.1:1883"]
+
+ ## Topics that will be subscribed to.
+ topics = [
+ "telegraf/host01/cpu",
+ "telegraf/+/mem",
+ "sensors/#",
+ ]
+
+ ## The message topic will be stored in a tag specified by this value. If set
+ ## to the empty string no topic tag will be created.
+ # topic_tag = "topic"
## QoS policy for messages
## 0 = at most once
@@ -18,10 +32,10 @@ and creates metrics using one of the supported [input data formats][].
##
## When using a QoS of 1 or 2, you should enable persistent_session to allow
## resuming unacknowledged messages.
- qos = 0
+ # qos = 0
## Connection timeout for initial connection in seconds
- connection_timeout = "30s"
+ # connection_timeout = "30s"
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
@@ -33,21 +47,17 @@ and creates metrics using one of the supported [input data formats][].
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
- ## Topics to subscribe to
- topics = [
- "telegraf/host01/cpu",
- "telegraf/+/mem",
- "sensors/#",
- ]
+ ## Persistent session disables clearing of the client session on connection.
+ ## In order for this option to work you must also set client_id to identify
+ ## the client. To receive messages that arrived while the client is offline,
+ ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+ ## publishing.
+ # persistent_session = false
- # if true, messages that can't be delivered while the subscriber is offline
- # will be delivered when it comes back (such as on service restart).
- # NOTE: if true, client_id MUST be set
- persistent_session = false
- # If empty, a random client ID will be generated.
- client_id = ""
+ ## If unset, a random client ID will be generated.
+ # client_id = ""
- ## username and password to connect MQTT server.
+ ## Username and password to connect MQTT server.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
@@ -65,7 +75,7 @@ and creates metrics using one of the supported [input data formats][].
data_format = "influx"
```
-### Tags:
+### Metrics
- All measurements are tagged with the incoming topic, ie
`topic=telegraf/host01/cpu`
diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go
index da556159e62b8..26122b8e86b88 100644
--- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go
+++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go
@@ -4,14 +4,13 @@ import (
"context"
"errors"
"fmt"
- "log"
"strings"
"time"
"github.com/eclipse/paho.mqtt.golang"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -33,11 +32,21 @@ const (
Connected
)
+type Client interface {
+ Connect() mqtt.Token
+ SubscribeMultiple(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token
+ AddRoute(topic string, callback mqtt.MessageHandler)
+ Disconnect(quiesce uint)
+}
+
+type ClientFactory func(o *mqtt.ClientOptions) Client
+
type MQTTConsumer struct {
- Servers []string
- Topics []string
- Username string
- Password string
+ Servers []string `toml:"servers"`
+ Topics []string `toml:"topics"`
+ TopicTag *string `toml:"topic_tag"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
QoS int `toml:"qos"`
ConnectionTimeout internal.Duration `toml:"connection_timeout"`
MaxUndeliveredMessages int `toml:"max_undelivered_messages"`
@@ -51,21 +60,39 @@ type MQTTConsumer struct {
ClientID string `toml:"client_id"`
tls.ClientConfig
- client mqtt.Client
- acc telegraf.TrackingAccumulator
- state ConnectionState
- subscribed bool
- sem semaphore
- messages map[telegraf.TrackingID]bool
+ Log telegraf.Logger
+
+ clientFactory ClientFactory
+ client Client
+ opts *mqtt.ClientOptions
+ acc telegraf.TrackingAccumulator
+ state ConnectionState
+ sem semaphore
+ messages map[telegraf.TrackingID]bool
+ topicTag string
ctx context.Context
cancel context.CancelFunc
}
var sampleConfig = `
- ## MQTT broker URLs to be used. The format should be scheme://host:port,
- ## schema can be tcp, ssl, or ws.
- servers = ["tcp://localhost:1883"]
+ ## Broker URLs for the MQTT server or cluster. To connect to multiple
+ ## clusters or standalone servers, use a seperate plugin instance.
+ ## example: servers = ["tcp://localhost:1883"]
+ ## servers = ["ssl://localhost:1883"]
+ ## servers = ["ws://localhost:1883"]
+ servers = ["tcp://127.0.0.1:1883"]
+
+ ## Topics that will be subscribed to.
+ topics = [
+ "telegraf/host01/cpu",
+ "telegraf/+/mem",
+ "sensors/#",
+ ]
+
+ ## The message topic will be stored in a tag specified by this value. If set
+ ## to the empty string no topic tag will be created.
+ # topic_tag = "topic"
## QoS policy for messages
## 0 = at most once
@@ -74,10 +101,10 @@ var sampleConfig = `
##
## When using a QoS of 1 or 2, you should enable persistent_session to allow
## resuming unacknowledged messages.
- qos = 0
+ # qos = 0
## Connection timeout for initial connection in seconds
- connection_timeout = "30s"
+ # connection_timeout = "30s"
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
@@ -89,21 +116,17 @@ var sampleConfig = `
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
- ## Topics to subscribe to
- topics = [
- "telegraf/host01/cpu",
- "telegraf/+/mem",
- "sensors/#",
- ]
+ ## Persistent session disables clearing of the client session on connection.
+ ## In order for this option to work you must also set client_id to identify
+ ## the client. To receive messages that arrived while the client is offline,
+ ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+ ## publishing.
+ # persistent_session = false
- # if true, messages that can't be delivered while the subscriber is offline
- # will be delivered when it comes back (such as on service restart).
- # NOTE: if true, client_id MUST be set
- persistent_session = false
- # If empty, a random client ID will be generated.
- client_id = ""
+ ## If unset, a random client ID will be generated.
+ # client_id = ""
- ## username and password to connect MQTT server.
+ ## Username and password to connect MQTT server.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
@@ -133,7 +156,7 @@ func (m *MQTTConsumer) SetParser(parser parsers.Parser) {
m.parser = parser
}
-func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
+func (m *MQTTConsumer) Init() error {
m.state = Disconnected
if m.PersistentSession && m.ClientID == "" {
@@ -148,15 +171,38 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
return fmt.Errorf("connection_timeout must be greater than 1s: %s", m.ConnectionTimeout.Duration)
}
- m.acc = acc.WithTracking(m.MaxUndeliveredMessages)
- m.ctx, m.cancel = context.WithCancel(context.Background())
+ m.topicTag = "topic"
+ if m.TopicTag != nil {
+ m.topicTag = *m.TopicTag
+ }
opts, err := m.createOpts()
if err != nil {
return err
}
- m.client = mqtt.NewClient(opts)
+ m.opts = opts
+
+ return nil
+}
+
+func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
+ m.state = Disconnected
+
+ m.acc = acc.WithTracking(m.MaxUndeliveredMessages)
+ m.sem = make(semaphore, m.MaxUndeliveredMessages)
+ m.ctx, m.cancel = context.WithCancel(context.Background())
+
+ m.client = m.clientFactory(m.opts)
+
+ // AddRoute sets up the function for handling messages. These need to be
+ // added in case we find a persistent session containing subscriptions so we
+ // know where to dispatch persisted and new messages to. In the alternate
+ // case that we need to create the subscriptions these will be replaced.
+ for _, topic := range m.Topics {
+ m.client.AddRoute(topic, m.recvMessage)
+ }
+
m.state = Connecting
m.connect()
@@ -164,33 +210,37 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
}
func (m *MQTTConsumer) connect() error {
- if token := m.client.Connect(); token.Wait() && token.Error() != nil {
+ token := m.client.Connect()
+ if token.Wait() && token.Error() != nil {
err := token.Error()
m.state = Disconnected
return err
}
- log.Printf("I! [inputs.mqtt_consumer] Connected %v", m.Servers)
+ m.Log.Infof("Connected %v", m.Servers)
m.state = Connected
- m.sem = make(semaphore, m.MaxUndeliveredMessages)
m.messages = make(map[telegraf.TrackingID]bool)
- // Only subscribe on first connection when using persistent sessions. On
- // subsequent connections the subscriptions should be stored in the
- // session, but the proper way to do this is to check the connection
- // response to ensure a session was found.
- if !m.PersistentSession || !m.subscribed {
- topics := make(map[string]byte)
- for _, topic := range m.Topics {
- topics[topic] = byte(m.QoS)
- }
- subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage)
- subscribeToken.Wait()
- if subscribeToken.Error() != nil {
- m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v",
- strings.Join(m.Topics[:], ","), subscribeToken.Error()))
- }
- m.subscribed = true
+ // Persistent sessions should skip subscription if a session is present, as
+ // the subscriptions are stored by the server.
+ type sessionPresent interface {
+ SessionPresent() bool
+ }
+ if t, ok := token.(sessionPresent); ok && t.SessionPresent() {
+ m.Log.Debugf("Session found %v", m.Servers)
+ return nil
+ }
+
+ topics := make(map[string]byte)
+ for _, topic := range m.Topics {
+ topics[topic] = byte(m.QoS)
+ }
+
+ subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage)
+ subscribeToken.Wait()
+ if subscribeToken.Error() != nil {
+ m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v",
+ strings.Join(m.Topics[:], ","), subscribeToken.Error()))
}
return nil
@@ -198,7 +248,7 @@ func (m *MQTTConsumer) connect() error {
func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) {
m.acc.AddError(fmt.Errorf("connection lost: %v", err))
- log.Printf("D! [inputs.mqtt_consumer] Disconnected %v", m.Servers)
+ m.Log.Debugf("Disconnected %v", m.Servers)
m.state = Disconnected
return
}
@@ -207,12 +257,12 @@ func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) {
for {
select {
case track := <-m.acc.Delivered():
+ <-m.sem
_, ok := m.messages[track.ID()]
if !ok {
// Added by a previous connection
continue
}
- <-m.sem
// No ack, MQTT does not support durable handling
delete(m.messages, track.ID())
case m.sem <- empty{}:
@@ -232,9 +282,11 @@ func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Mess
return err
}
- topic := msg.Topic()
- for _, metric := range metrics {
- metric.AddTag("topic", topic)
+ if m.topicTag != "" {
+ topic := msg.Topic()
+ for _, metric := range metrics {
+ metric.AddTag(m.topicTag, topic)
+ }
}
id := acc.AddTrackingMetricGroup(metrics)
@@ -244,9 +296,9 @@ func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Mess
func (m *MQTTConsumer) Stop() {
if m.state == Connected {
- log.Printf("D! [inputs.mqtt_consumer] Disconnecting %v", m.Servers)
+ m.Log.Debugf("Disconnecting %v", m.Servers)
m.client.Disconnect(200)
- log.Printf("D! [inputs.mqtt_consumer] Disconnected %v", m.Servers)
+ m.Log.Debugf("Disconnected %v", m.Servers)
m.state = Disconnected
}
m.cancel()
@@ -255,7 +307,7 @@ func (m *MQTTConsumer) Stop() {
func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error {
if m.state == Disconnected {
m.state = Connecting
- log.Printf("D! [inputs.mqtt_consumer] Connecting %v", m.Servers)
+ m.Log.Debugf("Connecting %v", m.Servers)
m.connect()
}
@@ -292,13 +344,13 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
}
if len(m.Servers) == 0 {
- return opts, fmt.Errorf("could not get host infomations")
+ return opts, fmt.Errorf("could not get host informations")
}
for _, server := range m.Servers {
// Preserve support for host:port style servers; deprecated in Telegraf 1.4.4
if !strings.Contains(server, "://") {
- log.Printf("W! [inputs.mqtt_consumer] Server %q should be updated to use `scheme://host:port` format", server)
+ m.Log.Warnf("Server %q should be updated to use `scheme://host:port` format", server)
if tlsCfg == nil {
server = "tcp://" + server
} else {
@@ -316,12 +368,20 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
return opts, nil
}
+func New(factory ClientFactory) *MQTTConsumer {
+ return &MQTTConsumer{
+ Servers: []string{"tcp://127.0.0.1:1883"},
+ ConnectionTimeout: defaultConnectionTimeout,
+ MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
+ clientFactory: factory,
+ state: Disconnected,
+ }
+}
+
func init() {
inputs.Add("mqtt_consumer", func() telegraf.Input {
- return &MQTTConsumer{
- ConnectionTimeout: defaultConnectionTimeout,
- MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
- state: Disconnected,
- }
+ return New(func(o *mqtt.ClientOptions) Client {
+ return mqtt.NewClient(o)
+ })
})
}
diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go
index 4209963bbe0b9..4884fc0508107 100644
--- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go
+++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go
@@ -2,110 +2,377 @@ package mqtt_consumer
import (
"testing"
+ "time"
"github.com/eclipse/paho.mqtt.golang"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/testutil"
- "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
-const (
- testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n"
- invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n"
-)
+type FakeClient struct {
+ ConnectF func() mqtt.Token
+ SubscribeMultipleF func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token
+ AddRouteF func(topic string, callback mqtt.MessageHandler)
+ DisconnectF func(quiesce uint)
-func newTestMQTTConsumer() *MQTTConsumer {
- n := &MQTTConsumer{
- Topics: []string{"telegraf"},
- Servers: []string{"localhost:1883"},
- }
+ connectCallCount int
+ subscribeCallCount int
+ addRouteCallCount int
+ disconnectCallCount int
+}
- return n
+func (c *FakeClient) Connect() mqtt.Token {
+ c.connectCallCount++
+ return c.ConnectF()
}
-// Test that default client has random ID
-func TestRandomClientID(t *testing.T) {
- m1 := &MQTTConsumer{
- Servers: []string{"localhost:1883"}}
- opts, err := m1.createOpts()
- assert.NoError(t, err)
+func (c *FakeClient) SubscribeMultiple(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token {
+ c.subscribeCallCount++
+ return c.SubscribeMultipleF(filters, callback)
+}
- m2 := &MQTTConsumer{
- Servers: []string{"localhost:1883"}}
- opts2, err2 := m2.createOpts()
- assert.NoError(t, err2)
+func (c *FakeClient) AddRoute(topic string, callback mqtt.MessageHandler) {
+ c.addRouteCallCount++
+ c.AddRouteF(topic, callback)
+}
- assert.NotEqual(t, opts.ClientID, opts2.ClientID)
+func (c *FakeClient) Disconnect(quiesce uint) {
+ c.disconnectCallCount++
+ c.DisconnectF(quiesce)
+}
+
+type FakeParser struct {
+}
+
+// FakeParser satisfies parsers.Parser
+var _ parsers.Parser = &FakeParser{}
+
+func (p *FakeParser) Parse(buf []byte) ([]telegraf.Metric, error) {
+ panic("not implemented")
+}
+
+func (p *FakeParser) ParseLine(line string) (telegraf.Metric, error) {
+ panic("not implemented")
+}
+
+func (p *FakeParser) SetDefaultTags(tags map[string]string) {
+ panic("not implemented")
+}
+
+type FakeToken struct {
+ sessionPresent bool
+}
+
+// FakeToken satisfies mqtt.Token
+var _ mqtt.Token = &FakeToken{}
+
+func (t *FakeToken) Wait() bool {
+ return true
+}
+
+func (t *FakeToken) WaitTimeout(time.Duration) bool {
+ return true
+}
+
+func (t *FakeToken) Error() error {
+ return nil
+}
+
+func (t *FakeToken) SessionPresent() bool {
+ return t.sessionPresent
+}
+
+// Test the basic lifecycle transitions of the plugin.
+func TestLifecycleSanity(t *testing.T) {
+ var acc testutil.Accumulator
+
+ plugin := New(func(o *mqtt.ClientOptions) Client {
+ return &FakeClient{
+ ConnectF: func() mqtt.Token {
+ return &FakeToken{}
+ },
+ AddRouteF: func(topic string, callback mqtt.MessageHandler) {
+ },
+ SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token {
+ return &FakeToken{}
+ },
+ DisconnectF: func(quiesce uint) {
+ },
+ }
+ })
+ plugin.Log = testutil.Logger{}
+ plugin.Servers = []string{"tcp://127.0.0.1"}
+
+ parser := &FakeParser{}
+ plugin.SetParser(parser)
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ err = plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ plugin.Stop()
}
// Test that default client has random ID
-func TestClientID(t *testing.T) {
- m1 := &MQTTConsumer{
- Servers: []string{"localhost:1883"},
- ClientID: "telegraf-test",
- }
- opts, err := m1.createOpts()
- assert.NoError(t, err)
+func TestRandomClientID(t *testing.T) {
+ var err error
- m2 := &MQTTConsumer{
- Servers: []string{"localhost:1883"},
- ClientID: "telegraf-test",
- }
- opts2, err2 := m2.createOpts()
- assert.NoError(t, err2)
+ m1 := New(nil)
+ m1.Log = testutil.Logger{}
+ err = m1.Init()
+ require.NoError(t, err)
+
+ m2 := New(nil)
+ m2.Log = testutil.Logger{}
+ err = m2.Init()
+ require.NoError(t, err)
- assert.Equal(t, "telegraf-test", opts2.ClientID)
- assert.Equal(t, "telegraf-test", opts.ClientID)
+ require.NotEqual(t, m1.opts.ClientID, m2.opts.ClientID)
}
-// Test that Start() fails if client ID is not set but persistent is
+// PersistentSession requires ClientID
func TestPersistentClientIDFail(t *testing.T) {
- m1 := &MQTTConsumer{
- Servers: []string{"localhost:1883"},
- PersistentSession: true,
- }
- acc := testutil.Accumulator{}
- err := m1.Start(&acc)
- assert.Error(t, err)
+ plugin := New(nil)
+ plugin.Log = testutil.Logger{}
+ plugin.PersistentSession = true
+
+ err := plugin.Init()
+ require.Error(t, err)
}
-func mqttMsg(val string) mqtt.Message {
- return &message{
- topic: "telegraf/unit_test",
- payload: []byte(val),
- }
+type Message struct {
}
-// Take the message struct from the paho mqtt client library for returning
-// a test message interface.
-type message struct {
- duplicate bool
- qos byte
- retained bool
- topic string
- messageID uint16
- payload []byte
+func (m *Message) Duplicate() bool {
+ panic("not implemented")
}
-func (m *message) Duplicate() bool {
- return m.duplicate
+func (m *Message) Qos() byte {
+ panic("not implemented")
}
-func (m *message) Qos() byte {
- return m.qos
+func (m *Message) Retained() bool {
+ panic("not implemented")
}
-func (m *message) Retained() bool {
- return m.retained
+func (m *Message) Topic() string {
+ return "telegraf"
}
-func (m *message) Topic() string {
- return m.topic
+func (m *Message) MessageID() uint16 {
+ panic("not implemented")
}
-func (m *message) MessageID() uint16 {
- return m.messageID
+func (m *Message) Payload() []byte {
+ return []byte("cpu time_idle=42i")
}
-func (m *message) Payload() []byte {
- return m.payload
+func (m *Message) Ack() {
+ panic("not implemented")
+}
+
+func TestTopicTag(t *testing.T) {
+ tests := []struct {
+ name string
+ topicTag func() *string
+ expected []telegraf.Metric
+ }{
+ {
+ name: "default topic when topic tag is unset for backwards compatibility",
+ topicTag: func() *string {
+ return nil
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "topic": "telegraf",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "use topic tag when set",
+ topicTag: func() *string {
+ tag := "topic_tag"
+ return &tag
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "topic_tag": "telegraf",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "no topic tag is added when topic tag is set to the empty string",
+ topicTag: func() *string {
+ tag := ""
+ return &tag
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var handler mqtt.MessageHandler
+ client := &FakeClient{
+ ConnectF: func() mqtt.Token {
+ return &FakeToken{}
+ },
+ AddRouteF: func(topic string, callback mqtt.MessageHandler) {
+ handler = callback
+ },
+ SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token {
+ return &FakeToken{}
+ },
+ DisconnectF: func(quiesce uint) {
+ },
+ }
+
+ plugin := New(func(o *mqtt.ClientOptions) Client {
+ return client
+ })
+ plugin.Log = testutil.Logger{}
+ plugin.Topics = []string{"telegraf"}
+ plugin.TopicTag = tt.topicTag()
+
+ parser, err := parsers.NewInfluxParser()
+ require.NoError(t, err)
+ plugin.SetParser(parser)
+
+ err = plugin.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ handler(nil, &Message{})
+
+ plugin.Stop()
+
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime())
+ })
+ }
+}
+
+func TestAddRouteCalledForEachTopic(t *testing.T) {
+ client := &FakeClient{
+ ConnectF: func() mqtt.Token {
+ return &FakeToken{}
+ },
+ AddRouteF: func(topic string, callback mqtt.MessageHandler) {
+ },
+ SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token {
+ return &FakeToken{}
+ },
+ DisconnectF: func(quiesce uint) {
+ },
+ }
+ plugin := New(func(o *mqtt.ClientOptions) Client {
+ return client
+ })
+ plugin.Log = testutil.Logger{}
+ plugin.Topics = []string{"a", "b"}
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ plugin.Stop()
+
+ require.Equal(t, client.addRouteCallCount, 2)
+}
+
+func TestSubscribeCalledIfNoSession(t *testing.T) {
+ client := &FakeClient{
+ ConnectF: func() mqtt.Token {
+ return &FakeToken{}
+ },
+ AddRouteF: func(topic string, callback mqtt.MessageHandler) {
+ },
+ SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token {
+ return &FakeToken{}
+ },
+ DisconnectF: func(quiesce uint) {
+ },
+ }
+ plugin := New(func(o *mqtt.ClientOptions) Client {
+ return client
+ })
+ plugin.Log = testutil.Logger{}
+ plugin.Topics = []string{"b"}
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ plugin.Stop()
+
+ require.Equal(t, client.subscribeCallCount, 1)
+}
+
+func TestSubscribeNotCalledIfSession(t *testing.T) {
+ client := &FakeClient{
+ ConnectF: func() mqtt.Token {
+ return &FakeToken{sessionPresent: true}
+ },
+ AddRouteF: func(topic string, callback mqtt.MessageHandler) {
+ },
+ SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token {
+ return &FakeToken{}
+ },
+ DisconnectF: func(quiesce uint) {
+ },
+ }
+ plugin := New(func(o *mqtt.ClientOptions) Client {
+ return client
+ })
+ plugin.Log = testutil.Logger{}
+ plugin.Topics = []string{"b"}
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ plugin.Stop()
+
+ require.Equal(t, client.subscribeCallCount, 0)
}
diff --git a/plugins/inputs/multifile/README.md b/plugins/inputs/multifile/README.md
index 558d4e4427a68..2d71ac159cdd2 100644
--- a/plugins/inputs/multifile/README.md
+++ b/plugins/inputs/multifile/README.md
@@ -40,11 +40,11 @@ Path of the file to be parsed, relative to the `base_dir`.
Name of the field/tag key, defaults to `$(basename file)`.
* `conversion`:
Data format used to parse the file contents:
- * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Efficively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`.
+ * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`.
* `float`: Converts the value into a float with no adjustment. Same as `float(0)`.
- * `int`: Convertes the value into an integer.
+ * `int`: Converts the value into an integer.
* `string`, `""`: No conversion.
- * `bool`: Convertes the value into a boolean.
+ * `bool`: Converts the value into a boolean.
* `tag`: File content is used as a tag.
### Example Output
diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md
index 564d75e614046..644d4cf8d7887 100644
--- a/plugins/inputs/mysql/README.md
+++ b/plugins/inputs/mysql/README.md
@@ -21,10 +21,9 @@ This plugin gathers the statistic data from MySQL server
### Configuration
```toml
-# Read metrics from one or many mysql servers
[[inputs.mysql]]
## specify servers via a url matching:
- ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
+ ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
## e.g.
## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
@@ -32,60 +31,80 @@ This plugin gathers the statistic data from MySQL server
#
## If no servers are specified, then localhost is used as the host.
servers = ["tcp(127.0.0.1:3306)/"]
- ## the limits for metrics form perf_events_statements
- perf_events_statements_digest_text_limit = 120
- perf_events_statements_limit = 250
- perf_events_statements_time_limit = 86400
- #
+
+ ## Selects the metric output format.
+ ##
+ ## This option exists to maintain backwards compatibility, if you have
+ ## existing metrics do not set or change this value until you are ready to
+ ## migrate to the new format.
+ ##
+ ## If you do not have existing metrics from this plugin set to the latest
+ ## version.
+ ##
+ ## Telegraf >=1.6: metric_version = 2
+ ## <1.6: metric_version = 1 (or unset)
+ metric_version = 2
+
## if the list is empty, then metrics are gathered from all database tables
- table_schema_databases = []
- #
+ # table_schema_databases = []
+
## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list
- gather_table_schema = false
- #
+ # gather_table_schema = false
+
## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
- gather_process_list = true
- #
- ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
- gather_user_statistics = true
- #
+ # gather_process_list = false
+
+ ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
+ # gather_user_statistics = false
+
## gather auto_increment columns and max values from information schema
- gather_info_schema_auto_inc = true
- #
+ # gather_info_schema_auto_inc = false
+
## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
- gather_innodb_metrics = true
- #
+ # gather_innodb_metrics = false
+
## gather metrics from SHOW SLAVE STATUS command output
- gather_slave_status = true
- #
+ # gather_slave_status = false
+
## gather metrics from SHOW BINARY LOGS command output
- gather_binary_logs = false
- #
+ # gather_binary_logs = false
+
+ ## gather metrics from SHOW GLOBAL VARIABLES command output
+ # gather_global_variables = true
+
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
- gather_table_io_waits = false
- #
+ # gather_table_io_waits = false
+
## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
- gather_table_lock_waits = false
- #
+ # gather_table_lock_waits = false
+
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
- gather_index_io_waits = false
- #
+ # gather_index_io_waits = false
+
## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
- gather_event_waits = false
- #
+ # gather_event_waits = false
+
## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
- gather_file_events_stats = false
- #
+ # gather_file_events_stats = false
+
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
- gather_perf_events_statements = false
- #
+ # gather_perf_events_statements = false
+
+ ## the limits for metrics form perf_events_statements
+ # perf_events_statements_digest_text_limit = 120
+ # perf_events_statements_limit = 250
+ # perf_events_statements_time_limit = 86400
+
## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
- interval_slow = "30m"
+ ## example: interval_slow = "30m"
+ # interval_slow = ""
## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
- tls_ca = "/etc/telegraf/ca.pem"
- tls_cert = "/etc/telegraf/cert.pem"
- tls_key = "/etc/telegraf/key.pem"
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
```
#### Metric Version
@@ -98,7 +117,7 @@ InfluxDB due to the change of types. For this reason, you should keep the
If preserving your old data is not required you may wish to drop conflicting
measurements:
-```
+```sql
DROP SERIES from mysql
DROP SERIES from mysql_variables
DROP SERIES from mysql_innodb
@@ -134,7 +153,7 @@ If you wish to remove the `name_suffix` you may use Kapacitor to copy the
historical data to the default name. Do this only after retiring the old
measurement name.
-1. Use the techinique described above to write to multiple locations:
+1. Use the technique described above to write to multiple locations:
```toml
[[inputs.mysql]]
servers = ["tcp(127.0.0.1:3306)/"]
@@ -264,7 +283,7 @@ The unit of fields varies by the tags.
* events_statements_rows_examined_total(float, number)
* events_statements_tmp_tables_total(float, number)
* events_statements_tmp_disk_tables_total(float, number)
- * events_statements_sort_merge_passes_totales(float, number)
+ * events_statements_sort_merge_passes_totals(float, number)
* events_statements_sort_rows_total(float, number)
* events_statements_no_index_used_total(float, number)
* Table schema - gathers statistics of each schema. It has following measurements
diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go
index 0516e22b73bf8..7ce9bd1666173 100644
--- a/plugins/inputs/mysql/mysql.go
+++ b/plugins/inputs/mysql/mysql.go
@@ -9,12 +9,12 @@ import (
"sync"
"time"
+ "github.com/go-sql-driver/mysql"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/inputs/mysql/v1"
-
- "github.com/go-sql-driver/mysql"
+ "github.com/influxdata/telegraf/plugins/inputs/mysql/v2"
)
type Mysql struct {
@@ -36,12 +36,18 @@ type Mysql struct {
GatherTableSchema bool `toml:"gather_table_schema"`
GatherFileEventsStats bool `toml:"gather_file_events_stats"`
GatherPerfEventsStatements bool `toml:"gather_perf_events_statements"`
+ GatherGlobalVars bool `toml:"gather_global_variables"`
IntervalSlow string `toml:"interval_slow"`
MetricVersion int `toml:"metric_version"`
+
+ Log telegraf.Logger `toml:"-"`
tls.ClientConfig
+ lastT time.Time
+ initDone bool
+ scanIntervalSlow uint32
}
-var sampleConfig = `
+const sampleConfig = `
## specify servers via a url matching:
## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
@@ -65,55 +71,59 @@ var sampleConfig = `
## <1.6: metric_version = 1 (or unset)
metric_version = 2
- ## the limits for metrics form perf_events_statements
- perf_events_statements_digest_text_limit = 120
- perf_events_statements_limit = 250
- perf_events_statements_time_limit = 86400
- #
- ## if the list is empty, then metrics are gathered from all databasee tables
- table_schema_databases = []
- #
+ ## if the list is empty, then metrics are gathered from all database tables
+ # table_schema_databases = []
+
## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list
- gather_table_schema = false
- #
+ # gather_table_schema = false
+
## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
- gather_process_list = true
- #
+ # gather_process_list = false
+
## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
- gather_user_statistics = true
- #
+ # gather_user_statistics = false
+
## gather auto_increment columns and max values from information schema
- gather_info_schema_auto_inc = true
- #
+ # gather_info_schema_auto_inc = false
+
## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
- gather_innodb_metrics = true
- #
+ # gather_innodb_metrics = false
+
## gather metrics from SHOW SLAVE STATUS command output
- gather_slave_status = true
- #
+ # gather_slave_status = false
+
## gather metrics from SHOW BINARY LOGS command output
- gather_binary_logs = false
- #
+ # gather_binary_logs = false
+
+ ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES
+ # gather_global_variables = true
+
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
- gather_table_io_waits = false
- #
+ # gather_table_io_waits = false
+
## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
- gather_table_lock_waits = false
- #
+ # gather_table_lock_waits = false
+
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
- gather_index_io_waits = false
- #
+ # gather_index_io_waits = false
+
## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
- gather_event_waits = false
- #
+ # gather_event_waits = false
+
## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
- gather_file_events_stats = false
- #
+ # gather_file_events_stats = false
+
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
- gather_perf_events_statements = false
- #
+ # gather_perf_events_statements = false
+
+ ## the limits for metrics form perf_events_statements
+ # perf_events_statements_digest_text_limit = 120
+ # perf_events_statements_limit = 250
+ # perf_events_statements_time_limit = 86400
+
## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
- interval_slow = "30m"
+ ## example: interval_slow = "30m"
+ # interval_slow = ""
## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
# tls_ca = "/etc/telegraf/ca.pem"
@@ -123,7 +133,13 @@ var sampleConfig = `
# insecure_skip_verify = false
`
-var defaultTimeout = time.Second * time.Duration(5)
+const (
+ defaultTimeout = 5 * time.Second
+ defaultPerfEventsStatementsDigestTextLimit = 120
+ defaultPerfEventsStatementsLimit = 250
+ defaultPerfEventsStatementsTimeLimit = 86400
+ defaultGatherGlobalVars = true
+)
func (m *Mysql) SampleConfig() string {
return sampleConfig
@@ -133,21 +149,16 @@ func (m *Mysql) Description() string {
return "Read metrics from one or many mysql servers"
}
-var (
- localhost = ""
- lastT time.Time
- initDone = false
- scanIntervalSlow uint32
-)
+const localhost = ""
func (m *Mysql) InitMysql() {
if len(m.IntervalSlow) > 0 {
interval, err := time.ParseDuration(m.IntervalSlow)
if err == nil && interval.Seconds() >= 1.0 {
- scanIntervalSlow = uint32(interval.Seconds())
+ m.scanIntervalSlow = uint32(interval.Seconds())
}
}
- initDone = true
+ m.initDone = true
}
func (m *Mysql) Gather(acc telegraf.Accumulator) error {
@@ -156,7 +167,7 @@ func (m *Mysql) Gather(acc telegraf.Accumulator) error {
return m.gatherServer(localhost, acc)
}
// Initialise additional query intervals
- if !initDone {
+ if !m.initDone {
m.InitMysql()
}
@@ -184,6 +195,7 @@ func (m *Mysql) Gather(acc telegraf.Accumulator) error {
return nil
}
+// These are const but can't be declared as such because golang doesn't allow const maps
var (
// status counter
generalThreadStates = map[string]uint32{
@@ -424,14 +436,16 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error {
return err
}
- // Global Variables may be gathered less often
- if len(m.IntervalSlow) > 0 {
- if uint32(time.Since(lastT).Seconds()) >= scanIntervalSlow {
- err = m.gatherGlobalVariables(db, serv, acc)
- if err != nil {
- return err
+ if m.GatherGlobalVars {
+ // Global Variables may be gathered less often
+ if len(m.IntervalSlow) > 0 {
+ if uint32(time.Since(m.lastT).Seconds()) >= m.scanIntervalSlow {
+ err = m.gatherGlobalVariables(db, serv, acc)
+ if err != nil {
+ return err
+ }
+ m.lastT = time.Now()
}
- lastT = time.Now()
}
}
@@ -550,14 +564,20 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu
return err
}
key = strings.ToLower(key)
+
// parse mysql version and put into field and tag
if strings.Contains(key, "version") {
fields[key] = string(val)
tags[key] = string(val)
}
- if value, ok := m.parseValue(val); ok {
+
+ value, err := m.parseGlobalVariables(key, val)
+ if err != nil {
+ m.Log.Debugf("Error parsing global variable %q: %v", key, err)
+ } else {
fields[key] = value
}
+
// Send 20 fields at a time
if len(fields) >= 20 {
acc.AddFields("mysql_variables", fields, tags)
@@ -571,6 +591,18 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu
return nil
}
+func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{}, error) {
+ if m.MetricVersion < 2 {
+ v, ok := v1.ParseValue(value)
+ if ok {
+ return v, nil
+ }
+ return v, fmt.Errorf("could not parse value: %q", string(value))
+ } else {
+ return v2.ConvertGlobalVariables(key, value)
+ }
+}
+
// gatherSlaveStatuses can be used to get replication analytics
// When the server is slave, then it returns only one row.
// If the multi-source replication is set, then everything works differently
@@ -744,7 +776,10 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum
}
} else {
key = strings.ToLower(key)
- if value, ok := m.parseValue(val); ok {
+ value, err := v2.ConvertGlobalStatus(key, val)
+ if err != nil {
+ m.Log.Debugf("Error parsing global status: %v", err)
+ } else {
fields[key] = value
}
}
@@ -1735,6 +1770,11 @@ func getDSNTag(dsn string) string {
func init() {
inputs.Add("mysql", func() telegraf.Input {
- return &Mysql{}
+ return &Mysql{
+ PerfEventsStatementsDigestTextLimit: defaultPerfEventsStatementsDigestTextLimit,
+ PerfEventsStatementsLimit: defaultPerfEventsStatementsLimit,
+ PerfEventsStatementsTimeLimit: defaultPerfEventsStatementsTimeLimit,
+ GatherGlobalVars: defaultGatherGlobalVars,
+ }
})
}
diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go
index b4983ba0e028f..be9c338bf7b0e 100644
--- a/plugins/inputs/mysql/mysql_test.go
+++ b/plugins/inputs/mysql/mysql_test.go
@@ -26,6 +26,54 @@ func TestMysqlDefaultsToLocal(t *testing.T) {
assert.True(t, acc.HasMeasurement("mysql"))
}
+func TestMysqlMultipleInstances(t *testing.T) {
+ // Invoke Gather() from two separate configurations and
+ // confirm they don't interfere with each other
+ if testing.Short() {
+ t.Skip("Skipping integration test in short mode")
+ }
+ testServer := "root@tcp(127.0.0.1:3306)/?tls=false"
+ m := &Mysql{
+ Servers: []string{testServer},
+ IntervalSlow: "30s",
+ }
+
+ var acc, acc2 testutil.Accumulator
+ err := m.Gather(&acc)
+ require.NoError(t, err)
+ assert.True(t, acc.HasMeasurement("mysql"))
+ // acc should have global variables
+ assert.True(t, acc.HasMeasurement("mysql_variables"))
+
+ m2 := &Mysql{
+ Servers: []string{testServer},
+ }
+ err = m2.Gather(&acc2)
+ require.NoError(t, err)
+ assert.True(t, acc2.HasMeasurement("mysql"))
+ // acc2 should not have global variables
+ assert.False(t, acc2.HasMeasurement("mysql_variables"))
+}
+
+func TestMysqlMultipleInits(t *testing.T) {
+ m := &Mysql{
+ IntervalSlow: "30s",
+ }
+ m2 := &Mysql{}
+
+ m.InitMysql()
+ assert.True(t, m.initDone)
+ assert.False(t, m2.initDone)
+ assert.Equal(t, m.scanIntervalSlow, uint32(30))
+ assert.Equal(t, m2.scanIntervalSlow, uint32(0))
+
+ m2.InitMysql()
+ assert.True(t, m.initDone)
+ assert.True(t, m2.initDone)
+ assert.Equal(t, m.scanIntervalSlow, uint32(30))
+ assert.Equal(t, m2.scanIntervalSlow, uint32(0))
+}
+
func TestMysqlGetDSNTag(t *testing.T) {
tests := []struct {
input string
diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go
new file mode 100644
index 0000000000000..a3ac3e976d6a3
--- /dev/null
+++ b/plugins/inputs/mysql/v2/convert.go
@@ -0,0 +1,103 @@
+package v2
+
+import (
+ "bytes"
+ "database/sql"
+ "fmt"
+ "strconv"
+)
+
+type ConversionFunc func(value sql.RawBytes) (interface{}, error)
+
+func ParseInt(value sql.RawBytes) (interface{}, error) {
+ v, err := strconv.ParseInt(string(value), 10, 64)
+
+ // Ignore ErrRange. When this error is set the returned value is "the
+ // maximum magnitude integer of the appropriate bitSize and sign."
+ if err, ok := err.(*strconv.NumError); ok && err.Err == strconv.ErrRange {
+ return v, nil
+ }
+
+ return v, err
+}
+
+func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) {
+ if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) {
+ return int64(1), nil
+ }
+
+ return int64(0), nil
+}
+
+func ParseGTIDMode(value sql.RawBytes) (interface{}, error) {
+ // https://dev.mysql.com/doc/refman/8.0/en/replication-mode-change-online-concepts.html
+ v := string(value)
+ switch v {
+ case "OFF":
+ return int64(0), nil
+ case "ON":
+ return int64(1), nil
+ case "OFF_PERMISSIVE":
+ return int64(0), nil
+ case "ON_PERMISSIVE":
+ return int64(1), nil
+ default:
+ return nil, fmt.Errorf("unrecognized gtid_mode: %q", v)
+ }
+}
+
+func ParseValue(value sql.RawBytes) (interface{}, error) {
+ if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 {
+ return 1, nil
+ }
+
+ if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 {
+ return 0, nil
+ }
+
+ if val, err := strconv.ParseInt(string(value), 10, 64); err == nil {
+ return val, nil
+ }
+ if val, err := strconv.ParseFloat(string(value), 64); err == nil {
+ return val, nil
+ }
+
+ if len(string(value)) > 0 {
+ return string(value), nil
+ }
+
+ return nil, fmt.Errorf("unconvertible value: %q", string(value))
+}
+
+var GlobalStatusConversions = map[string]ConversionFunc{
+ "ssl_ctx_verify_depth": ParseInt,
+ "ssl_verify_depth": ParseInt,
+}
+
+var GlobalVariableConversions = map[string]ConversionFunc{
+ "gtid_mode": ParseGTIDMode,
+}
+
+func ConvertGlobalStatus(key string, value sql.RawBytes) (interface{}, error) {
+ if bytes.Equal(value, []byte("")) {
+ return nil, nil
+ }
+
+ if conv, ok := GlobalStatusConversions[key]; ok {
+ return conv(value)
+ }
+
+ return ParseValue(value)
+}
+
+func ConvertGlobalVariables(key string, value sql.RawBytes) (interface{}, error) {
+ if bytes.Equal(value, []byte("")) {
+ return nil, nil
+ }
+
+ if conv, ok := GlobalVariableConversions[key]; ok {
+ return conv(value)
+ }
+
+ return ParseValue(value)
+}
diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go
new file mode 100644
index 0000000000000..47189c18d1576
--- /dev/null
+++ b/plugins/inputs/mysql/v2/convert_test.go
@@ -0,0 +1,86 @@
+package v2
+
+import (
+ "database/sql"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestConvertGlobalStatus(t *testing.T) {
+ tests := []struct {
+ name string
+ key string
+ value sql.RawBytes
+ expected interface{}
+ expectedErr error
+ }{
+ {
+ name: "default",
+ key: "ssl_ctx_verify_depth",
+ value: []byte("0"),
+ expected: int64(0),
+ expectedErr: nil,
+ },
+ {
+ name: "overflow int64",
+ key: "ssl_ctx_verify_depth",
+ value: []byte("18446744073709551615"),
+ expected: int64(9223372036854775807),
+ expectedErr: nil,
+ },
+ {
+ name: "defined variable but unset",
+ key: "ssl_ctx_verify_depth",
+ value: []byte(""),
+ expected: nil,
+ expectedErr: nil,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual, err := ConvertGlobalStatus(tt.key, tt.value)
+ require.Equal(t, tt.expectedErr, err)
+ require.Equal(t, tt.expected, actual)
+ })
+ }
+}
+
+func TestCovertGlobalVariables(t *testing.T) {
+ tests := []struct {
+ name string
+ key string
+ value sql.RawBytes
+ expected interface{}
+ expectedErr error
+ }{
+ {
+ name: "boolean type mysql<=5.6",
+ key: "gtid_mode",
+ value: []byte("ON"),
+ expected: int64(1),
+ expectedErr: nil,
+ },
+ {
+ name: "enum type mysql>=5.7",
+ key: "gtid_mode",
+ value: []byte("ON_PERMISSIVE"),
+ expected: int64(1),
+ expectedErr: nil,
+ },
+ {
+ name: "defined variable but unset",
+ key: "ssl_ctx_verify_depth",
+ value: []byte(""),
+ expected: nil,
+ expectedErr: nil,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual, err := ConvertGlobalVariables(tt.key, tt.value)
+ require.Equal(t, tt.expectedErr, err)
+ require.Equal(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go
index ba1cc803c6d45..1afb0046dc3a5 100644
--- a/plugins/inputs/nats/nats.go
+++ b/plugins/inputs/nats/nats.go
@@ -1,21 +1,19 @@
-// +build !freebsd
+// +build !freebsd freebsd,cgo
package nats
import (
+ "encoding/json"
"io/ioutil"
"net/http"
"net/url"
"path"
"time"
- "encoding/json"
-
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
-
- gnatsd "github.com/nats-io/gnatsd/server"
+ gnatsd "github.com/nats-io/nats-server/v2/server"
)
type Nats struct {
diff --git a/plugins/inputs/nats/nats_freebsd.go b/plugins/inputs/nats/nats_freebsd.go
index c23a6eec5ab91..08d08ba760df0 100644
--- a/plugins/inputs/nats/nats_freebsd.go
+++ b/plugins/inputs/nats/nats_freebsd.go
@@ -1,3 +1,3 @@
-// +build freebsd
+// +build freebsd,!cgo
package nats
diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go
index ef387f7e4a649..ece22288ff9af 100644
--- a/plugins/inputs/nats/nats_test.go
+++ b/plugins/inputs/nats/nats_test.go
@@ -1,4 +1,4 @@
-// +build !freebsd
+// +build !freebsd freebsd,cgo
package nats
diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md
index 8a89d90c5fe82..ae40d9185100a 100644
--- a/plugins/inputs/nats_consumer/README.md
+++ b/plugins/inputs/nats_consumer/README.md
@@ -12,13 +12,30 @@ instances of telegraf can read from a NATS cluster in parallel.
[[inputs.nats_consumer]]
## urls of NATS servers
servers = ["nats://localhost:4222"]
- ## Use Transport Layer Security
- secure = false
+
## subject(s) to consume
subjects = ["telegraf"]
+
## name a queue group
queue_group = "telegraf_consumers"
+ ## Optional credentials
+ # username = ""
+ # password = ""
+
+ ## Optional NATS 2.0 and NATS NGS compatible user credentials
+ # credentials = "/etc/telegraf/nats.creds"
+
+ ## Use Transport Layer Security
+ # secure = false
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+
## Sets the limits for pending msgs and bytes for each subscription
## These shouldn't need to be adjusted except in very high throughput scenarios
# pending_message_limit = 65536
diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go
index 4411d8c3ec89c..057c77ee795c4 100644
--- a/plugins/inputs/nats_consumer/nats_consumer.go
+++ b/plugins/inputs/nats_consumer/nats_consumer.go
@@ -3,13 +3,14 @@ package natsconsumer
import (
"context"
"fmt"
- "log"
+ "strings"
"sync"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
- nats "github.com/nats-io/go-nats"
+ "github.com/nats-io/nats.go"
)
var (
@@ -31,10 +32,17 @@ func (e natsError) Error() string {
}
type natsConsumer struct {
- QueueGroup string `toml:"queue_group"`
- Subjects []string `toml:"subjects"`
- Servers []string `toml:"servers"`
- Secure bool `toml:"secure"`
+ QueueGroup string `toml:"queue_group"`
+ Subjects []string `toml:"subjects"`
+ Servers []string `toml:"servers"`
+ Secure bool `toml:"secure"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ Credentials string `toml:"credentials"`
+
+ tls.ClientConfig
+
+ Log telegraf.Logger
// Client pending limits:
PendingMessageLimit int `toml:"pending_message_limit"`
@@ -61,13 +69,30 @@ type natsConsumer struct {
var sampleConfig = `
## urls of NATS servers
servers = ["nats://localhost:4222"]
- ## Use Transport Layer Security
- secure = false
+
## subject(s) to consume
subjects = ["telegraf"]
+
## name a queue group
queue_group = "telegraf_consumers"
+ ## Optional credentials
+ # username = ""
+ # password = ""
+
+ ## Optional NATS 2.0 and NATS NGS compatible user credentials
+ # credentials = "/etc/telegraf/nats.creds"
+
+ ## Use Transport Layer Security
+ # secure = false
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+
## Sets the limits for pending msgs and bytes for each subscription
## These shouldn't need to be adjusted except in very high throughput scenarios
# pending_message_limit = 65536
@@ -116,26 +141,37 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {
var connectErr error
- // set default NATS connection options
- opts := nats.DefaultOptions
+ options := []nats.Option{
+ nats.MaxReconnects(-1),
+ nats.ErrorHandler(n.natsErrHandler),
+ }
- // override max reconnection tries
- opts.MaxReconnect = -1
+ // override authentication, if any was specified
+ if n.Username != "" && n.Password != "" {
+ options = append(options, nats.UserInfo(n.Username, n.Password))
+ }
- // override servers if any were specified
- opts.Servers = n.Servers
+ if n.Credentials != "" {
+ options = append(options, nats.UserCredentials(n.Credentials))
+ }
- opts.Secure = n.Secure
+ if n.Secure {
+ tlsConfig, err := n.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ options = append(options, nats.Secure(tlsConfig))
+ }
if n.conn == nil || n.conn.IsClosed() {
- n.conn, connectErr = opts.Connect()
+ n.conn, connectErr = nats.Connect(strings.Join(n.Servers, ","), options...)
if connectErr != nil {
return connectErr
}
// Setup message and error channels
n.errs = make(chan error)
- n.conn.SetErrorHandler(n.natsErrHandler)
n.in = make(chan *nats.Msg, 1000)
for _, subj := range n.Subjects {
@@ -145,14 +181,13 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {
if err != nil {
return err
}
- // ensure that the subscription has been processed by the server
- if err = n.conn.Flush(); err != nil {
- return err
- }
+
// set the subscription pending limits
- if err = sub.SetPendingLimits(n.PendingMessageLimit, n.PendingBytesLimit); err != nil {
+ err = sub.SetPendingLimits(n.PendingMessageLimit, n.PendingBytesLimit)
+ if err != nil {
return err
}
+
n.subs = append(n.subs, sub)
}
}
@@ -167,7 +202,7 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {
go n.receiver(ctx)
}()
- log.Printf("I! Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n",
+ n.Log.Infof("Started the NATS consumer service, nats: %v, subjects: %v, queue: %v",
n.conn.ConnectedUrl(), n.Subjects, n.QueueGroup)
return nil
@@ -185,21 +220,21 @@ func (n *natsConsumer) receiver(ctx context.Context) {
case <-n.acc.Delivered():
<-sem
case err := <-n.errs:
- n.acc.AddError(err)
+ n.Log.Error(err)
case sem <- empty{}:
select {
case <-ctx.Done():
return
case err := <-n.errs:
<-sem
- n.acc.AddError(err)
+ n.Log.Error(err)
case <-n.acc.Delivered():
<-sem
<-sem
case msg := <-n.in:
metrics, err := n.parser.Parse(msg.Data)
if err != nil {
- n.acc.AddError(fmt.Errorf("subject: %s, error: %s", msg.Subject, err.Error()))
+ n.Log.Errorf("Subject: %s, error: %s", msg.Subject, err.Error())
<-sem
continue
}
@@ -213,8 +248,8 @@ func (n *natsConsumer) receiver(ctx context.Context) {
func (n *natsConsumer) clean() {
for _, sub := range n.subs {
if err := sub.Unsubscribe(); err != nil {
- n.acc.AddError(fmt.Errorf("Error unsubscribing from subject %s in queue %s: %s\n",
- sub.Subject, sub.Queue, err.Error()))
+ n.Log.Errorf("Error unsubscribing from subject %s in queue %s: %s",
+ sub.Subject, sub.Queue, err.Error())
}
}
diff --git a/plugins/inputs/neptune_apex/README.md b/plugins/inputs/neptune_apex/README.md
index 5531d3fa90faf..6fd28a16a6d21 100644
--- a/plugins/inputs/neptune_apex/README.md
+++ b/plugins/inputs/neptune_apex/README.md
@@ -59,8 +59,8 @@ programming. These tags are clearly marked in the list below and should be consi
- amp (float, Ampere) is the amount of current flowing through the 120V outlet.
- watt (float, Watt) represents the amount of energy flowing through the 120V outlet.
- xstatus (string) indicates the xstatus of an outlet. Found on wireless Vortech devices.
- - power_failed (int64, Unix epoch in ns) when the controller last lost power.
- - power_restored (int64, Unix epoch in ns) when the controller last powered on.
+ - power_failed (int64, Unix epoch in ns) when the controller last lost power. Omitted if the apex reports it as "none"
+ - power_restored (int64, Unix epoch in ns) when the controller last powered on. Omitted if the apex reports it as "none"
- serial (string, serial number)
- time:
- The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with
@@ -71,7 +71,7 @@ programming. These tags are clearly marked in the list below and should be consi
Get the max, mean, and min for the temperature in the last hour:
-```
+```sql
SELECT mean("value") FROM "neptune_apex" WHERE ("probe_type" = 'Temp') AND time >= now() - 6h GROUP BY time(20s)
```
@@ -79,7 +79,7 @@ SELECT mean("value") FROM "neptune_apex" WHERE ("probe_type" = 'Temp') AND time
#### sendRequest failure
This indicates a problem communicating with the local Apex controller. If on Mac/Linux, try curl:
-```
+```sh
$ curl apex.local/cgi-bin/status.xml
```
to isolate the problem.
diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go
index 370407a416e27..8161ac7b4880a 100644
--- a/plugins/inputs/neptune_apex/neptune_apex.go
+++ b/plugins/inputs/neptune_apex/neptune_apex.go
@@ -110,27 +110,21 @@ func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error {
err, data)
}
+ mainFields := map[string]interface{}{
+ "serial": r.Serial,
+ }
var reportTime time.Time
- var powerFailed, powerRestored int64
+
if reportTime, err = parseTime(r.Date, r.Timezone); err != nil {
return err
}
- if val, err := parseTime(r.PowerFailed, r.Timezone); err != nil {
- return err
- } else {
- powerFailed = val.UnixNano()
+ if val, err := parseTime(r.PowerFailed, r.Timezone); err == nil {
+ mainFields["power_failed"] = val.UnixNano()
}
- if val, err := parseTime(r.PowerRestored, r.Timezone); err != nil {
- return err
- } else {
- powerRestored = val.UnixNano()
+ if val, err := parseTime(r.PowerRestored, r.Timezone); err == nil {
+ mainFields["power_restored"] = val.UnixNano()
}
- mainFields := map[string]interface{}{
- "serial": r.Serial,
- "power_failed": powerFailed,
- "power_restored": powerRestored,
- }
acc.AddFields(Measurement, mainFields,
map[string]string{
"source": r.Hostname,
diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go
index 4a3cc64585f59..cefa5fad14662 100644
--- a/plugins/inputs/neptune_apex/neptune_apex_test.go
+++ b/plugins/inputs/neptune_apex/neptune_apex_test.go
@@ -226,7 +226,22 @@ func TestParseXML(t *testing.T) {
`12/22/2018 21:55:37
-8.0 a
12/22/2018 22:55:37 `),
- wantErr: true,
+ wantMetrics: []*testutil.Metric{
+ {
+ Measurement: Measurement,
+ Time: goodTime,
+ Tags: map[string]string{
+ "source": "",
+ "type": "controller",
+ "hardware": "",
+ "software": "",
+ },
+ Fields: map[string]interface{}{
+ "serial": "",
+ "power_restored": int64(1545548137000000000),
+ },
+ },
+ },
},
{
name: "Power restored time failure",
@@ -234,7 +249,22 @@ func TestParseXML(t *testing.T) {
`12/22/2018 21:55:37
-8.0 a
12/22/2018 22:55:37 `),
- wantErr: true,
+ wantMetrics: []*testutil.Metric{
+ {
+ Measurement: Measurement,
+ Time: goodTime,
+ Tags: map[string]string{
+ "source": "",
+ "type": "controller",
+ "hardware": "",
+ "software": "",
+ },
+ Fields: map[string]interface{}{
+ "serial": "",
+ "power_failed": int64(1545548137000000000),
+ },
+ },
+ },
},
{
name: "Power failed failure",
diff --git a/plugins/inputs/net/NET_README.md b/plugins/inputs/net/NET_README.md
index d9e747119da14..d2571d29e9ede 100644
--- a/plugins/inputs/net/NET_README.md
+++ b/plugins/inputs/net/NET_README.md
@@ -40,7 +40,7 @@ Different platforms gather the data above with different mechanisms. Telegraf us
Under freebsd/openbsd and darwin the plugin uses netstat.
Additionally, for the time being _only under Linux_, the plugin gathers system wide stats for different network protocols using /proc/net/snmp (tcp, udp, icmp, etc.).
-Explanation of the different metrics exposed by snmp is out of the scope of this document. The best way to find information would be tracing the constants in the Linux kernel source [here](http://lxr.free-electrons.com/source/net/ipv4/proc.c) and their usage. If /proc/net/snmp cannot be read for some reason, telegraf ignores the error silently.
+Explanation of the different metrics exposed by snmp is out of the scope of this document. The best way to find information would be tracing the constants in the Linux kernel source [here](https://elixir.bootlin.com/linux/latest/source/net/ipv4/proc.c) and their usage. If /proc/net/snmp cannot be read for some reason, telegraf ignores the error silently.
### Tags:
@@ -53,7 +53,7 @@ Under Linux the system wide protocol metrics have the interface=all tag.
You can use the following query to get the upload/download traffic rate per second for all interfaces in the last hour. The query uses the [derivative function](https://docs.influxdata.com/influxdb/v1.2/query_language/functions#derivative) which calculates the rate of change between subsequent field values.
-```
+```sql
SELECT derivative(first(bytes_recv), 1s) as "download bytes/sec", derivative(first(bytes_sent), 1s) as "upload bytes/sec" FROM net WHERE time > now() - 1h AND interface != 'all' GROUP BY time(10s), interface fill(0);
```
@@ -70,4 +70,4 @@ net,interface=eth0,host=HOST bytes_sent=451838509i,bytes_recv=3284081640i,packet
$ ./telegraf --config telegraf.conf --input-filter net --test
net,interface=eth0,host=HOST bytes_sent=451838509i,bytes_recv=3284081640i,packets_sent=2663590i,packets_recv=3585442i,err_in=0i,err_out=0i,drop_in=4i,drop_out=0i 1492834180000000000
net,interface=all,host=HOST ip_reasmfails=0i,icmp_insrcquenchs=0i,icmp_outtimestamps=0i,ip_inhdrerrors=0i,ip_inunknownprotos=0i,icmp_intimeexcds=10i,icmp_outaddrmasks=0i,icmp_indestunreachs=11005i,icmpmsg_outtype0=6i,tcp_retranssegs=14669i,udplite_outdatagrams=0i,ip_reasmtimeout=0i,ip_outnoroutes=2577i,ip_inaddrerrors=186i,icmp_outaddrmaskreps=0i,tcp_incsumerrors=0i,tcp_activeopens=55965i,ip_reasmoks=0i,icmp_inechos=6i,icmp_outdestunreachs=9417i,ip_reasmreqds=0i,icmp_outtimestampreps=0i,tcp_rtoalgorithm=1i,icmpmsg_intype3=11005i,icmpmsg_outtype69=129i,tcp_outsegs=2777459i,udplite_rcvbuferrors=0i,ip_fragoks=0i,icmp_inmsgs=13398i,icmp_outerrors=0i,tcp_outrsts=14951i,udplite_noports=0i,icmp_outmsgs=11517i,icmp_outechoreps=6i,icmpmsg_intype11=10i,icmp_inparmprobs=0i,ip_forwdatagrams=0i,icmp_inechoreps=1909i,icmp_outredirects=0i,icmp_intimestampreps=0i,icmpmsg_intype5=468i,tcp_rtomax=120000i,tcp_maxconn=-1i,ip_fragcreates=0i,ip_fragfails=0i,icmp_inredirects=468i,icmp_outtimeexcds=0i,icmp_outechos=1965i,icmp_inaddrmasks=0i,tcp_inerrs=389i,tcp_rtomin=200i,ip_defaultttl=64i,ip_outrequests=3366408i,ip_forwarding=2i,udp_incsumerrors=0i,udp_indatagrams=522136i,udplite_incsumerrors=0i,ip_outdiscards=871i,icmp_inerrors=958i,icmp_outsrcquenchs=0i,icmpmsg_intype0=1909i,tcp_insegs=3580226i,udp_outdatagrams=577265i,udp_rcvbuferrors=0i,udplite_sndbuferrors=0i,icmp_incsumerrors=0i,icmp_outparmprobs=0i,icmpmsg_outtype3=9417i,tcp_attemptfails=2652i,udplite_inerrors=0i,udplite_indatagrams=0i,ip_inreceives=4172969i,icmpmsg_outtype8=1965i,tcp_currestab=59i,udp_noports=5961i,ip_indelivers=4099279i,ip_indiscards=0i,tcp_estabresets=5818i,udp_sndbuferrors=3i,icmp_intimestamps=0i,icmpmsg_intype8=6i,udp_inerrors=0i,icmp_inaddrmaskreps=0i,tcp_passiveopens=452i 1492831540000000000
-``
+```
diff --git a/plugins/inputs/net/net.go b/plugins/inputs/net/net.go
index 35d4a2448038c..f91501860e749 100644
--- a/plugins/inputs/net/net.go
+++ b/plugins/inputs/net/net.go
@@ -54,6 +54,15 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {
}
}
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return fmt.Errorf("error getting list of interfaces: %s", err)
+ }
+ interfacesByName := map[string]net.Interface{}
+ for _, iface := range interfaces {
+ interfacesByName[iface.Name] = iface
+ }
+
for _, io := range netio {
if len(s.Interfaces) != 0 {
var found bool
@@ -66,8 +75,8 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {
continue
}
} else if !s.skipChecks {
- iface, err := net.InterfaceByName(io.Name)
- if err != nil {
+ iface, ok := interfacesByName[io.Name]
+ if !ok {
continue
}
diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md
index dcfb341d50dac..2c492408beef2 100644
--- a/plugins/inputs/net_response/README.md
+++ b/plugins/inputs/net_response/README.md
@@ -43,7 +43,6 @@ verify text in the response.
- result
- fields:
- response_time (float, seconds)
- - success (int) # success 0, failure 1
- result_code (int, success = 0, timeout = 1, connection_failed = 2, read_failed = 3, string_mismatch = 4)
- result_type (string) **DEPRECATED in 1.7; use result tag**
- string_found (boolean) **DEPRECATED in 1.4; use result tag**
diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go
index e411aa6474ade..3f75a6058115d 100644
--- a/plugins/inputs/net_response/net_response.go
+++ b/plugins/inputs/net_response/net_response.go
@@ -223,9 +223,6 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error {
} else {
return errors.New("Bad protocol")
}
- for key, value := range returnTags {
- tags[key] = value
- }
// Merge the tags
for k, v := range returnTags {
tags[k] = v
diff --git a/plugins/inputs/nginx/README.md b/plugins/inputs/nginx/README.md
index 7b5215dc3fdb0..bc4916507ef25 100644
--- a/plugins/inputs/nginx/README.md
+++ b/plugins/inputs/nginx/README.md
@@ -1,8 +1,8 @@
-# Telegraf Plugin: Nginx
+# Nginx Input Plugin
### Configuration:
-```
+```toml
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
## An array of Nginx stub_status URI to gather stats.
@@ -39,14 +39,14 @@
### Example Output:
Using this configuration:
-```
+```toml
[[inputs.nginx]]
## An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/status"]
```
When run with:
-```
+```sh
./telegraf --config telegraf.conf --input-filter nginx --test
```
diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go
index 1a1a115d3a1e3..4834137542039 100644
--- a/plugins/inputs/nginx/nginx.go
+++ b/plugins/inputs/nginx/nginx.go
@@ -13,7 +13,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
diff --git a/plugins/inputs/nginx_plus/README.md b/plugins/inputs/nginx_plus/README.md
index bfa9593c4ae39..cb0713ed848ff 100644
--- a/plugins/inputs/nginx_plus/README.md
+++ b/plugins/inputs/nginx_plus/README.md
@@ -1,4 +1,4 @@
-# Telegraf Plugin: nginx_plus
+# Nginx Plus Input Plugin
Nginx Plus is a commercial version of the open source web server Nginx. The use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/).
@@ -7,7 +7,7 @@ Structures for Nginx Plus have been built based on history of
### Configuration:
-```
+```toml
# Read Nginx Plus' advanced status information
[[inputs.nginx_plus]]
## An array of Nginx status URIs to gather stats.
@@ -81,14 +81,14 @@ Structures for Nginx Plus have been built based on history of
### Example Output:
Using this configuration:
-```
+```toml
[[inputs.nginx_plus]]
## An array of Nginx Plus status URIs to gather stats.
urls = ["http://localhost/status"]
```
When run with:
-```
+```sh
./telegraf -config telegraf.conf -input-filter nginx_plus -test
```
diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go
index 089ba7d938c30..5b0fb2596ebf8 100644
--- a/plugins/inputs/nginx_plus/nginx_plus.go
+++ b/plugins/inputs/nginx_plus/nginx_plus.go
@@ -14,15 +14,16 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
type NginxPlus struct {
- Urls []string
+ Urls []string `toml:"urls"`
+ ResponseTimeout internal.Duration `toml:"response_timeout"`
+ tls.ClientConfig
client *http.Client
-
- ResponseTimeout internal.Duration
}
var sampleConfig = `
@@ -31,6 +32,13 @@ var sampleConfig = `
# HTTP response timeout (default: 5s)
response_timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
`
func (n *NginxPlus) SampleConfig() string {
@@ -74,14 +82,20 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error {
}
func (n *NginxPlus) createHttpClient() (*http.Client, error) {
-
if n.ResponseTimeout.Duration < time.Second {
n.ResponseTimeout.Duration = time.Second * 5
}
+ tlsConfig, err := n.ClientConfig.TLSConfig()
+ if err != nil {
+ return nil, err
+ }
+
client := &http.Client{
- Transport: &http.Transport{},
- Timeout: n.ResponseTimeout.Duration,
+ Transport: &http.Transport{
+ TLSClientConfig: tlsConfig,
+ },
+ Timeout: n.ResponseTimeout.Duration,
}
return client, nil
diff --git a/plugins/inputs/nginx_plus_api/README.md b/plugins/inputs/nginx_plus_api/README.md
index e90645e4372ae..57cb127b5dd12 100644
--- a/plugins/inputs/nginx_plus_api/README.md
+++ b/plugins/inputs/nginx_plus_api/README.md
@@ -1,10 +1,10 @@
-# Telegraf Plugin: nginx_plus_api
+# Nginx Plus API Input Plugin
Nginx Plus is a commercial version of the open source web server Nginx. The use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/).
### Configuration:
-```
+```toml
# Read Nginx Plus API advanced status information
[[inputs.nginx_plus_api]]
## An array of Nginx API URIs to gather stats.
@@ -29,6 +29,24 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use
| nginx_plus_stream_upstream_peer | nginx_plus_api_stream_upstream_peers |
| nginx.stream.zone | nginx_plus_api_stream_server_zones |
+### Measurements by API version
+
+| Measurement | API version (api_version) |
+|--------------------------------------|---------------------------|
+| nginx_plus_api_processes | >= 3 |
+| nginx_plus_api_connections | >= 3 |
+| nginx_plus_api_ssl | >= 3 |
+| nginx_plus_api_http_requests | >= 3 |
+| nginx_plus_api_http_server_zones | >= 3 |
+| nginx_plus_api_http_upstreams | >= 3 |
+| nginx_plus_api_http_upstream_peers | >= 3 |
+| nginx_plus_api_http_caches | >= 3 |
+| nginx_plus_api_stream_upstreams | >= 3 |
+| nginx_plus_api_stream_upstream_peers | >= 3 |
+| nginx_plus_api_stream_server_zones | >= 3 |
+| nginx_plus_api_http_location_zones | >= 5 |
+| nginx_plus_api_resolver_zones | >= 5 |
+
### Measurements & Fields:
- nginx_plus_api_processes
@@ -129,7 +147,29 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use
- connections
- received
- sent
-
+- nginx_plus_api_location_zones
+ - requests
+ - responses_1xx
+ - responses_2xx
+ - responses_3xx
+ - responses_4xx
+ - responses_5xx
+ - responses_total
+ - received
+ - sent
+ - discarded
+- nginx_plus_api_resolver_zones
+ - name
+ - srv
+ - addr
+ - noerror
+ - formerr
+ - servfail
+ - nxdomain
+ - notimp
+ - refused
+ - timedout
+ - unknown
### Tags:
@@ -142,7 +182,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use
- source
- port
-- nginx_plus_api_http_server_zones, nginx_plus_api_upstream_server_zones
+- nginx_plus_api_http_server_zones, nginx_plus_api_upstream_server_zones, nginx_plus_api_http_location_zones, nginx_plus_api_resolver_zones
- source
- port
- zone
@@ -161,54 +201,53 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use
### Example Output:
Using this configuration:
-```
+```toml
[[inputs.nginx_plus_api]]
## An array of Nginx Plus API URIs to gather stats.
urls = ["http://localhost/api"]
```
When run with:
-```
+```sh
./telegraf -config telegraf.conf -input-filter nginx_plus_api -test
```
It produces:
```
-> nginx_plus_api_processes,host=localhost,port=80,source=localhost respawned=0i 1539163505000000000
-> nginx_plus_api_connections,host=localhost,port=80,source=localhost accepted=120890747i,active=6i,dropped=0i,idle=67i 1539163505000000000
-> nginx_plus_api_ssl,host=localhost,port=80,source=localhost handshakes=2983938i,handshakes_failed=54350i,session_reuses=2485267i 1539163506000000000
-> nginx_plus_api_http_requests,host=localhost,port=80,source=localhost current=12i,total=175270198i 1539163506000000000
-> nginx_plus_api_http_server_zones,host=localhost,port=80,source=localhost,zone=hg.nginx.org discarded=45i,processing=0i,received=35723884i,requests=134102i,responses_1xx=0i,responses_2xx=96890i,responses_3xx=6892i,responses_4xx=30270i,responses_5xx=5i,responses_total=134057i,sent=3681826618i 1539163506000000000
-> nginx_plus_api_http_server_zones,host=localhost,port=80,source=localhost,zone=trac.nginx.org discarded=4034i,processing=9i,received=282399663i,requests=336129i,responses_1xx=0i,responses_2xx=101264i,responses_3xx=25454i,responses_4xx=68961i,responses_5xx=136407i,responses_total=332086i,sent=2346677493i 1539163506000000000
-> nginx_plus_api_http_server_zones,host=localhost,port=80,source=localhost,zone=lxr.nginx.org discarded=4i,processing=1i,received=7223569i,requests=29661i,responses_1xx=0i,responses_2xx=28584i,responses_3xx=73i,responses_4xx=390i,responses_5xx=609i,responses_total=29656i,sent=5811238975i 1539163506000000000
-> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=trac-backend keepalive=0i,zombies=0i 1539163506000000000
-> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=trac-backend,upstream_address=10.0.0.1:8080 active=0i,backup=false,downtime=53870i,fails=5i,header_time=421i,healthchecks_checks=17275i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=1885213684i,requests=88476i,response_time=423i,responses_1xx=0i,responses_2xx=50997i,responses_3xx=205i,responses_4xx=34344i,responses_5xx=2076i,responses_total=87622i,sent=189938404i,state="up",unavail=5i,weight=1i 1539163506000000000
-> nginx_plus_api_http_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=trac-backend,upstream_address=10.0.0.1:8081 active=0i,backup=true,downtime=173957231i,fails=0i,healthchecks_checks=17394i,healthchecks_fails=17394i,healthchecks_last_passed=false,healthchecks_unhealthy=1i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="unhealthy",unavail=0i,weight=1i 1539163506000000000
-> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=hg-backend keepalive=0i,zombies=0i 1539163506000000000
-> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=hg-backend,upstream_address=10.0.0.1:8088 active=0i,backup=false,downtime=0i,fails=0i,header_time=22i,healthchecks_checks=17319i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=3724240605i,requests=89563i,response_time=44i,responses_1xx=0i,responses_2xx=81996i,responses_3xx=6886i,responses_4xx=639i,responses_5xx=5i,responses_total=89526i,sent=31597952i,state="up",unavail=0i,weight=5i 1539163506000000000
-> nginx_plus_api_http_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=hg-backend,upstream_address=10.0.0.1:8089 active=0i,backup=true,downtime=173957231i,fails=0i,healthchecks_checks=17394i,healthchecks_fails=17394i,healthchecks_last_passed=false,healthchecks_unhealthy=1i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="unhealthy",unavail=0i,weight=1i 1539163506000000000
-> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=lxr-backend keepalive=0i,zombies=0i 1539163506000000000
-> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=lxr-backend,upstream_address=unix:/tmp/cgi.sock active=0i,backup=false,downtime=0i,fails=609i,header_time=111i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=6220215064i,requests=28278i,response_time=172i,responses_1xx=0i,responses_2xx=27665i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=27665i,sent=21337016i,state="up",unavail=0i,weight=1i 1539163506000000000
-> nginx_plus_api_http_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=lxr-backend,upstream_address=unix:/tmp/cgib.sock active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,max_conns=42i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1539163506000000000
-> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=demo-backend keepalive=0i,zombies=0i 1539163506000000000
-> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=demo-backend,upstream_address=10.0.0.2:15431 active=0i,backup=false,downtime=0i,fails=0i,healthchecks_checks=173640i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1539163506000000000
-> nginx_plus_api_http_caches,cache=http_cache,host=localhost,port=80,source=localhost bypass_bytes=0i,bypass_bytes_written=0i,bypass_responses=0i,bypass_responses_written=0i,cold=false,expired_bytes=133671410i,expired_bytes_written=129210272i,expired_responses=15721i,expired_responses_written=15213i,hit_bytes=2459840828i,hit_responses=231195i,max_size=536870912i,miss_bytes=18742246i,miss_bytes_written=85199i,miss_responses=2816i,miss_responses_written=69i,revalidated_bytes=0i,revalidated_responses=0i,size=774144i,stale_bytes=0i,stale_responses=0i,updating_bytes=0i,updating_responses=0i 1539163506000000000
-> nginx_plus_api_stream_server_zones,host=localhost,port=80,source=localhost,zone=postgresql_loadbalancer connections=173639i,processing=0i,received=17884817i,sent=33685966i 1539163506000000000
-> nginx_plus_api_stream_server_zones,host=localhost,port=80,source=localhost,zone=dns_loadbalancer connections=97255i,processing=0i,received=2699082i,sent=16566552i 1539163506000000000
-> nginx_plus_api_stream_upstreams,host=localhost,port=80,source=localhost,upstream=postgresql_backends zombies=0i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15432 active=0i,backup=false,connect_time=4i,connections=57880i,downtime=0i,fails=0i,first_byte_time=10i,healthchecks_checks=34781i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=11228720i,response_time=10i,sent=5961640i,state="up",unavail=0i,weight=1i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15433 active=0i,backup=false,connect_time=3i,connections=57880i,downtime=0i,fails=0i,first_byte_time=9i,healthchecks_checks=34781i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=11228720i,response_time=10i,sent=5961640i,state="up",unavail=0i,weight=1i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=2,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15434 active=0i,backup=false,connect_time=2i,connections=57879i,downtime=0i,fails=0i,first_byte_time=9i,healthchecks_checks=34781i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=11228526i,response_time=9i,sent=5961537i,state="up",unavail=0i,weight=1i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=3,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15435 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000
-> nginx_plus_api_stream_upstreams,host=localhost,port=80,source=localhost,upstream=dns_udp_backends zombies=0i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=dns_udp_backends,upstream_address=10.0.0.5:53 active=0i,backup=false,connect_time=0i,connections=64837i,downtime=0i,fails=0i,first_byte_time=17i,healthchecks_checks=34761i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=10996616i,response_time=17i,sent=1791693i,state="up",unavail=0i,weight=2i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=dns_udp_backends,upstream_address=10.0.0.2:53 active=0i,backup=false,connect_time=0i,connections=32418i,downtime=0i,fails=0i,first_byte_time=17i,healthchecks_checks=34761i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=5569936i,response_time=17i,sent=907389i,state="up",unavail=0i,weight=1i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=2,port=80,source=localhost,upstream=dns_udp_backends,upstream_address=10.0.0.7:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000
-> nginx_plus_api_stream_upstreams,host=localhost,port=80,source=localhost,upstream=unused_tcp_backends zombies=0i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=95.211.80.227:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=2,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=206.251.255.63:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=3,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=[2001:1af8:4060:a004:21::e3]:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000
-> nginx_plus_api_stream_upstream_peers,host=localhost,id=4,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=[2606:7100:1:69::3f]:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000
+> nginx_plus_api_processes,port=80,source=demo.nginx.com respawned=0i 1570696321000000000
+> nginx_plus_api_connections,port=80,source=demo.nginx.com accepted=68998606i,active=7i,dropped=0i,idle=57i 1570696322000000000
+> nginx_plus_api_ssl,port=80,source=demo.nginx.com handshakes=9398978i,handshakes_failed=289353i,session_reuses=1004389i 1570696322000000000
+> nginx_plus_api_http_requests,port=80,source=demo.nginx.com current=51i,total=264649353i 1570696322000000000
+> nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=hg.nginx.org discarded=5i,processing=0i,received=24123604i,requests=60138i,responses_1xx=0i,responses_2xx=59353i,responses_3xx=531i,responses_4xx=249i,responses_5xx=0i,responses_total=60133i,sent=830165221i 1570696322000000000
+> nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=trac.nginx.org discarded=250i,processing=0i,received=2184618i,requests=12404i,responses_1xx=0i,responses_2xx=8579i,responses_3xx=2513i,responses_4xx=583i,responses_5xx=479i,responses_total=12154i,sent=139384159i 1570696322000000000
+> nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=lxr.nginx.org discarded=1i,processing=0i,received=1011701i,requests=4523i,responses_1xx=0i,responses_2xx=4332i,responses_3xx=28i,responses_4xx=39i,responses_5xx=123i,responses_total=4522i,sent=72631354i 1570696322000000000
+> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=trac-backend keepalive=0i,zombies=0i 1570696322000000000
+> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=trac-backend,upstream_address=10.0.0.1:8080 active=0i,backup=false,downtime=0i,fails=0i,header_time=235i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=88581178i,requests=3180i,response_time=235i,responses_1xx=0i,responses_2xx=3168i,responses_3xx=5i,responses_4xx=6i,responses_5xx=0i,responses_total=3179i,sent=1321720i,state="up",unavail=0i,weight=1i 1570696322000000000
+> nginx_plus_api_http_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=trac-backend,upstream_address=10.0.0.1:8081 active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000
+> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=hg-backend keepalive=0i,zombies=0i 1570696322000000000
+> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=hg-backend,upstream_address=10.0.0.1:8088 active=0i,backup=false,downtime=0i,fails=0i,header_time=22i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=909402572i,requests=18514i,response_time=88i,responses_1xx=0i,responses_2xx=17799i,responses_3xx=531i,responses_4xx=179i,responses_5xx=0i,responses_total=18509i,sent=10608107i,state="up",unavail=0i,weight=5i 1570696322000000000
+> nginx_plus_api_http_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=hg-backend,upstream_address=10.0.0.1:8089 active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000
+> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=lxr-backend keepalive=0i,zombies=0i 1570696322000000000
+> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=lxr-backend,upstream_address=unix:/tmp/cgi.sock active=0i,backup=false,downtime=0i,fails=123i,header_time=91i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=71782888i,requests=4354i,response_time=91i,responses_1xx=0i,responses_2xx=4230i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=4230i,sent=3088656i,state="up",unavail=0i,weight=1i 1570696322000000000
+> nginx_plus_api_http_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=lxr-backend,upstream_address=unix:/tmp/cgib.sock active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,max_conns=42i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000
+> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=demo-backend keepalive=0i,zombies=0i 1570696322000000000
+> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=demo-backend,upstream_address=10.0.0.2:15431 active=0i,backup=false,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000
+> nginx_plus_api_http_caches,cache=http_cache,port=80,source=demo.nginx.com bypass_bytes=0i,bypass_bytes_written=0i,bypass_responses=0i,bypass_responses_written=0i,cold=false,expired_bytes=381518640i,expired_bytes_written=363449785i,expired_responses=42114i,expired_responses_written=39954i,hit_bytes=6321885979i,hit_responses=596730i,max_size=536870912i,miss_bytes=48512185i,miss_bytes_written=155600i,miss_responses=6052i,miss_responses_written=136i,revalidated_bytes=0i,revalidated_responses=0i,size=765952i,stale_bytes=0i,stale_responses=0i,updating_bytes=0i,updating_responses=0i 1570696323000000000
+> nginx_plus_api_stream_server_zones,port=80,source=demo.nginx.com,zone=postgresql_loadbalancer connections=0i,processing=0i,received=0i,sent=0i 1570696323000000000
+> nginx_plus_api_stream_server_zones,port=80,source=demo.nginx.com,zone=dns_loadbalancer connections=0i,processing=0i,received=0i,sent=0i 1570696323000000000
+> nginx_plus_api_stream_upstreams,port=80,source=demo.nginx.com,upstream=postgresql_backends zombies=0i 1570696323000000000
+> nginx_plus_api_stream_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15432 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000
+> nginx_plus_api_stream_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15433 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000
+> nginx_plus_api_stream_upstream_peers,id=2,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15434 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000
+> nginx_plus_api_stream_upstream_peers,id=3,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15435 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1570696323000000000
+> nginx_plus_api_stream_upstreams,port=80,source=demo.nginx.com,upstream=dns_udp_backends zombies=0i 1570696323000000000
+> nginx_plus_api_stream_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=dns_udp_backends,upstream_address=10.0.0.5:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=2i 1570696323000000000
+> nginx_plus_api_stream_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=dns_udp_backends,upstream_address=10.0.0.2:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000
+> nginx_plus_api_stream_upstream_peers,id=2,port=80,source=demo.nginx.com,upstream=dns_udp_backends,upstream_address=10.0.0.7:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1570696323000000000
+> nginx_plus_api_stream_upstreams,port=80,source=demo.nginx.com,upstream=unused_tcp_backends zombies=0i 1570696323000000000
+> nginx_plus_api_http_location_zones,port=80,source=demo.nginx.com,zone=swagger discarded=0i,received=1622i,requests=8i,responses_1xx=0i,responses_2xx=7i,responses_3xx=0i,responses_4xx=1i,responses_5xx=0i,responses_total=8i,sent=638333i 1570696323000000000
+> nginx_plus_api_http_location_zones,port=80,source=demo.nginx.com,zone=api-calls discarded=64i,received=337530181i,requests=1726513i,responses_1xx=0i,responses_2xx=1726428i,responses_3xx=0i,responses_4xx=21i,responses_5xx=0i,responses_total=1726449i,sent=1902577668i 1570696323000000000
+> nginx_plus_api_resolver_zones,port=80,source=demo.nginx.com,zone=resolver1 addr=0i,formerr=0i,name=0i,noerror=0i,notimp=0i,nxdomain=0i,refused=0i,servfail=0i,srv=0i,timedout=0i,unknown=0i 1570696324000000000
```
### Reference material
diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go
index d44f793f16c8c..8ec1ea0f7725f 100644
--- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go
+++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go
@@ -9,17 +9,17 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
type NginxPlusApi struct {
- Urls []string
-
- ApiVersion int64
+ Urls []string `toml:"urls"`
+ ApiVersion int64 `toml:"api_version"`
+ ResponseTimeout internal.Duration `toml:"response_timeout"`
+ tls.ClientConfig
client *http.Client
-
- ResponseTimeout internal.Duration
}
const (
@@ -31,10 +31,13 @@ const (
connectionsPath = "connections"
sslPath = "ssl"
- httpRequestsPath = "http/requests"
- httpServerZonesPath = "http/server_zones"
- httpUpstreamsPath = "http/upstreams"
- httpCachesPath = "http/caches"
+ httpRequestsPath = "http/requests"
+ httpServerZonesPath = "http/server_zones"
+ httpLocationZonesPath = "http/location_zones"
+ httpUpstreamsPath = "http/upstreams"
+ httpCachesPath = "http/caches"
+
+ resolverZonesPath = "resolvers"
streamServerZonesPath = "stream/server_zones"
streamUpstreamsPath = "stream/upstreams"
@@ -49,6 +52,13 @@ var sampleConfig = `
# HTTP response timeout (default: 5s)
response_timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
`
func (n *NginxPlusApi) SampleConfig() string {
@@ -100,9 +110,16 @@ func (n *NginxPlusApi) createHttpClient() (*http.Client, error) {
n.ResponseTimeout.Duration = time.Second * 5
}
+ tlsConfig, err := n.ClientConfig.TLSConfig()
+ if err != nil {
+ return nil, err
+ }
+
client := &http.Client{
- Transport: &http.Transport{},
- Timeout: n.ResponseTimeout.Duration,
+ Transport: &http.Transport{
+ TLSClientConfig: tlsConfig,
+ },
+ Timeout: n.ResponseTimeout.Duration,
}
return client, nil
diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go
index 5583670e4b9ac..6aaaff2d344c7 100644
--- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go
+++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go
@@ -2,6 +2,7 @@ package nginx_plus_api
import (
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"net"
@@ -13,16 +14,38 @@ import (
"github.com/influxdata/telegraf"
)
+var (
+ // errNotFound signals that the NGINX API routes does not exist.
+ errNotFound = errors.New("not found")
+)
+
func (n *NginxPlusApi) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) {
- acc.AddError(n.gatherProcessesMetrics(addr, acc))
- acc.AddError(n.gatherConnectionsMetrics(addr, acc))
- acc.AddError(n.gatherSslMetrics(addr, acc))
- acc.AddError(n.gatherHttpRequestsMetrics(addr, acc))
- acc.AddError(n.gatherHttpServerZonesMetrics(addr, acc))
- acc.AddError(n.gatherHttpUpstreamsMetrics(addr, acc))
- acc.AddError(n.gatherHttpCachesMetrics(addr, acc))
- acc.AddError(n.gatherStreamServerZonesMetrics(addr, acc))
- acc.AddError(n.gatherStreamUpstreamsMetrics(addr, acc))
+ addError(acc, n.gatherProcessesMetrics(addr, acc))
+ addError(acc, n.gatherConnectionsMetrics(addr, acc))
+ addError(acc, n.gatherSslMetrics(addr, acc))
+ addError(acc, n.gatherHttpRequestsMetrics(addr, acc))
+ addError(acc, n.gatherHttpServerZonesMetrics(addr, acc))
+ addError(acc, n.gatherHttpUpstreamsMetrics(addr, acc))
+ addError(acc, n.gatherHttpCachesMetrics(addr, acc))
+ addError(acc, n.gatherStreamServerZonesMetrics(addr, acc))
+ addError(acc, n.gatherStreamUpstreamsMetrics(addr, acc))
+
+ if n.ApiVersion >= 5 {
+ addError(acc, n.gatherHttpLocationZonesMetrics(addr, acc))
+ addError(acc, n.gatherResolverZonesMetrics(addr, acc))
+ }
+}
+
+func addError(acc telegraf.Accumulator, err error) {
+ // This plugin has hardcoded API resource paths it checks that may not
+ // be in the nginx.conf. Currently, this is to prevent logging of
+ // paths that are not configured.
+ //
+ // The correct solution is to do a GET to /api to get the available paths
+ // on the server rather than simply ignore.
+ if err != errNotFound {
+ acc.AddError(err)
+ }
}
func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) {
@@ -30,12 +53,20 @@ func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) {
resp, err := n.client.Get(url)
if err != nil {
- return nil, fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
+ return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err)
}
defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ case http.StatusNotFound:
+ // format as special error to catch and ignore as some nginx API
+ // features are either optional, or only available in some versions
+ return nil, errNotFound
+ default:
+ return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
}
+
contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0]
switch contentType {
case "application/json":
@@ -195,6 +226,53 @@ func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf.
return nil
}
+// Added in 5 API version
+func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
+ body, err := n.gatherUrl(addr, httpLocationZonesPath)
+ if err != nil {
+ return err
+ }
+
+ var httpLocationZones HttpLocationZones
+
+ if err := json.Unmarshal(body, &httpLocationZones); err != nil {
+ return err
+ }
+
+ tags := getTags(addr)
+
+ for zoneName, zone := range httpLocationZones {
+ zoneTags := map[string]string{}
+ for k, v := range tags {
+ zoneTags[k] = v
+ }
+ zoneTags["zone"] = zoneName
+ acc.AddFields(
+ "nginx_plus_api_http_location_zones",
+ func() map[string]interface{} {
+ result := map[string]interface{}{
+ "requests": zone.Requests,
+ "responses_1xx": zone.Responses.Responses1xx,
+ "responses_2xx": zone.Responses.Responses2xx,
+ "responses_3xx": zone.Responses.Responses3xx,
+ "responses_4xx": zone.Responses.Responses4xx,
+ "responses_5xx": zone.Responses.Responses5xx,
+ "responses_total": zone.Responses.Total,
+ "received": zone.Received,
+ "sent": zone.Sent,
+ }
+ if zone.Discarded != nil {
+ result["discarded"] = *zone.Discarded
+ }
+ return result
+ }(),
+ zoneTags,
+ )
+ }
+
+ return nil
+}
+
func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, httpUpstreamsPath)
if err != nil {
@@ -368,6 +446,50 @@ func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegra
return nil
}
+// Added in 5 API version
+func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
+ body, err := n.gatherUrl(addr, resolverZonesPath)
+ if err != nil {
+ return err
+ }
+
+ var resolverZones ResolverZones
+
+ if err := json.Unmarshal(body, &resolverZones); err != nil {
+ return err
+ }
+
+ tags := getTags(addr)
+
+ for zoneName, resolver := range resolverZones {
+ zoneTags := map[string]string{}
+ for k, v := range tags {
+ zoneTags[k] = v
+ }
+ zoneTags["zone"] = zoneName
+ acc.AddFields(
+ "nginx_plus_api_resolver_zones",
+ map[string]interface{}{
+ "name": resolver.Requests.Name,
+ "srv": resolver.Requests.Srv,
+ "addr": resolver.Requests.Addr,
+
+ "noerror": resolver.Responses.Noerror,
+ "formerr": resolver.Responses.Formerr,
+ "servfail": resolver.Responses.Servfail,
+ "nxdomain": resolver.Responses.Nxdomain,
+ "notimp": resolver.Responses.Notimp,
+ "refused": resolver.Responses.Refused,
+ "timedout": resolver.Responses.Timedout,
+ "unknown": resolver.Responses.Unknown,
+ },
+ zoneTags,
+ )
+ }
+
+ return nil
+}
+
func (n *NginxPlusApi) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, streamUpstreamsPath)
if err != nil {
diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go
index 8105f35fb28ac..f309886cff58e 100644
--- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go
+++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go
@@ -35,6 +35,45 @@ const sslPayload = `
}
`
+const resolverZonesPayload = `
+{
+ "resolver_zone1": {
+ "requests": {
+ "name": 25460,
+ "srv": 130,
+ "addr": 2580
+ },
+ "responses": {
+ "noerror": 26499,
+ "formerr": 0,
+ "servfail": 3,
+ "nxdomain": 0,
+ "notimp": 0,
+ "refused": 0,
+ "timedout": 243,
+ "unknown": 478
+ }
+ },
+ "resolver_zone2": {
+ "requests": {
+ "name": 325460,
+ "srv": 1130,
+ "addr": 12580
+ },
+ "responses": {
+ "noerror": 226499,
+ "formerr": 0,
+ "servfail": 283,
+ "nxdomain": 0,
+ "notimp": 0,
+ "refused": 0,
+ "timedout": 743,
+ "unknown": 1478
+ }
+ }
+}
+`
+
const httpRequestsPayload = `
{
"total": 10624511,
@@ -77,6 +116,39 @@ const httpServerZonesPayload = `
}
`
+const httpLocationZonesPayload = `
+{
+ "site1": {
+ "requests": 736395,
+ "responses": {
+ "1xx": 0,
+ "2xx": 727290,
+ "3xx": 4614,
+ "4xx": 934,
+ "5xx": 1535,
+ "total": 734373
+ },
+ "discarded": 2020,
+ "received": 180157219,
+ "sent": 20183175459
+ },
+ "site2": {
+ "requests": 185307,
+ "responses": {
+ "1xx": 0,
+ "2xx": 112674,
+ "3xx": 45383,
+ "4xx": 2504,
+ "5xx": 4419,
+ "total": 164980
+ },
+ "discarded": 20326,
+ "received": 51575327,
+ "sent": 2983241510
+ }
+}
+`
+
const httpUpstreamsPayload = `
{
"trac-backend": {
@@ -448,11 +520,11 @@ const streamServerZonesPayload = `
`
func TestGatherProcessesMetrics(t *testing.T) {
- ts, n := prepareEndpoint(processesPath, defaultApiVersion, processesPayload)
+ ts, n := prepareEndpoint(t, processesPath, defaultApiVersion, processesPayload)
defer ts.Close()
var acc testutil.Accumulator
- addr, host, port := prepareAddr(ts)
+ addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherProcessesMetrics(addr, &acc))
@@ -468,12 +540,12 @@ func TestGatherProcessesMetrics(t *testing.T) {
})
}
-func TestGatherConnectioinsMetrics(t *testing.T) {
- ts, n := prepareEndpoint(connectionsPath, defaultApiVersion, connectionsPayload)
+func TestGatherConnectionsMetrics(t *testing.T) {
+ ts, n := prepareEndpoint(t, connectionsPath, defaultApiVersion, connectionsPayload)
defer ts.Close()
var acc testutil.Accumulator
- addr, host, port := prepareAddr(ts)
+ addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherConnectionsMetrics(addr, &acc))
@@ -493,11 +565,11 @@ func TestGatherConnectioinsMetrics(t *testing.T) {
}
func TestGatherSslMetrics(t *testing.T) {
- ts, n := prepareEndpoint(sslPath, defaultApiVersion, sslPayload)
+ ts, n := prepareEndpoint(t, sslPath, defaultApiVersion, sslPayload)
defer ts.Close()
var acc testutil.Accumulator
- addr, host, port := prepareAddr(ts)
+ addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherSslMetrics(addr, &acc))
@@ -516,11 +588,11 @@ func TestGatherSslMetrics(t *testing.T) {
}
func TestGatherHttpRequestsMetrics(t *testing.T) {
- ts, n := prepareEndpoint(httpRequestsPath, defaultApiVersion, httpRequestsPayload)
+ ts, n := prepareEndpoint(t, httpRequestsPath, defaultApiVersion, httpRequestsPayload)
defer ts.Close()
var acc testutil.Accumulator
- addr, host, port := prepareAddr(ts)
+ addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherHttpRequestsMetrics(addr, &acc))
@@ -538,11 +610,11 @@ func TestGatherHttpRequestsMetrics(t *testing.T) {
}
func TestGatherHttpServerZonesMetrics(t *testing.T) {
- ts, n := prepareEndpoint(httpServerZonesPath, defaultApiVersion, httpServerZonesPayload)
+ ts, n := prepareEndpoint(t, httpServerZonesPath, defaultApiVersion, httpServerZonesPayload)
defer ts.Close()
var acc testutil.Accumulator
- addr, host, port := prepareAddr(ts)
+ addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherHttpServerZonesMetrics(addr, &acc))
@@ -591,12 +663,64 @@ func TestGatherHttpServerZonesMetrics(t *testing.T) {
})
}
-func TestHatherHttpUpstreamsMetrics(t *testing.T) {
- ts, n := prepareEndpoint(httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload)
+func TestGatherHttpLocationZonesMetrics(t *testing.T) {
+ ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultApiVersion, httpLocationZonesPayload)
defer ts.Close()
var acc testutil.Accumulator
- addr, host, port := prepareAddr(ts)
+ addr, host, port := prepareAddr(t, ts)
+
+ require.NoError(t, n.gatherHttpLocationZonesMetrics(addr, &acc))
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_plus_api_http_location_zones",
+ map[string]interface{}{
+ "discarded": int64(2020),
+ "received": int64(180157219),
+ "requests": int64(736395),
+ "responses_1xx": int64(0),
+ "responses_2xx": int64(727290),
+ "responses_3xx": int64(4614),
+ "responses_4xx": int64(934),
+ "responses_5xx": int64(1535),
+ "responses_total": int64(734373),
+ "sent": int64(20183175459),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "zone": "site1",
+ })
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_plus_api_http_location_zones",
+ map[string]interface{}{
+ "discarded": int64(20326),
+ "received": int64(51575327),
+ "requests": int64(185307),
+ "responses_1xx": int64(0),
+ "responses_2xx": int64(112674),
+ "responses_3xx": int64(45383),
+ "responses_4xx": int64(2504),
+ "responses_5xx": int64(4419),
+ "responses_total": int64(164980),
+ "sent": int64(2983241510),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "zone": "site2",
+ })
+}
+
+func TestGatherHttpUpstreamsMetrics(t *testing.T) {
+ ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload)
+ defer ts.Close()
+
+ var acc testutil.Accumulator
+ addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherHttpUpstreamsMetrics(addr, &acc))
@@ -764,11 +888,11 @@ func TestHatherHttpUpstreamsMetrics(t *testing.T) {
}
func TestGatherHttpCachesMetrics(t *testing.T) {
- ts, n := prepareEndpoint(httpCachesPath, defaultApiVersion, httpCachesPayload)
+ ts, n := prepareEndpoint(t, httpCachesPath, defaultApiVersion, httpCachesPayload)
defer ts.Close()
var acc testutil.Accumulator
- addr, host, port := prepareAddr(ts)
+ addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherHttpCachesMetrics(addr, &acc))
@@ -841,12 +965,66 @@ func TestGatherHttpCachesMetrics(t *testing.T) {
})
}
+func TestGatherResolverZonesMetrics(t *testing.T) {
+ ts, n := prepareEndpoint(t, resolverZonesPath, defaultApiVersion, resolverZonesPayload)
+ defer ts.Close()
+
+ var acc testutil.Accumulator
+ addr, host, port := prepareAddr(t, ts)
+
+ require.NoError(t, n.gatherResolverZonesMetrics(addr, &acc))
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_plus_api_resolver_zones",
+ map[string]interface{}{
+ "name": int64(25460),
+ "srv": int64(130),
+ "addr": int64(2580),
+ "noerror": int64(26499),
+ "formerr": int64(0),
+ "servfail": int64(3),
+ "nxdomain": int64(0),
+ "notimp": int64(0),
+ "refused": int64(0),
+ "timedout": int64(243),
+ "unknown": int64(478),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "zone": "resolver_zone1",
+ })
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_plus_api_resolver_zones",
+ map[string]interface{}{
+ "name": int64(325460),
+ "srv": int64(1130),
+ "addr": int64(12580),
+ "noerror": int64(226499),
+ "formerr": int64(0),
+ "servfail": int64(283),
+ "nxdomain": int64(0),
+ "notimp": int64(0),
+ "refused": int64(0),
+ "timedout": int64(743),
+ "unknown": int64(1478),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "zone": "resolver_zone2",
+ })
+}
+
func TestGatherStreamUpstreams(t *testing.T) {
- ts, n := prepareEndpoint(streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload)
+ ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload)
defer ts.Close()
var acc testutil.Accumulator
- addr, host, port := prepareAddr(ts)
+ addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherStreamUpstreamsMetrics(addr, &acc))
@@ -984,12 +1162,12 @@ func TestGatherStreamUpstreams(t *testing.T) {
}
-func TestGatherStreamServerZonesMatrics(t *testing.T) {
- ts, n := prepareEndpoint(streamServerZonesPath, defaultApiVersion, streamServerZonesPayload)
+func TestGatherStreamServerZonesMetrics(t *testing.T) {
+ ts, n := prepareEndpoint(t, streamServerZonesPath, defaultApiVersion, streamServerZonesPayload)
defer ts.Close()
var acc testutil.Accumulator
- addr, host, port := prepareAddr(ts)
+ addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherStreamServerZonesMetrics(addr, &acc))
@@ -1024,10 +1202,92 @@ func TestGatherStreamServerZonesMatrics(t *testing.T) {
})
}
-func prepareAddr(ts *httptest.Server) (*url.URL, string, string) {
+func TestUnavailableEndpoints(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ n := &NginxPlusApi{
+ client: ts.Client(),
+ }
+
+ addr, err := url.Parse(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var acc testutil.Accumulator
+ n.gatherMetrics(addr, &acc)
+ require.NoError(t, acc.FirstError())
+}
+
+func TestServerError(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusInternalServerError)
+ }))
+ defer ts.Close()
+
+ n := &NginxPlusApi{
+ client: ts.Client(),
+ }
+
+ addr, err := url.Parse(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var acc testutil.Accumulator
+ n.gatherMetrics(addr, &acc)
+ require.Error(t, acc.FirstError())
+}
+
+func TestMalformedJSON(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ fmt.Fprintln(w, "this is not JSON")
+ }))
+ defer ts.Close()
+
+ n := &NginxPlusApi{
+ client: ts.Client(),
+ }
+
+ addr, err := url.Parse(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var acc testutil.Accumulator
+ n.gatherMetrics(addr, &acc)
+ require.Error(t, acc.FirstError())
+}
+
+func TestUnknownContentType(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain")
+ }))
+ defer ts.Close()
+
+ n := &NginxPlusApi{
+ client: ts.Client(),
+ }
+
+ addr, err := url.Parse(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var acc testutil.Accumulator
+ n.gatherMetrics(addr, &acc)
+ require.Error(t, acc.FirstError())
+}
+
+func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) {
+ t.Helper()
addr, err := url.Parse(fmt.Sprintf("%s/api", ts.URL))
if err != nil {
- panic(err)
+ t.Fatal(err)
}
host, port, err := net.SplitHostPort(addr.Host)
@@ -1046,7 +1306,7 @@ func prepareAddr(ts *httptest.Server) (*url.URL, string, string) {
return addr, host, port
}
-func prepareEndpoint(path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusApi) {
+func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusApi) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var rsp string
@@ -1054,7 +1314,7 @@ func prepareEndpoint(path string, apiVersion int64, payload string) (*httptest.S
rsp = payload
w.Header()["Content-Type"] = []string{"application/json"}
} else {
- panic("Cannot handle request")
+ t.Errorf("unknown request path")
}
fmt.Fprintln(w, rsp)
@@ -1067,7 +1327,7 @@ func prepareEndpoint(path string, apiVersion int64, payload string) (*httptest.S
client, err := n.createHttpClient()
if err != nil {
- panic(err)
+ t.Fatal(err)
}
n.client = client
diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go
index b8240f8444b7d..868bc04e445eb 100644
--- a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go
+++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go
@@ -17,6 +17,24 @@ type Ssl struct { // added in version 6
SessionReuses int64 `json:"session_reuses"`
}
+type ResolverZones map[string]struct {
+ Requests struct {
+ Name int64 `json:"name"`
+ Srv int64 `json:"srv"`
+ Addr int64 `json:"addr"`
+ } `json:"requests"`
+ Responses struct {
+ Noerror int64 `json:"noerror"`
+ Formerr int64 `json:"formerr"`
+ Servfail int64 `json:"servfail"`
+ Nxdomain int64 `json:"nxdomain"`
+ Notimp int64 `json:"notimp"`
+ Refused int64 `json:"refused"`
+ Timedout int64 `json:"timedout"`
+ Unknown int64 `json:"unknown"`
+ } `json:"responses"`
+}
+
type HttpRequests struct {
Total int64 `json:"total"`
Current int64 `json:"current"`
@@ -40,6 +58,14 @@ type HttpServerZones map[string]struct {
Sent int64 `json:"sent"`
}
+type HttpLocationZones map[string]struct {
+ Requests int64 `json:"requests"`
+ Responses ResponseStats `json:"responses"`
+ Discarded *int64 `json:"discarded"` // added in version 6
+ Received int64 `json:"received"`
+ Sent int64 `json:"sent"`
+}
+
type HealthCheckStats struct {
Checks int64 `json:"checks"`
Fails int64 `json:"fails"`
diff --git a/plugins/inputs/nginx_sts/README.md b/plugins/inputs/nginx_sts/README.md
new file mode 100644
index 0000000000000..935bc9af83c62
--- /dev/null
+++ b/plugins/inputs/nginx_sts/README.md
@@ -0,0 +1,116 @@
+# Nginx Stream STS Input Plugin
+
+This plugin gathers Nginx status using external virtual host traffic status
+module - https://github.com/vozlt/nginx-module-sts. This is an Nginx module
+that provides access to stream host status information. It contains the current
+status such as servers, upstreams, caches. This is similar to the live activity
+monitoring of Nginx plus. For module configuration details please see its
+[documentation](https://github.com/vozlt/nginx-module-sts#synopsis).
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration
+
+```toml
+[[inputs.nginx_sts]]
+ ## An array of ngx_http_status_module or status URI to gather stats.
+ urls = ["http://localhost/status"]
+
+ ## HTTP response timeout (default: 5s)
+ response_timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+```
+
+### Metrics
+
+- nginx_sts_connections
+ - tags:
+ - source
+ - port
+ - fields:
+ - active
+ - reading
+ - writing
+ - waiting
+ - accepted
+ - handled
+ - requests
+
++ nginx_sts_server
+ - tags:
+ - source
+ - port
+ - zone
+ - fields:
+ - connects
+ - in_bytes
+ - out_bytes
+ - response_1xx_count
+ - response_2xx_count
+ - response_3xx_count
+ - response_4xx_count
+ - response_5xx_count
+ - session_msec_counter
+ - session_msec
+
+- nginx_sts_filter
+ - tags:
+ - source
+ - port
+ - filter_name
+ - filter_key
+ - fields:
+ - connects
+ - in_bytes
+ - out_bytes
+ - response_1xx_count
+ - response_2xx_count
+ - response_3xx_count
+ - response_4xx_count
+ - response_5xx_count
+ - session_msec_counter
+ - session_msec
+
++ nginx_sts_upstream
+ - tags:
+ - source
+ - port
+ - upstream
+ - upstream_address
+ - fields:
+ - connects
+ - in_bytes
+ - out_bytes
+ - response_1xx_count
+ - response_2xx_count
+ - response_3xx_count
+ - response_4xx_count
+ - response_5xx_count
+ - session_msec_counter
+ - session_msec
+ - upstream_session_msec_counter
+ - upstream_session_msec
+ - upstream_connect_msec_counter
+ - upstream_connect_msec
+ - upstream_firstbyte_msec_counter
+ - upstream_firstbyte_msec
+ - weight
+ - max_fails
+ - fail_timeout
+ - backup
+ - down
+
+### Example Output:
+
+```
+nginx_sts_upstream,host=localhost,port=80,source=127.0.0.1,upstream=backend_cluster,upstream_address=1.2.3.4:8080 upstream_connect_msec_counter=0i,out_bytes=0i,down=false,connects=0i,session_msec=0i,upstream_session_msec=0i,upstream_session_msec_counter=0i,upstream_connect_msec=0i,upstream_firstbyte_msec_counter=0i,response_3xx_count=0i,session_msec_counter=0i,weight=1i,max_fails=1i,backup=false,upstream_firstbyte_msec=0i,in_bytes=0i,response_1xx_count=0i,response_2xx_count=0i,response_4xx_count=0i,response_5xx_count=0i,fail_timeout=10i 1584699180000000000
+nginx_sts_upstream,host=localhost,port=80,source=127.0.0.1,upstream=backend_cluster,upstream_address=9.8.7.6:8080 upstream_firstbyte_msec_counter=0i,response_2xx_count=0i,down=false,upstream_session_msec_counter=0i,out_bytes=0i,response_5xx_count=0i,weight=1i,max_fails=1i,fail_timeout=10i,connects=0i,session_msec_counter=0i,upstream_session_msec=0i,in_bytes=0i,response_1xx_count=0i,response_3xx_count=0i,response_4xx_count=0i,session_msec=0i,upstream_connect_msec=0i,upstream_connect_msec_counter=0i,upstream_firstbyte_msec=0i,backup=false 1584699180000000000
+nginx_sts_server,host=localhost,port=80,source=127.0.0.1,zone=* response_2xx_count=0i,response_4xx_count=0i,response_5xx_count=0i,session_msec_counter=0i,in_bytes=0i,out_bytes=0i,session_msec=0i,response_1xx_count=0i,response_3xx_count=0i,connects=0i 1584699180000000000
+nginx_sts_connections,host=localhost,port=80,source=127.0.0.1 waiting=1i,accepted=146i,handled=146i,requests=13421i,active=3i,reading=0i,writing=2i 1584699180000000000
+```
diff --git a/plugins/inputs/nginx_sts/nginx_sts.go b/plugins/inputs/nginx_sts/nginx_sts.go
new file mode 100644
index 0000000000000..046460069c65d
--- /dev/null
+++ b/plugins/inputs/nginx_sts/nginx_sts.go
@@ -0,0 +1,304 @@
+package nginx_sts
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type NginxSTS struct {
+ Urls []string `toml:"urls"`
+ ResponseTimeout internal.Duration `toml:"response_timeout"`
+ tls.ClientConfig
+
+ client *http.Client
+}
+
+var sampleConfig = `
+ ## An array of ngx_http_status_module or status URI to gather stats.
+ urls = ["http://localhost/status"]
+
+ ## HTTP response timeout (default: 5s)
+ response_timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+`
+
+func (n *NginxSTS) SampleConfig() string {
+ return sampleConfig
+}
+
+func (n *NginxSTS) Description() string {
+ return "Read Nginx virtual host traffic status module information (nginx-module-sts)"
+}
+
+func (n *NginxSTS) Gather(acc telegraf.Accumulator) error {
+ var wg sync.WaitGroup
+
+ // Create an HTTP client that is re-used for each
+ // collection interval
+
+ if n.client == nil {
+ client, err := n.createHTTPClient()
+ if err != nil {
+ return err
+ }
+ n.client = client
+ }
+
+ for _, u := range n.Urls {
+ addr, err := url.Parse(u)
+ if err != nil {
+ acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
+ continue
+ }
+
+ wg.Add(1)
+ go func(addr *url.URL) {
+ defer wg.Done()
+ acc.AddError(n.gatherURL(addr, acc))
+ }(addr)
+ }
+
+ wg.Wait()
+ return nil
+}
+
+func (n *NginxSTS) createHTTPClient() (*http.Client, error) {
+ if n.ResponseTimeout.Duration < time.Second {
+ n.ResponseTimeout.Duration = time.Second * 5
+ }
+
+ tlsConfig, err := n.ClientConfig.TLSConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ client := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsConfig,
+ },
+ Timeout: n.ResponseTimeout.Duration,
+ }
+
+ return client, nil
+}
+
+func (n *NginxSTS) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
+ resp, err := n.client.Get(addr.String())
+ if err != nil {
+ return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
+ }
+
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
+ }
+ contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0]
+ switch contentType {
+ case "application/json":
+ return gatherStatusURL(bufio.NewReader(resp.Body), getTags(addr), acc)
+ default:
+ return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType)
+ }
+}
+
+type NginxSTSResponse struct {
+ Connections struct {
+ Active uint64 `json:"active"`
+ Reading uint64 `json:"reading"`
+ Writing uint64 `json:"writing"`
+ Waiting uint64 `json:"waiting"`
+ Accepted uint64 `json:"accepted"`
+ Handled uint64 `json:"handled"`
+ Requests uint64 `json:"requests"`
+ } `json:"connections"`
+ Hostname string `json:"hostName"`
+ StreamFilterZones map[string]map[string]Server `json:"streamFilterZones"`
+ StreamServerZones map[string]Server `json:"streamServerZones"`
+ StreamUpstreamZones map[string][]Upstream `json:"streamUpstreamZones"`
+}
+
+type Server struct {
+ ConnectCounter uint64 `json:"connectCounter"`
+ InBytes uint64 `json:"inBytes"`
+ OutBytes uint64 `json:"outBytes"`
+ SessionMsecCounter uint64 `json:"sessionMsecCounter"`
+ SessionMsec uint64 `json:"sessionMsec"`
+ Responses struct {
+ OneXx uint64 `json:"1xx"`
+ TwoXx uint64 `json:"2xx"`
+ ThreeXx uint64 `json:"3xx"`
+ FourXx uint64 `json:"4xx"`
+ FiveXx uint64 `json:"5xx"`
+ } `json:"responses"`
+}
+
+type Upstream struct {
+ Server string `json:"server"`
+ ConnectCounter uint64 `json:"connectCounter"`
+ InBytes uint64 `json:"inBytes"`
+ OutBytes uint64 `json:"outBytes"`
+ Responses struct {
+ OneXx uint64 `json:"1xx"`
+ TwoXx uint64 `json:"2xx"`
+ ThreeXx uint64 `json:"3xx"`
+ FourXx uint64 `json:"4xx"`
+ FiveXx uint64 `json:"5xx"`
+ } `json:"responses"`
+ SessionMsecCounter uint64 `json:"sessionMsecCounter"`
+ SessionMsec uint64 `json:"sessionMsec"`
+ USessionMsecCounter uint64 `json:"uSessionMsecCounter"`
+ USessionMsec uint64 `json:"uSessionMsec"`
+ UConnectMsecCounter uint64 `json:"uConnectMsecCounter"`
+ UConnectMsec uint64 `json:"uConnectMsec"`
+ UFirstByteMsecCounter uint64 `json:"uFirstByteMsecCounter"`
+ UFirstByteMsec uint64 `json:"uFirstByteMsec"`
+ Weight uint64 `json:"weight"`
+ MaxFails uint64 `json:"maxFails"`
+ FailTimeout uint64 `json:"failTimeout"`
+ Backup bool `json:"backup"`
+ Down bool `json:"down"`
+}
+
+func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error {
+ dec := json.NewDecoder(r)
+ status := &NginxSTSResponse{}
+ if err := dec.Decode(status); err != nil {
+ return fmt.Errorf("Error while decoding JSON response")
+ }
+
+ acc.AddFields("nginx_sts_connections", map[string]interface{}{
+ "active": status.Connections.Active,
+ "reading": status.Connections.Reading,
+ "writing": status.Connections.Writing,
+ "waiting": status.Connections.Waiting,
+ "accepted": status.Connections.Accepted,
+ "handled": status.Connections.Handled,
+ "requests": status.Connections.Requests,
+ }, tags)
+
+ for zoneName, zone := range status.StreamServerZones {
+ zoneTags := map[string]string{}
+ for k, v := range tags {
+ zoneTags[k] = v
+ }
+ zoneTags["zone"] = zoneName
+
+ acc.AddFields("nginx_sts_server", map[string]interface{}{
+ "connects": zone.ConnectCounter,
+ "in_bytes": zone.InBytes,
+ "out_bytes": zone.OutBytes,
+ "session_msec_counter": zone.SessionMsecCounter,
+ "session_msec": zone.SessionMsec,
+
+ "response_1xx_count": zone.Responses.OneXx,
+ "response_2xx_count": zone.Responses.TwoXx,
+ "response_3xx_count": zone.Responses.ThreeXx,
+ "response_4xx_count": zone.Responses.FourXx,
+ "response_5xx_count": zone.Responses.FiveXx,
+ }, zoneTags)
+ }
+
+ for filterName, filters := range status.StreamFilterZones {
+ for filterKey, upstream := range filters {
+ filterTags := map[string]string{}
+ for k, v := range tags {
+ filterTags[k] = v
+ }
+ filterTags["filter_key"] = filterKey
+ filterTags["filter_name"] = filterName
+
+ acc.AddFields("nginx_sts_filter", map[string]interface{}{
+ "connects": upstream.ConnectCounter,
+ "in_bytes": upstream.InBytes,
+ "out_bytes": upstream.OutBytes,
+ "session_msec_counter": upstream.SessionMsecCounter,
+ "session_msec": upstream.SessionMsec,
+
+ "response_1xx_count": upstream.Responses.OneXx,
+ "response_2xx_count": upstream.Responses.TwoXx,
+ "response_3xx_count": upstream.Responses.ThreeXx,
+ "response_4xx_count": upstream.Responses.FourXx,
+ "response_5xx_count": upstream.Responses.FiveXx,
+ }, filterTags)
+ }
+ }
+
+ for upstreamName, upstreams := range status.StreamUpstreamZones {
+ for _, upstream := range upstreams {
+ upstreamServerTags := map[string]string{}
+ for k, v := range tags {
+ upstreamServerTags[k] = v
+ }
+ upstreamServerTags["upstream"] = upstreamName
+ upstreamServerTags["upstream_address"] = upstream.Server
+ acc.AddFields("nginx_sts_upstream", map[string]interface{}{
+ "connects": upstream.ConnectCounter,
+ "session_msec": upstream.SessionMsec,
+ "session_msec_counter": upstream.SessionMsecCounter,
+ "upstream_session_msec": upstream.USessionMsec,
+ "upstream_session_msec_counter": upstream.USessionMsecCounter,
+ "upstream_connect_msec": upstream.UConnectMsec,
+ "upstream_connect_msec_counter": upstream.UConnectMsecCounter,
+ "upstream_firstbyte_msec": upstream.UFirstByteMsec,
+ "upstream_firstbyte_msec_counter": upstream.UFirstByteMsecCounter,
+ "in_bytes": upstream.InBytes,
+ "out_bytes": upstream.OutBytes,
+
+ "response_1xx_count": upstream.Responses.OneXx,
+ "response_2xx_count": upstream.Responses.TwoXx,
+ "response_3xx_count": upstream.Responses.ThreeXx,
+ "response_4xx_count": upstream.Responses.FourXx,
+ "response_5xx_count": upstream.Responses.FiveXx,
+
+ "weight": upstream.Weight,
+ "max_fails": upstream.MaxFails,
+ "fail_timeout": upstream.FailTimeout,
+ "backup": upstream.Backup,
+ "down": upstream.Down,
+ }, upstreamServerTags)
+ }
+ }
+
+ return nil
+}
+
+// Get tag(s) for the nginx plugin
+func getTags(addr *url.URL) map[string]string {
+ h := addr.Host
+ host, port, err := net.SplitHostPort(h)
+ if err != nil {
+ host = addr.Host
+ if addr.Scheme == "http" {
+ port = "80"
+ } else if addr.Scheme == "https" {
+ port = "443"
+ } else {
+ port = ""
+ }
+ }
+ return map[string]string{"source": host, "port": port}
+}
+
+func init() {
+ inputs.Add("nginx_sts", func() telegraf.Input {
+ return &NginxSTS{}
+ })
+}
diff --git a/plugins/inputs/nginx_sts/nginx_sts_test.go b/plugins/inputs/nginx_sts/nginx_sts_test.go
new file mode 100644
index 0000000000000..18081eadf7f43
--- /dev/null
+++ b/plugins/inputs/nginx_sts/nginx_sts_test.go
@@ -0,0 +1,398 @@
+package nginx_sts
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+const sampleStatusResponse = `
+{
+ "hostName": "test.example.com",
+ "nginxVersion": "1.12.2",
+ "loadMsec": 1518180328331,
+ "nowMsec": 1518256058416,
+ "connections": {
+ "active": 111,
+ "reading": 222,
+ "writing": 333,
+ "waiting": 444,
+ "accepted": 555,
+ "handled": 666,
+ "requests": 777
+ },
+ "streamServerZones": {
+ "example.com": {
+ "connectCounter": 1415887,
+ "inBytes": 1296356607,
+ "outBytes": 4404939605,
+ "responses": {
+ "1xx": 100,
+ "2xx": 200,
+ "3xx": 300,
+ "4xx": 400,
+ "5xx": 500
+ },
+ "sessionMsecCounter": 13,
+ "sessionMsec": 14
+ },
+ "other.example.com": {
+ "connectCounter": 505,
+ "inBytes": 171388,
+ "outBytes": 1273382,
+ "responses": {
+ "1xx": 101,
+ "2xx": 201,
+ "3xx": 301,
+ "4xx": 401,
+ "5xx": 501
+ },
+ "sessionMsecCounter": 12,
+ "sessionMsec": 15
+ }
+ },
+ "streamFilterZones": {
+ "country": {
+ "FI": {
+ "connectCounter": 60,
+ "inBytes": 2570,
+ "outBytes": 53597,
+ "responses": {
+ "1xx": 106,
+ "2xx": 206,
+ "3xx": 306,
+ "4xx": 406,
+ "5xx": 506
+ },
+ "sessionMsecCounter": 12,
+ "sessionMsec": 15
+ }
+ }
+ },
+ "streamUpstreamZones": {
+ "backend_cluster": [
+ {
+ "server": "127.0.0.1:6000",
+ "connectCounter": 2103849,
+ "inBytes": 1774680141,
+ "outBytes": 11727669190,
+ "responses": {
+ "1xx": 103,
+ "2xx": 203,
+ "3xx": 303,
+ "4xx": 403,
+ "5xx": 503
+ },
+ "sessionMsecCounter": 31,
+ "sessionMsec": 131,
+ "uSessionMsecCounter": 32,
+ "uSessionMsec": 132,
+ "uConnectMsecCounter": 33,
+ "uConnectMsec": 130,
+ "uFirstByteMsecCounter": 34,
+ "uFirstByteMsec": 129,
+ "weight": 32,
+ "maxFails": 33,
+ "failTimeout": 34,
+ "backup": false,
+ "down": false
+ }
+ ],
+ "::nogroups": [
+ {
+ "server": "127.0.0.1:4433",
+ "connectCounter": 8,
+ "inBytes": 5013,
+ "outBytes": 487585,
+ "responses": {
+ "1xx": 104,
+ "2xx": 204,
+ "3xx": 304,
+ "4xx": 404,
+ "5xx": 504
+ },
+ "sessionMsecCounter": 31,
+ "sessionMsec": 131,
+ "uSessionMsecCounter": 32,
+ "uSessionMsec": 132,
+ "uConnectMsecCounter": 33,
+ "uConnectMsec": 130,
+ "uFirstByteMsecCounter": 34,
+ "uFirstByteMsec": 129,
+ "weight": 36,
+ "maxFails": 37,
+ "failTimeout": 38,
+ "backup": true,
+ "down": false
+ },
+ {
+ "server": "127.0.0.1:8080",
+ "connectCounter": 7,
+ "inBytes": 2926,
+ "outBytes": 3846638,
+ "responses": {
+ "1xx": 105,
+ "2xx": 205,
+ "3xx": 305,
+ "4xx": 405,
+ "5xx": 505
+ },
+ "sessionMsecCounter": 31,
+ "sessionMsec": 131,
+ "uSessionMsecCounter": 32,
+ "uSessionMsec": 132,
+ "uConnectMsecCounter": 33,
+ "uConnectMsec": 130,
+ "uFirstByteMsecCounter": 34,
+ "uFirstByteMsec": 129,
+ "weight": 41,
+ "maxFails": 42,
+ "failTimeout": 43,
+ "backup": true,
+ "down": true
+ }
+ ]
+ }
+}
+`
+
+func TestNginxPlusGeneratesMetrics(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var rsp string
+
+ if r.URL.Path == "/status" {
+ rsp = sampleStatusResponse
+ w.Header()["Content-Type"] = []string{"application/json"}
+ } else {
+ panic("Cannot handle request")
+ }
+
+ fmt.Fprintln(w, rsp)
+ }))
+ defer ts.Close()
+
+ n := &NginxSTS{
+ Urls: []string{fmt.Sprintf("%s/status", ts.URL)},
+ }
+
+ var acc testutil.Accumulator
+
+ err := n.Gather(&acc)
+
+ require.NoError(t, err)
+
+ addr, err := url.Parse(ts.URL)
+ if err != nil {
+ panic(err)
+ }
+
+ host, port, err := net.SplitHostPort(addr.Host)
+ if err != nil {
+ host = addr.Host
+ if addr.Scheme == "http" {
+ port = "80"
+ } else if addr.Scheme == "https" {
+ port = "443"
+ } else {
+ port = ""
+ }
+ }
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_sts_connections",
+ map[string]interface{}{
+ "accepted": uint64(555),
+ "active": uint64(111),
+ "handled": uint64(666),
+ "reading": uint64(222),
+ "requests": uint64(777),
+ "waiting": uint64(444),
+ "writing": uint64(333),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ })
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_sts_server",
+ map[string]interface{}{
+ "connects": uint64(1415887),
+ "in_bytes": uint64(1296356607),
+ "out_bytes": uint64(4404939605),
+ "session_msec_counter": uint64(13),
+ "session_msec": uint64(14),
+
+ "response_1xx_count": uint64(100),
+ "response_2xx_count": uint64(200),
+ "response_3xx_count": uint64(300),
+ "response_4xx_count": uint64(400),
+ "response_5xx_count": uint64(500),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "zone": "example.com",
+ })
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_sts_filter",
+ map[string]interface{}{
+ "connects": uint64(60),
+ "in_bytes": uint64(2570),
+ "out_bytes": uint64(53597),
+ "session_msec_counter": uint64(12),
+ "session_msec": uint64(15),
+
+ "response_1xx_count": uint64(106),
+ "response_2xx_count": uint64(206),
+ "response_3xx_count": uint64(306),
+ "response_4xx_count": uint64(406),
+ "response_5xx_count": uint64(506),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "filter_key": "FI",
+ "filter_name": "country",
+ })
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_sts_server",
+ map[string]interface{}{
+ "connects": uint64(505),
+ "in_bytes": uint64(171388),
+ "out_bytes": uint64(1273382),
+ "session_msec_counter": uint64(12),
+ "session_msec": uint64(15),
+
+ "response_1xx_count": uint64(101),
+ "response_2xx_count": uint64(201),
+ "response_3xx_count": uint64(301),
+ "response_4xx_count": uint64(401),
+ "response_5xx_count": uint64(501),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "zone": "other.example.com",
+ })
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_sts_upstream",
+ map[string]interface{}{
+ "connects": uint64(2103849),
+ "in_bytes": uint64(1774680141),
+ "out_bytes": uint64(11727669190),
+
+ "response_1xx_count": uint64(103),
+ "response_2xx_count": uint64(203),
+ "response_3xx_count": uint64(303),
+ "response_4xx_count": uint64(403),
+ "response_5xx_count": uint64(503),
+
+ "session_msec_counter": uint64(31),
+ "session_msec": uint64(131),
+ "upstream_session_msec_counter": uint64(32),
+ "upstream_session_msec": uint64(132),
+ "upstream_connect_msec_counter": uint64(33),
+ "upstream_connect_msec": uint64(130),
+ "upstream_firstbyte_msec_counter": uint64(34),
+ "upstream_firstbyte_msec": uint64(129),
+
+ "weight": uint64(32),
+ "max_fails": uint64(33),
+ "fail_timeout": uint64(34),
+ "backup": bool(false),
+ "down": bool(false),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "upstream": "backend_cluster",
+ "upstream_address": "127.0.0.1:6000",
+ })
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_sts_upstream",
+ map[string]interface{}{
+ "connects": uint64(8),
+ "in_bytes": uint64(5013),
+ "out_bytes": uint64(487585),
+
+ "response_1xx_count": uint64(104),
+ "response_2xx_count": uint64(204),
+ "response_3xx_count": uint64(304),
+ "response_4xx_count": uint64(404),
+ "response_5xx_count": uint64(504),
+
+ "session_msec_counter": uint64(31),
+ "session_msec": uint64(131),
+ "upstream_session_msec_counter": uint64(32),
+ "upstream_session_msec": uint64(132),
+ "upstream_connect_msec_counter": uint64(33),
+ "upstream_connect_msec": uint64(130),
+ "upstream_firstbyte_msec_counter": uint64(34),
+ "upstream_firstbyte_msec": uint64(129),
+
+ "weight": uint64(36),
+ "max_fails": uint64(37),
+ "fail_timeout": uint64(38),
+ "backup": bool(true),
+ "down": bool(false),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "upstream": "::nogroups",
+ "upstream_address": "127.0.0.1:4433",
+ })
+
+ acc.AssertContainsTaggedFields(
+ t,
+ "nginx_sts_upstream",
+ map[string]interface{}{
+ "connects": uint64(7),
+ "in_bytes": uint64(2926),
+ "out_bytes": uint64(3846638),
+
+ "response_1xx_count": uint64(105),
+ "response_2xx_count": uint64(205),
+ "response_3xx_count": uint64(305),
+ "response_4xx_count": uint64(405),
+ "response_5xx_count": uint64(505),
+
+ "session_msec_counter": uint64(31),
+ "session_msec": uint64(131),
+ "upstream_session_msec_counter": uint64(32),
+ "upstream_session_msec": uint64(132),
+ "upstream_connect_msec_counter": uint64(33),
+ "upstream_connect_msec": uint64(130),
+ "upstream_firstbyte_msec_counter": uint64(34),
+ "upstream_firstbyte_msec": uint64(129),
+
+ "weight": uint64(41),
+ "max_fails": uint64(42),
+ "fail_timeout": uint64(43),
+ "backup": bool(true),
+ "down": bool(true),
+ },
+ map[string]string{
+ "source": host,
+ "port": port,
+ "upstream": "::nogroups",
+ "upstream_address": "127.0.0.1:8080",
+ })
+}
diff --git a/plugins/inputs/nginx_upstream_check/README.md b/plugins/inputs/nginx_upstream_check/README.md
index 4ff76889dc9d4..58bee07be931d 100644
--- a/plugins/inputs/nginx_upstream_check/README.md
+++ b/plugins/inputs/nginx_upstream_check/README.md
@@ -1,4 +1,4 @@
-# Telegraf Plugin: Nginx_upstream_check
+# Nginx Upstream Check Input Plugin
Read the status output of the nginx_upstream_check (https://github.com/yaoweibin/nginx_upstream_check_module).
This module can periodically check the servers in the Nginx's upstream with configured request and interval to determine
@@ -10,7 +10,7 @@ checks. This information can be exported in JSON format and parsed by this input
### Configuration:
-```
+```toml
## An URL where Nginx Upstream check module is enabled
## It should be set to return a JSON formatted response
url = "http://127.0.0.1/status?format=json"
@@ -63,7 +63,7 @@ state of every server and, possible, add some monitoring to watch over it. Influ
### Example Output:
When run with:
-```
+```sh
./telegraf --config telegraf.conf --input-filter nginx_upstream_check --test
```
diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go
index e5a2e096d79b7..0fe2907c9a08a 100644
--- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go
+++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go
@@ -2,14 +2,18 @@ package nginx_upstream_check
import (
"encoding/json"
- "github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
- "github.com/influxdata/telegraf/plugins/inputs"
+ "fmt"
+ "io"
+ "io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
const sampleConfig = `
@@ -44,7 +48,7 @@ const sampleConfig = `
const description = "Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)"
type NginxUpstreamCheck struct {
- URL string `toml:"uls"`
+ URL string `toml:"url"`
Username string `toml:"username"`
Password string `toml:"password"`
@@ -148,6 +152,11 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e
}
defer response.Body.Close()
+ if response.StatusCode != http.StatusOK {
+ // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+ body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200))
+ return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body)
+ }
err = json.NewDecoder(response.Body).Decode(value)
if err != nil {
diff --git a/plugins/inputs/nginx_vts/README.md b/plugins/inputs/nginx_vts/README.md
index ac22b7c2dbdc0..fe9e7fd6ea62f 100644
--- a/plugins/inputs/nginx_vts/README.md
+++ b/plugins/inputs/nginx_vts/README.md
@@ -1,11 +1,11 @@
-# Telegraf Plugin: nginx_vts
+# Nginx Virtual Host Traffic (VTS) Input Plugin
This plugin gathers Nginx status using external virtual host traffic status module - https://github.com/vozlt/nginx-module-vts. This is an Nginx module that provides access to virtual host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus.
For module configuration details please see its [documentation](https://github.com/vozlt/nginx-module-vts#synopsis).
### Configuration:
-```
+```toml
# Read nginx status information using nginx-module-vts module
[[inputs.nginx_vts]]
## An array of Nginx status URIs to gather stats.
@@ -99,14 +99,14 @@ For module configuration details please see its [documentation](https://github.c
### Example Output:
Using this configuration:
-```
+```toml
[[inputs.nginx_vts]]
## An array of Nginx status URIs to gather stats.
urls = ["http://localhost/status"]
```
When run with:
-```
+```sh
./telegraf -config telegraf.conf -input-filter nginx_vts -test
```
diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go
index 66a16e6c19b0a..57453c0b4e3b0 100644
--- a/plugins/inputs/nginx_vts/nginx_vts.go
+++ b/plugins/inputs/nginx_vts/nginx_vts.go
@@ -13,15 +13,16 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
type NginxVTS struct {
- Urls []string
+ Urls []string `toml:"urls"`
+ ResponseTimeout internal.Duration `toml:"response_timeout"`
+ tls.ClientConfig
client *http.Client
-
- ResponseTimeout internal.Duration
}
var sampleConfig = `
@@ -30,6 +31,13 @@ var sampleConfig = `
## HTTP response timeout (default: 5s)
response_timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
`
func (n *NginxVTS) SampleConfig() string {
@@ -77,9 +85,16 @@ func (n *NginxVTS) createHTTPClient() (*http.Client, error) {
n.ResponseTimeout.Duration = time.Second * 5
}
+ tlsConfig, err := n.ClientConfig.TLSConfig()
+ if err != nil {
+ return nil, err
+ }
+
client := &http.Client{
- Transport: &http.Transport{},
- Timeout: n.ResponseTimeout.Duration,
+ Transport: &http.Transport{
+ TLSClientConfig: tlsConfig,
+ },
+ Timeout: n.ResponseTimeout.Duration,
}
return client, nil
diff --git a/plugins/inputs/nsd/README.md b/plugins/inputs/nsd/README.md
new file mode 100644
index 0000000000000..2d7f8833c2db8
--- /dev/null
+++ b/plugins/inputs/nsd/README.md
@@ -0,0 +1,176 @@
+# NSD Input Plugin
+
+This plugin gathers stats from
+[NSD](https://www.nlnetlabs.nl/projects/nsd/about) - an authoritative DNS name
+server.
+
+### Configuration:
+
+```toml
+# A plugin to collect stats from the NSD DNS resolver
+[[inputs.nsd]]
+ ## Address of server to connect to, optionally ':port'. Defaults to the
+ ## address in the nsd config file.
+ server = "127.0.0.1:8953"
+
+ ## If running as a restricted user you can prepend sudo for additional access:
+ # use_sudo = false
+
+ ## The default location of the nsd-control binary can be overridden with:
+ # binary = "/usr/sbin/nsd-control"
+
+ ## The default location of the nsd config file can be overridden with:
+ # config_file = "/etc/nsd/nsd.conf"
+
+ ## The default timeout of 1s can be overridden with:
+ # timeout = "1s"
+```
+
+#### Permissions:
+
+It's important to note that this plugin references nsd-control, which may
+require additional permissions to execute successfully. Depending on the
+user/group permissions of the telegraf user executing this plugin, you may
+need to alter the group membership, set facls, or use sudo.
+
+**Group membership (Recommended)**:
+```bash
+$ groups telegraf
+telegraf : telegraf
+
+$ usermod -a -G nsd telegraf
+
+$ groups telegraf
+telegraf : telegraf nsd
+```
+
+**Sudo privileges**:
+If you use this method, you will need the following in your telegraf config:
+```toml
+[[inputs.nsd]]
+ use_sudo = true
+```
+
+You will also need to update your sudoers file:
+```bash
+$ visudo
+# Add the following line:
+Cmnd_Alias NSDCONTROLCTL = /usr/sbin/nsd-control
+telegraf ALL=(ALL) NOPASSWD: NSDCONTROLCTL
+Defaults!NSDCONTROLCTL !logfile, !syslog, !pam_session
+```
+
+Please use the solution you see as most appropriate.
+
+### Metrics:
+
+This is the full list of stats provided by nsd-control. In the output, the
+dots in the nsd-control stat name are replaced by underscores (see
+https://www.nlnetlabs.nl/documentation/nsd/nsd-control/ for details).
+
+- nsd
+ - fields:
+ - num_queries
+ - time_boot
+ - time_elapsed
+ - size_db_disk
+ - size_db_mem
+ - size_xfrd_mem
+ - size_config_disk
+ - size_config_mem
+ - num_type_TYPE0
+ - num_type_A
+ - num_type_NS
+ - num_type_MD
+ - num_type_MF
+ - num_type_CNAME
+ - num_type_SOA
+ - num_type_MB
+ - num_type_MG
+ - num_type_MR
+ - num_type_NULL
+ - num_type_WKS
+ - num_type_PTR
+ - num_type_HINFO
+ - num_type_MINFO
+ - num_type_MX
+ - num_type_TXT
+ - num_type_RP
+ - num_type_AFSDB
+ - num_type_X25
+ - num_type_ISDN
+ - num_type_RT
+ - num_type_NSAP
+ - num_type_SIG
+ - num_type_KEY
+ - num_type_PX
+ - num_type_AAAA
+ - num_type_LOC
+ - num_type_NXT
+ - num_type_SRV
+ - num_type_NAPTR
+ - num_type_KX
+ - num_type_CERT
+ - num_type_DNAME
+ - num_type_OPT
+ - num_type_APL
+ - num_type_DS
+ - num_type_SSHFP
+ - num_type_IPSECKEY
+ - num_type_RRSIG
+ - num_type_NSEC
+ - num_type_DNSKEY
+ - num_type_DHCID
+ - num_type_NSEC3
+ - num_type_NSEC3PARAM
+ - num_type_TLSA
+ - num_type_SMIMEA
+ - num_type_CDS
+ - num_type_CDNSKEY
+ - num_type_OPENPGPKEY
+ - num_type_CSYNC
+ - num_type_SPF
+ - num_type_NID
+ - num_type_L32
+ - num_type_L64
+ - num_type_LP
+ - num_type_EUI48
+ - num_type_EUI64
+ - num_type_TYPE252
+ - num_type_TYPE253
+ - num_type_TYPE255
+ - num_opcode_QUERY
+ - num_opcode_NOTIFY
+ - num_class_CLASS0
+ - num_class_IN
+ - num_class_CH
+ - num_rcode_NOERROR
+ - num_rcode_FORMERR
+ - num_rcode_SERVFAIL
+ - num_rcode_NXDOMAIN
+ - num_rcode_NOTIMP
+ - num_rcode_REFUSED
+ - num_rcode_YXDOMAIN
+ - num_rcode_NOTAUTH
+ - num_edns
+ - num_ednserr
+ - num_udp
+ - num_udp6
+ - num_tcp
+ - num_tcp6
+ - num_tls
+ - num_tls6
+ - num_answer_wo_aa
+ - num_rxerr
+ - num_txerr
+ - num_raxfr
+ - num_truncated
+ - num_dropped
+ - zone_master
+ - zone_slave
+
+- nsd_servers
+ - tags:
+ - server
+ - fields:
+ - queries
diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go
new file mode 100644
index 0000000000000..3c5d2695dcb33
--- /dev/null
+++ b/plugins/inputs/nsd/nsd.go
@@ -0,0 +1,167 @@
+package nsd
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "net"
+ "os/exec"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/filter"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error)
+
+// NSD is used to store configuration values
+type NSD struct {
+ Binary string
+ Timeout internal.Duration
+ UseSudo bool
+ Server string
+ ConfigFile string
+
+ filter filter.Filter
+ run runner
+}
+
+var defaultBinary = "/usr/sbin/nsd-control"
+var defaultTimeout = internal.Duration{Duration: time.Second}
+
+var sampleConfig = `
+ ## Address of server to connect to, optionally ':port'. Defaults to the
+ ## address in the nsd config file.
+ server = "127.0.0.1:8953"
+
+ ## If running as a restricted user you can prepend sudo for additional access:
+ # use_sudo = false
+
+ ## The default location of the nsd-control binary can be overridden with:
+ # binary = "/usr/sbin/nsd-control"
+
+ ## The default location of the nsd config file can be overridden with:
+ # config_file = "/etc/nsd/nsd.conf"
+
+ ## The default timeout of 1s can be overridden with:
+ # timeout = "1s"
+`
+
+// Description displays what this plugin is about
+func (s *NSD) Description() string {
+ return "A plugin to collect stats from the NSD authoritative DNS name server"
+}
+
+// SampleConfig displays configuration instructions
+func (s *NSD) SampleConfig() string {
+ return sampleConfig
+}
+
+// Shell out to nsd_stat and return the output
+func nsdRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) {
+ cmdArgs := []string{"stats_noreset"}
+
+ if Server != "" {
+ host, port, err := net.SplitHostPort(Server)
+ if err == nil {
+ Server = host + "@" + port
+ }
+
+ cmdArgs = append([]string{"-s", Server}, cmdArgs...)
+ }
+
+ if ConfigFile != "" {
+ cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...)
+ }
+
+ cmd := exec.Command(cmdName, cmdArgs...)
+
+ if UseSudo {
+ cmdArgs = append([]string{cmdName}, cmdArgs...)
+ cmd = exec.Command("sudo", cmdArgs...)
+ }
+
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err := internal.RunTimeout(cmd, Timeout.Duration)
+ if err != nil {
+ return &out, fmt.Errorf("error running nsd-control: %s (%s %v)", err, cmdName, cmdArgs)
+ }
+
+ return &out, nil
+}
+
+// Gather collects stats from nsd-control and adds them to the Accumulator
+func (s *NSD) Gather(acc telegraf.Accumulator) error {
+ out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ConfigFile)
+ if err != nil {
+ return fmt.Errorf("error gathering metrics: %s", err)
+ }
+
+ // Process values
+ fields := make(map[string]interface{})
+ fieldsServers := make(map[string]map[string]interface{})
+
+ scanner := bufio.NewScanner(out)
+ for scanner.Scan() {
+ cols := strings.Split(scanner.Text(), "=")
+
+ // Check split correctness
+ if len(cols) != 2 {
+ continue
+ }
+
+ stat := cols[0]
+ value := cols[1]
+
+ fieldValue, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v",
+ stat, value))
+ continue
+ }
+
+ if strings.HasPrefix(stat, "server") {
+ statTokens := strings.Split(stat, ".")
+ if len(statTokens) > 1 {
+ serverId := strings.TrimPrefix(statTokens[0], "server")
+ if _, err := strconv.Atoi(serverId); err == nil {
+ serverTokens := statTokens[1:]
+ field := strings.Join(serverTokens[:], "_")
+ if fieldsServers[serverId] == nil {
+ fieldsServers[serverId] = make(map[string]interface{})
+ }
+ fieldsServers[serverId][field] = fieldValue
+ }
+ }
+ } else {
+ field := strings.Replace(stat, ".", "_", -1)
+ fields[field] = fieldValue
+ }
+ }
+
+ acc.AddFields("nsd", fields, nil)
+ for thisServerId, thisServerFields := range fieldsServers {
+ thisServerTag := map[string]string{"server": thisServerId}
+ acc.AddFields("nsd_servers", thisServerFields, thisServerTag)
+ }
+
+ return nil
+}
+
+func init() {
+ inputs.Add("nsd", func() telegraf.Input {
+ return &NSD{
+ run: nsdRunner,
+ Binary: defaultBinary,
+ Timeout: defaultTimeout,
+ UseSudo: false,
+ Server: "",
+ ConfigFile: "",
+ }
+ })
+}
diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go
new file mode 100644
index 0000000000000..ee527f7b7f0b2
--- /dev/null
+++ b/plugins/inputs/nsd/nsd_test.go
@@ -0,0 +1,244 @@
+package nsd
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+)
+
+var TestTimeout = internal.Duration{Duration: time.Second}
+
+func NSDControl(output string, Timeout internal.Duration, useSudo bool, Server string, ConfigFile string) func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) {
+ return func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) {
+ return bytes.NewBuffer([]byte(output)), nil
+ }
+}
+
+func TestParseFullOutput(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ v := &NSD{
+ run: NSDControl(fullOutput, TestTimeout, true, "", ""),
+ }
+ err := v.Gather(acc)
+
+ assert.NoError(t, err)
+
+ assert.True(t, acc.HasMeasurement("nsd"))
+ assert.True(t, acc.HasMeasurement("nsd_servers"))
+
+ assert.Len(t, acc.Metrics, 2)
+ assert.Equal(t, 99, acc.NFields())
+
+ acc.AssertContainsFields(t, "nsd", parsedFullOutput)
+ acc.AssertContainsFields(t, "nsd_servers", parsedFullOutputServerAsTag)
+
+}
+
+var parsedFullOutputServerAsTag = map[string]interface{}{
+ "queries": float64(75576),
+}
+
+var parsedFullOutput = map[string]interface{}{
+ "num_queries": float64(75557),
+ "time_boot": float64(2944405.500253),
+ "time_elapsed": float64(2944405.500253),
+ "size_db_disk": float64(98304),
+ "size_db_mem": float64(22784),
+ "size_xfrd_mem": float64(83956312),
+ "size_config_disk": float64(0),
+ "size_config_mem": float64(6088),
+ "num_type_TYPE0": float64(6),
+ "num_type_A": float64(46311),
+ "num_type_NS": float64(478),
+ "num_type_MD": float64(0),
+ "num_type_MF": float64(0),
+ "num_type_CNAME": float64(272),
+ "num_type_SOA": float64(596),
+ "num_type_MB": float64(0),
+ "num_type_MG": float64(0),
+ "num_type_MR": float64(0),
+ "num_type_NULL": float64(0),
+ "num_type_WKS": float64(0),
+ "num_type_PTR": float64(83),
+ "num_type_HINFO": float64(1),
+ "num_type_MINFO": float64(0),
+ "num_type_MX": float64(296),
+ "num_type_TXT": float64(794),
+ "num_type_RP": float64(0),
+ "num_type_AFSDB": float64(0),
+ "num_type_X25": float64(0),
+ "num_type_ISDN": float64(0),
+ "num_type_RT": float64(0),
+ "num_type_NSAP": float64(0),
+ "num_type_SIG": float64(0),
+ "num_type_KEY": float64(1),
+ "num_type_PX": float64(0),
+ "num_type_AAAA": float64(22736),
+ "num_type_LOC": float64(2),
+ "num_type_NXT": float64(0),
+ "num_type_SRV": float64(93),
+ "num_type_NAPTR": float64(5),
+ "num_type_KX": float64(0),
+ "num_type_CERT": float64(0),
+ "num_type_DNAME": float64(0),
+ "num_type_OPT": float64(0),
+ "num_type_APL": float64(0),
+ "num_type_DS": float64(0),
+ "num_type_SSHFP": float64(0),
+ "num_type_IPSECKEY": float64(0),
+ "num_type_RRSIG": float64(21),
+ "num_type_NSEC": float64(0),
+ "num_type_DNSKEY": float64(325),
+ "num_type_DHCID": float64(0),
+ "num_type_NSEC3": float64(0),
+ "num_type_NSEC3PARAM": float64(0),
+ "num_type_TLSA": float64(35),
+ "num_type_SMIMEA": float64(0),
+ "num_type_CDS": float64(0),
+ "num_type_CDNSKEY": float64(0),
+ "num_type_OPENPGPKEY": float64(0),
+ "num_type_CSYNC": float64(0),
+ "num_type_SPF": float64(16),
+ "num_type_NID": float64(0),
+ "num_type_L32": float64(0),
+ "num_type_L64": float64(0),
+ "num_type_LP": float64(0),
+ "num_type_EUI48": float64(0),
+ "num_type_EUI64": float64(0),
+ "num_type_TYPE252": float64(962),
+ "num_type_TYPE253": float64(2),
+ "num_type_TYPE255": float64(1840),
+ "num_opcode_QUERY": float64(75527),
+ "num_opcode_NOTIFY": float64(6),
+ "num_class_CLASS0": float64(6),
+ "num_class_IN": float64(75395),
+ "num_class_CH": float64(132),
+ "num_rcode_NOERROR": float64(65541),
+ "num_rcode_FORMERR": float64(8),
+ "num_rcode_SERVFAIL": float64(0),
+ "num_rcode_NXDOMAIN": float64(6642),
+ "num_rcode_NOTIMP": float64(18),
+ "num_rcode_REFUSED": float64(3341),
+ "num_rcode_YXDOMAIN": float64(0),
+ "num_rcode_NOTAUTH": float64(2),
+ "num_edns": float64(71398),
+ "num_ednserr": float64(0),
+ "num_udp": float64(34111),
+ "num_udp6": float64(40429),
+ "num_tcp": float64(1015),
+ "num_tcp6": float64(2),
+ "num_tls": float64(0),
+ "num_tls6": float64(0),
+ "num_answer_wo_aa": float64(13),
+ "num_rxerr": float64(0),
+ "num_txerr": float64(0),
+ "num_raxfr": float64(954),
+ "num_truncated": float64(1),
+ "num_dropped": float64(5),
+ "zone_master": float64(2),
+ "zone_slave": float64(1),
+}
+
+var fullOutput = `server0.queries=75576
+num.queries=75557
+time.boot=2944405.500253
+time.elapsed=2944405.500253
+size.db.disk=98304
+size.db.mem=22784
+size.xfrd.mem=83956312
+size.config.disk=0
+size.config.mem=6088
+num.type.TYPE0=6
+num.type.A=46311
+num.type.NS=478
+num.type.MD=0
+num.type.MF=0
+num.type.CNAME=272
+num.type.SOA=596
+num.type.MB=0
+num.type.MG=0
+num.type.MR=0
+num.type.NULL=0
+num.type.WKS=0
+num.type.PTR=83
+num.type.HINFO=1
+num.type.MINFO=0
+num.type.MX=296
+num.type.TXT=794
+num.type.RP=0
+num.type.AFSDB=0
+num.type.X25=0
+num.type.ISDN=0
+num.type.RT=0
+num.type.NSAP=0
+num.type.SIG=0
+num.type.KEY=1
+num.type.PX=0
+num.type.AAAA=22736
+num.type.LOC=2
+num.type.NXT=0
+num.type.SRV=93
+num.type.NAPTR=5
+num.type.KX=0
+num.type.CERT=0
+num.type.DNAME=0
+num.type.OPT=0
+num.type.APL=0
+num.type.DS=0
+num.type.SSHFP=0
+num.type.IPSECKEY=0
+num.type.RRSIG=21
+num.type.NSEC=0
+num.type.DNSKEY=325
+num.type.DHCID=0
+num.type.NSEC3=0
+num.type.NSEC3PARAM=0
+num.type.TLSA=35
+num.type.SMIMEA=0
+num.type.CDS=0
+num.type.CDNSKEY=0
+num.type.OPENPGPKEY=0
+num.type.CSYNC=0
+num.type.SPF=16
+num.type.NID=0
+num.type.L32=0
+num.type.L64=0
+num.type.LP=0
+num.type.EUI48=0
+num.type.EUI64=0
+num.type.TYPE252=962
+num.type.TYPE253=2
+num.type.TYPE255=1840
+num.opcode.QUERY=75527
+num.opcode.NOTIFY=6
+num.class.CLASS0=6
+num.class.IN=75395
+num.class.CH=132
+num.rcode.NOERROR=65541
+num.rcode.FORMERR=8
+num.rcode.SERVFAIL=0
+num.rcode.NXDOMAIN=6642
+num.rcode.NOTIMP=18
+num.rcode.REFUSED=3341
+num.rcode.YXDOMAIN=0
+num.rcode.NOTAUTH=2
+num.edns=71398
+num.ednserr=0
+num.udp=34111
+num.udp6=40429
+num.tcp=1015
+num.tcp6=2
+num.tls=0
+num.tls6=0
+num.answer_wo_aa=13
+num.rxerr=0
+num.txerr=0
+num.raxfr=954
+num.truncated=1
+num.dropped=5
+zone.master=2
+zone.slave=1`
diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go
index 5eab48ea5c774..fe941982646b1 100644
--- a/plugins/inputs/nsq/nsq.go
+++ b/plugins/inputs/nsq/nsq.go
@@ -33,7 +33,7 @@ import (
"time"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go
index 1d3b541e5f1ce..23af13a4c82bc 100644
--- a/plugins/inputs/nsq/nsq_test.go
+++ b/plugins/inputs/nsq/nsq_test.go
@@ -151,7 +151,7 @@ func TestNSQStatsV1(t *testing.T) {
}
}
-// v1 version of localhost/stats?format=json reesponse body
+// v1 version of localhost/stats?format=json response body
var responseV1 = `
{
"version": "1.0.0-compat",
diff --git a/plugins/inputs/nsq_consumer/README.md b/plugins/inputs/nsq_consumer/README.md
index 0dae26e8c9584..d1e7194bbd7e0 100644
--- a/plugins/inputs/nsq_consumer/README.md
+++ b/plugins/inputs/nsq_consumer/README.md
@@ -10,8 +10,10 @@ of the supported [input data formats][].
[[inputs.nsq_consumer]]
## Server option still works but is deprecated, we just prepend it to the nsqd array.
# server = "localhost:4150"
+
## An array representing the NSQD TCP HTTP Endpoints
nsqd = ["localhost:4150"]
+
## An array representing the NSQLookupd HTTP Endpoints
nsqlookupd = ["localhost:4161"]
topic = "telegraf"
diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go
index de7572316a375..2c25cce7d8114 100644
--- a/plugins/inputs/nsq_consumer/nsq_consumer.go
+++ b/plugins/inputs/nsq_consumer/nsq_consumer.go
@@ -2,7 +2,6 @@ package nsq_consumer
import (
"context"
- "log"
"sync"
"github.com/influxdata/telegraf"
@@ -18,10 +17,12 @@ const (
type empty struct{}
type semaphore chan empty
-type logger struct{}
+type logger struct {
+ log telegraf.Logger
+}
func (l *logger) Output(calldepth int, s string) error {
- log.Println("D! [inputs.nsq_consumer] " + s)
+ l.log.Debug(s)
return nil
}
@@ -39,6 +40,8 @@ type NSQConsumer struct {
parser parsers.Parser
consumer *nsq.Consumer
+ Log telegraf.Logger
+
mu sync.Mutex
messages map[telegraf.TrackingID]*nsq.Message
wg sync.WaitGroup
@@ -48,8 +51,10 @@ type NSQConsumer struct {
var sampleConfig = `
## Server option still works but is deprecated, we just prepend it to the nsqd array.
# server = "localhost:4150"
+
## An array representing the NSQD TCP HTTP Endpoints
nsqd = ["localhost:4150"]
+
## An array representing the NSQLookupd HTTP Endpoints
nsqlookupd = ["localhost:4161"]
topic = "telegraf"
@@ -98,7 +103,7 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error {
n.cancel = cancel
n.connect()
- n.consumer.SetLogger(&logger{}, nsq.LogLevelInfo)
+ n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo)
n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {
metrics, err := n.parser.Parse(message.Body)
if err != nil {
diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go
index 6558dfba29b57..e07b125ccdb8f 100644
--- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go
+++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go
@@ -36,6 +36,7 @@ func TestReadsMetricsFromNSQ(t *testing.T) {
newMockNSQD(script, addr.String())
consumer := &NSQConsumer{
+ Log: testutil.Logger{},
Server: "127.0.0.1:4155",
Topic: "telegraf",
Channel: "consume",
@@ -50,8 +51,6 @@ func TestReadsMetricsFromNSQ(t *testing.T) {
assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
if err := consumer.Start(&acc); err != nil {
t.Fatal(err.Error())
- } else {
- defer consumer.Stop()
}
waitForPoint(&acc, t)
diff --git a/plugins/inputs/nstat/README.md b/plugins/inputs/nstat/README.md
index 5d2ca6c0a8f65..c0ebc2654f5b8 100644
--- a/plugins/inputs/nstat/README.md
+++ b/plugins/inputs/nstat/README.md
@@ -1,4 +1,4 @@
-## Nstat input plugin
+# Nstat Input Plugin
Plugin collects network metrics from `/proc/net/netstat`, `/proc/net/snmp` and `/proc/net/snmp6` files
diff --git a/plugins/inputs/ntpq/README.md b/plugins/inputs/ntpq/README.md
index f6ee8e2af28e5..e691200ddd682 100644
--- a/plugins/inputs/ntpq/README.md
+++ b/plugins/inputs/ntpq/README.md
@@ -29,7 +29,7 @@ server (RMS of difference of multiple time samples, milliseconds);
```toml
# Get standard NTP query metrics, requires ntpq executable
[[inputs.ntpq]]
- ## If false, set the -n ntpq flag. Can reduce metric gather times.
+ ## If false, add -n for ntpq command. Can reduce metric gather times.
dns_lookup = true
```
diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go
index ce7bb96d789f0..80b5dcd0f16be 100644
--- a/plugins/inputs/ntpq/ntpq.go
+++ b/plugins/inputs/ntpq/ntpq.go
@@ -21,30 +21,11 @@ var tagHeaders map[string]string = map[string]string{
"t": "type",
}
-// Mapping of the ntpq tag key to the index in the command output
-var tagI map[string]int = map[string]int{
- "remote": -1,
- "refid": -1,
- "stratum": -1,
- "type": -1,
-}
-
-// Mapping of float metrics to their index in the command output
-var floatI map[string]int = map[string]int{
- "delay": -1,
- "offset": -1,
- "jitter": -1,
-}
-
-// Mapping of int metrics to their index in the command output
-var intI map[string]int = map[string]int{
- "when": -1,
- "poll": -1,
- "reach": -1,
-}
-
type NTPQ struct {
- runQ func() ([]byte, error)
+ runQ func() ([]byte, error)
+ tagI map[string]int
+ floatI map[string]int
+ intI map[string]int
DNSLookup bool `toml:"dns_lookup"`
}
@@ -75,6 +56,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
}
lineCounter := 0
+ numColumns := 0
scanner := bufio.NewScanner(bytes.NewReader(out))
for scanner.Scan() {
line := scanner.Text()
@@ -96,30 +78,35 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
// If lineCounter == 0, then this is the header line
if lineCounter == 0 {
+ numColumns = len(fields)
for i, field := range fields {
// Check if field is a tag:
if tagKey, ok := tagHeaders[field]; ok {
- tagI[tagKey] = i
+ n.tagI[tagKey] = i
continue
}
// check if field is a float metric:
- if _, ok := floatI[field]; ok {
- floatI[field] = i
+ if _, ok := n.floatI[field]; ok {
+ n.floatI[field] = i
continue
}
// check if field is an int metric:
- if _, ok := intI[field]; ok {
- intI[field] = i
+ if _, ok := n.intI[field]; ok {
+ n.intI[field] = i
continue
}
}
} else {
+ if len(fields) != numColumns {
+ continue
+ }
+
mFields := make(map[string]interface{})
// Get tags from output
- for key, index := range tagI {
+ for key, index := range n.tagI {
if index == -1 {
continue
}
@@ -127,7 +114,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
}
// Get integer metrics from output
- for key, index := range intI {
+ for key, index := range n.intI {
if index == -1 || index >= len(fields) {
continue
}
@@ -177,7 +164,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
}
// get float metrics from output
- for key, index := range floatI {
+ for key, index := range n.floatI {
if index == -1 || index >= len(fields) {
continue
}
@@ -217,10 +204,40 @@ func (n *NTPQ) runq() ([]byte, error) {
return cmd.Output()
}
+func newNTPQ() *NTPQ {
+ // Mapping of the ntpq tag key to the index in the command output
+ tagI := map[string]int{
+ "remote": -1,
+ "refid": -1,
+ "stratum": -1,
+ "type": -1,
+ }
+
+ // Mapping of float metrics to their index in the command output
+ floatI := map[string]int{
+ "delay": -1,
+ "offset": -1,
+ "jitter": -1,
+ }
+
+ // Mapping of int metrics to their index in the command output
+ intI := map[string]int{
+ "when": -1,
+ "poll": -1,
+ "reach": -1,
+ }
+
+ n := &NTPQ{
+ tagI: tagI,
+ floatI: floatI,
+ intI: intI,
+ }
+ n.runQ = n.runq
+ return n
+}
+
func init() {
inputs.Add("ntpq", func() telegraf.Input {
- n := &NTPQ{}
- n.runQ = n.runq
- return n
+ return newNTPQ()
})
}
diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go
index 47b8cf8f49b1b..b0db77e45784f 100644
--- a/plugins/inputs/ntpq/ntpq_test.go
+++ b/plugins/inputs/ntpq/ntpq_test.go
@@ -3,10 +3,12 @@ package ntpq
import (
"fmt"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
-
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestSingleNTPQ(t *testing.T) {
@@ -14,9 +16,8 @@ func TestSingleNTPQ(t *testing.T) {
ret: []byte(singleNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
@@ -39,43 +40,13 @@ func TestSingleNTPQ(t *testing.T) {
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
-func TestMissingJitterField(t *testing.T) {
- tt := tester{
- ret: []byte(missingJitterField),
- err: nil,
- }
- n := &NTPQ{
- runQ: tt.runqTest,
- }
-
- acc := testutil.Accumulator{}
- assert.NoError(t, acc.GatherError(n.Gather))
-
- fields := map[string]interface{}{
- "when": int64(101),
- "poll": int64(256),
- "reach": int64(37),
- "delay": float64(51.016),
- "offset": float64(233.010),
- }
- tags := map[string]string{
- "remote": "uschi5-ntp-002.",
- "state_prefix": "*",
- "refid": "10.177.80.46",
- "stratum": "2",
- "type": "u",
- }
- acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
-}
-
func TestBadIntNTPQ(t *testing.T) {
tt := tester{
ret: []byte(badIntParseNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.Error(t, acc.GatherError(n.Gather))
@@ -102,9 +73,8 @@ func TestBadFloatNTPQ(t *testing.T) {
ret: []byte(badFloatParseNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.Error(t, acc.GatherError(n.Gather))
@@ -131,9 +101,8 @@ func TestDaysNTPQ(t *testing.T) {
ret: []byte(whenDaysNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
@@ -161,9 +130,8 @@ func TestHoursNTPQ(t *testing.T) {
ret: []byte(whenHoursNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
@@ -191,9 +159,8 @@ func TestMinutesNTPQ(t *testing.T) {
ret: []byte(whenMinutesNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
@@ -221,9 +188,8 @@ func TestBadWhenNTPQ(t *testing.T) {
ret: []byte(whenBadNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.Error(t, acc.GatherError(n.Gather))
@@ -253,9 +219,8 @@ func TestParserNTPQ(t *testing.T) {
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
@@ -316,9 +281,8 @@ func TestMultiNTPQ(t *testing.T) {
ret: []byte(multiNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
@@ -357,14 +321,12 @@ func TestMultiNTPQ(t *testing.T) {
}
func TestBadHeaderNTPQ(t *testing.T) {
- resetVars()
tt := tester{
ret: []byte(badHeaderNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
@@ -387,14 +349,12 @@ func TestBadHeaderNTPQ(t *testing.T) {
}
func TestMissingDelayColumnNTPQ(t *testing.T) {
- resetVars()
tt := tester{
ret: []byte(missingDelayNTPQ),
err: nil,
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
@@ -420,14 +380,68 @@ func TestFailedNTPQ(t *testing.T) {
ret: []byte(singleNTPQ),
err: fmt.Errorf("Test failure"),
}
- n := &NTPQ{
- runQ: tt.runqTest,
- }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
acc := testutil.Accumulator{}
assert.Error(t, acc.GatherError(n.Gather))
}
+// It is possible for the output of ntqp to be missing the refid column. This
+// is believed to be http://bugs.ntp.org/show_bug.cgi?id=3484 which is fixed
+// in ntp-4.2.8p12 (included first in Debian Buster).
+func TestNoRefID(t *testing.T) {
+ now := time.Now()
+ expected := []telegraf.Metric{
+ testutil.MustMetric("ntpq",
+ map[string]string{
+ "refid": "10.177.80.37",
+ "remote": "83.137.98.96",
+ "stratum": "2",
+ "type": "u",
+ },
+ map[string]interface{}{
+ "delay": float64(54.033),
+ "jitter": float64(449514),
+ "offset": float64(243.426),
+ "poll": int64(1024),
+ "reach": int64(377),
+ "when": int64(740),
+ },
+ now),
+ testutil.MustMetric("ntpq",
+ map[string]string{
+ "refid": "10.177.80.37",
+ "remote": "131.188.3.221",
+ "stratum": "2",
+ "type": "u",
+ },
+ map[string]interface{}{
+ "delay": float64(111.820),
+ "jitter": float64(449528),
+ "offset": float64(261.921),
+ "poll": int64(1024),
+ "reach": int64(377),
+ "when": int64(783),
+ },
+ now),
+ }
+
+ tt := tester{
+ ret: []byte(noRefID),
+ err: nil,
+ }
+ n := newNTPQ()
+ n.runQ = tt.runqTest
+
+ acc := testutil.Accumulator{
+ TimeFunc: func() time.Time { return now },
+ }
+
+ require.NoError(t, acc.GatherError(n.Gather))
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
+}
+
type tester struct {
ret []byte
err error
@@ -437,48 +451,11 @@ func (t *tester) runqTest() ([]byte, error) {
return t.ret, t.err
}
-func resetVars() {
- // Mapping of ntpq header names to tag keys
- tagHeaders = map[string]string{
- "remote": "remote",
- "refid": "refid",
- "st": "stratum",
- "t": "type",
- }
-
- // Mapping of the ntpq tag key to the index in the command output
- tagI = map[string]int{
- "remote": -1,
- "refid": -1,
- "stratum": -1,
- "type": -1,
- }
-
- // Mapping of float metrics to their index in the command output
- floatI = map[string]int{
- "delay": -1,
- "offset": -1,
- "jitter": -1,
- }
-
- // Mapping of int metrics to their index in the command output
- intI = map[string]int{
- "when": -1,
- "poll": -1,
- "reach": -1,
- }
-}
-
var singleNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
`
-var missingJitterField = ` remote refid st t when poll reach delay offset jitter
-==============================================================================
-*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010
-`
-
var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
@@ -527,6 +504,7 @@ var multiNTPQ = ` remote refid st t when poll reach delay
5.9.29.107 10.177.80.37 2 u 703 1024 377 205.704 160.406 449602.
91.189.94.4 10.177.80.37 2 u 673 1024 377 143.047 274.726 449445.
`
+
var multiParserNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012
@@ -535,3 +513,10 @@ var multiParserNTPQ = ` remote refid st t when poll reach d
+37.58.57.238 ( 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
-SHM(1) .GPS. 1 u 121 128 377 0.000 10.105 2.012
`
+
+var noRefID = ` remote refid st t when poll reach delay offset jitter
+==============================================================================
+ 83.137.98.96 10.177.80.37 2 u 740 1024 377 54.033 243.426 449514.
+ 91.189.94.4 2 u 673 1024 377 143.047 274.726 449445.
+ 131.188.3.221 10.177.80.37 2 u 783 1024 377 111.820 261.921 449528.
+`
diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md
index b59f2ee6a5e82..bbe90e005c6d6 100644
--- a/plugins/inputs/nvidia_smi/README.md
+++ b/plugins/inputs/nvidia_smi/README.md
@@ -1,4 +1,4 @@
-# `nvidia-smi` Input Plugin
+# Nvidia System Management Interface (SMI) Input Plugin
This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) binary to pull GPU stats including memory and GPU usage, temp and other.
@@ -17,6 +17,9 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid
#### Windows
On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe`
+On Windows 10, you may also find this located here `C:\Windows\System32\nvidia-smi.exe`
+
+You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe`
### Metrics
- measurement: `nvidia_smi`
@@ -28,6 +31,9 @@ On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corpor
- `uuid` (A unique identifier for the GPU e.g. `GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665`)
- fields
- `fan_speed` (integer, percentage)
+ - `fbc_stats_session_count` (integer)
+ - `fbc_stats_average_fps` (integer)
+ - `fbc_stats_average_latency` (integer)
- `memory_free` (integer, MiB)
- `memory_used` (integer, MiB)
- `memory_total` (integer, MiB)
@@ -35,18 +41,49 @@ On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corpor
- `temperature_gpu` (integer, degrees C)
- `utilization_gpu` (integer, percentage)
- `utilization_memory` (integer, percentage)
+ - `utilization_encoder` (integer, percentage)
+ - `utilization_decoder` (integer, percentage)
+ - `pcie_link_gen_current` (integer)
+ - `pcie_link_width_current` (integer)
+ - `encoder_stats_session_count` (integer)
+ - `encoder_stats_average_fps` (integer)
+ - `encoder_stats_average_latency` (integer)
+ - `clocks_current_graphics` (integer, MHz)
+ - `clocks_current_sm` (integer, MHz)
+ - `clocks_current_memory` (integer, MHz)
+ - `clocks_current_video` (integer, MHz)
### Sample Query
The below query could be used to alert on the average temperature of the your GPUs over the last minute
-```
+```sql
SELECT mean("temperature_gpu") FROM "nvidia_smi" WHERE time > now() - 5m GROUP BY time(1m), "index", "name", "host"
```
+### Troubleshooting
+
+Check the full output by running `nvidia-smi` binary manually.
+
+Linux:
+```sh
+sudo -u telegraf -- /usr/bin/nvidia-smi -q -x
+```
+
+Windows:
+```
+"C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" -q -x
+```
+
+Please include the output of this command if opening an GitHub issue.
+
### Example Output
```
nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,pstate=P2,uuid=GPU-823bc202-6279-6f2c-d729-868a30f14d96 fan_speed=100i,memory_free=7563i,memory_total=8112i,memory_used=549i,temperature_gpu=53i,utilization_gpu=100i,utilization_memory=90i 1523991122000000000
nvidia_smi,compute_mode=Default,host=8218cf,index=1,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=50i,utilization_gpu=100i,utilization_memory=85i 1523991122000000000
nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-d4cfc28d-0481-8d07-b81a-ddfc63d74adf fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=58i,utilization_gpu=100i,utilization_memory=86i 1523991122000000000
```
+
+### Limitations
+Note that there seems to be an issue with getting current memory clock values when the memory is overclocked.
+This may or may not apply to everyone but it's confirmed to be an issue on an EVGA 2080 Ti.
diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go
index ea708f24fb5ba..688c3d4bb7680 100644
--- a/plugins/inputs/nvidia_smi/nvidia_smi.go
+++ b/plugins/inputs/nvidia_smi/nvidia_smi.go
@@ -1,7 +1,7 @@
package nvidia_smi
import (
- "bufio"
+ "encoding/xml"
"fmt"
"os"
"os/exec"
@@ -14,32 +14,12 @@ import (
"github.com/influxdata/telegraf/plugins/inputs"
)
-var (
- measurement = "nvidia_smi"
- metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw"
- metricNames = [][]string{
- {"fan_speed", "integer"},
- {"memory_total", "integer"},
- {"memory_used", "integer"},
- {"memory_free", "integer"},
- {"pstate", "tag"},
- {"temperature_gpu", "integer"},
- {"name", "tag"},
- {"uuid", "tag"},
- {"compute_mode", "tag"},
- {"utilization_gpu", "integer"},
- {"utilization_memory", "integer"},
- {"index", "tag"},
- {"power_draw", "float"},
- }
-)
+const measurement = "nvidia_smi"
// NvidiaSMI holds the methods for this plugin
type NvidiaSMI struct {
BinPath string
Timeout internal.Duration
-
- metrics string
}
// Description returns the description of the NvidiaSMI plugin
@@ -60,7 +40,6 @@ func (smi *NvidiaSMI) SampleConfig() string {
// Gather implements the telegraf interface
func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error {
-
if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) {
return fmt.Errorf("nvidia-smi binary not at path %s, cannot gather GPU data", smi.BinPath)
}
@@ -83,84 +62,193 @@ func init() {
return &NvidiaSMI{
BinPath: "/usr/bin/nvidia-smi",
Timeout: internal.Duration{Duration: 5 * time.Second},
- metrics: metrics,
}
})
}
-func (smi *NvidiaSMI) pollSMI() (string, error) {
+func (smi *NvidiaSMI) pollSMI() ([]byte, error) {
// Construct and execute metrics query
- opts := []string{"--format=noheader,nounits,csv", fmt.Sprintf("--query-gpu=%s", smi.metrics)}
- ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, opts...), smi.Timeout.Duration)
+ ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), smi.Timeout.Duration)
if err != nil {
- return "", err
+ return nil, err
}
- return string(ret), nil
+ return ret, nil
}
-func gatherNvidiaSMI(ret string, acc telegraf.Accumulator) error {
- // First split the lines up and handle each one
- scanner := bufio.NewScanner(strings.NewReader(ret))
- for scanner.Scan() {
- tags, fields, err := parseLine(scanner.Text())
- if err != nil {
- return err
- }
- acc.AddFields(measurement, fields, tags)
+func gatherNvidiaSMI(ret []byte, acc telegraf.Accumulator) error {
+ smi := &SMI{}
+ err := xml.Unmarshal(ret, smi)
+ if err != nil {
+ return err
}
- if err := scanner.Err(); err != nil {
- return fmt.Errorf("Error scanning text %s", ret)
+ metrics := smi.genTagsFields()
+
+ for _, metric := range metrics {
+ acc.AddFields(measurement, metric.fields, metric.tags)
}
return nil
}
-func parseLine(line string) (map[string]string, map[string]interface{}, error) {
- tags := make(map[string]string, 0)
- fields := make(map[string]interface{}, 0)
+type metric struct {
+ tags map[string]string
+ fields map[string]interface{}
+}
- // Next split up the comma delimited metrics
- met := strings.Split(line, ",")
+func (s *SMI) genTagsFields() []metric {
+ metrics := []metric{}
+ for i, gpu := range s.GPU {
+ tags := map[string]string{
+ "index": strconv.Itoa(i),
+ }
+ fields := map[string]interface{}{}
+
+ setTagIfUsed(tags, "pstate", gpu.PState)
+ setTagIfUsed(tags, "name", gpu.ProdName)
+ setTagIfUsed(tags, "uuid", gpu.UUID)
+ setTagIfUsed(tags, "compute_mode", gpu.ComputeMode)
+
+ setIfUsed("int", fields, "fan_speed", gpu.FanSpeed)
+ setIfUsed("int", fields, "memory_total", gpu.Memory.Total)
+ setIfUsed("int", fields, "memory_used", gpu.Memory.Used)
+ setIfUsed("int", fields, "memory_free", gpu.Memory.Free)
+ setIfUsed("int", fields, "temperature_gpu", gpu.Temp.GPUTemp)
+ setIfUsed("int", fields, "utilization_gpu", gpu.Utilization.GPU)
+ setIfUsed("int", fields, "utilization_memory", gpu.Utilization.Memory)
+ setIfUsed("int", fields, "utilization_encoder", gpu.Utilization.Encoder)
+ setIfUsed("int", fields, "utilization_decoder", gpu.Utilization.Decoder)
+ setIfUsed("int", fields, "pcie_link_gen_current", gpu.PCI.LinkInfo.PCIEGen.CurrentLinkGen)
+ setIfUsed("int", fields, "pcie_link_width_current", gpu.PCI.LinkInfo.LinkWidth.CurrentLinkWidth)
+ setIfUsed("int", fields, "encoder_stats_session_count", gpu.Encoder.SessionCount)
+ setIfUsed("int", fields, "encoder_stats_average_fps", gpu.Encoder.AverageFPS)
+ setIfUsed("int", fields, "encoder_stats_average_latency", gpu.Encoder.AverageLatency)
+ setIfUsed("int", fields, "fbc_stats_session_count", gpu.FBC.SessionCount)
+ setIfUsed("int", fields, "fbc_stats_average_fps", gpu.FBC.AverageFPS)
+ setIfUsed("int", fields, "fbc_stats_average_latency", gpu.FBC.AverageLatency)
+ setIfUsed("int", fields, "clocks_current_graphics", gpu.Clocks.Graphics)
+ setIfUsed("int", fields, "clocks_current_sm", gpu.Clocks.SM)
+ setIfUsed("int", fields, "clocks_current_memory", gpu.Clocks.Memory)
+ setIfUsed("int", fields, "clocks_current_video", gpu.Clocks.Video)
+
+ setIfUsed("float", fields, "power_draw", gpu.Power.PowerDraw)
+ metrics = append(metrics, metric{tags, fields})
+ }
+ return metrics
+}
- // Make sure there are as many metrics in the line as there were queried.
- if len(met) == len(metricNames) {
- for i, m := range metricNames {
- col := strings.TrimSpace(met[i])
+func setTagIfUsed(m map[string]string, k, v string) {
+ if v != "" {
+ m[k] = v
+ }
+}
- // Handle the tags
- if m[1] == "tag" {
- tags[m[0]] = col
- continue
- }
+func setIfUsed(t string, m map[string]interface{}, k, v string) {
+ vals := strings.Fields(v)
+ if len(vals) < 1 {
+ return
+ }
- if strings.Contains(col, "[Not Supported]") {
- continue
- }
+ val := vals[0]
+ if k == "pcie_link_width_current" {
+ val = strings.TrimSuffix(vals[0], "x")
+ }
- // Parse the integers
- if m[1] == "integer" {
- out, err := strconv.ParseInt(col, 10, 64)
- if err != nil {
- return tags, fields, err
- }
- fields[m[0]] = out
+ switch t {
+ case "float":
+ if val != "" {
+ f, err := strconv.ParseFloat(val, 64)
+ if err == nil {
+ m[k] = f
}
-
- // Parse the floats
- if m[1] == "float" {
- out, err := strconv.ParseFloat(col, 64)
- if err != nil {
- return tags, fields, err
- }
- fields[m[0]] = out
+ }
+ case "int":
+ if val != "" {
+ i, err := strconv.Atoi(val)
+ if err == nil {
+ m[k] = i
}
}
-
- // Return the tags and fields
- return tags, fields, nil
}
+}
+
+// SMI defines the structure for the output of _nvidia-smi -q -x_.
+type SMI struct {
+ GPU GPU `xml:"gpu"`
+}
+
+// GPU defines the structure of the GPU portion of the smi output.
+type GPU []struct {
+ FanSpeed string `xml:"fan_speed"` // int
+ Memory MemoryStats `xml:"fb_memory_usage"`
+ PState string `xml:"performance_state"`
+ Temp TempStats `xml:"temperature"`
+ ProdName string `xml:"product_name"`
+ UUID string `xml:"uuid"`
+ ComputeMode string `xml:"compute_mode"`
+ Utilization UtilizationStats `xml:"utilization"`
+ Power PowerReadings `xml:"power_readings"`
+ PCI PCI `xml:"pci"`
+ Encoder EncoderStats `xml:"encoder_stats"`
+ FBC FBCStats `xml:"fbc_stats"`
+ Clocks ClockStats `xml:"clocks"`
+}
+
+// MemoryStats defines the structure of the memory portions in the smi output.
+type MemoryStats struct {
+ Total string `xml:"total"` // int
+ Used string `xml:"used"` // int
+ Free string `xml:"free"` // int
+}
+
+// TempStats defines the structure of the temperature portion of the smi output.
+type TempStats struct {
+ GPUTemp string `xml:"gpu_temp"` // int
+}
+
+// UtilizationStats defines the structure of the utilization portion of the smi output.
+type UtilizationStats struct {
+ GPU string `xml:"gpu_util"` // int
+ Memory string `xml:"memory_util"` // int
+ Encoder string `xml:"encoder_util"` // int
+ Decoder string `xml:"decoder_util"` // int
+}
+
+// PowerReadings defines the structure of the power_readings portion of the smi output.
+type PowerReadings struct {
+ PowerDraw string `xml:"power_draw"` // float
+}
+
+// PCI defines the structure of the pci portion of the smi output.
+type PCI struct {
+ LinkInfo struct {
+ PCIEGen struct {
+ CurrentLinkGen string `xml:"current_link_gen"` // int
+ } `xml:"pcie_gen"`
+ LinkWidth struct {
+ CurrentLinkWidth string `xml:"current_link_width"` // int
+ } `xml:"link_widths"`
+ } `xml:"pci_gpu_link_info"`
+}
+
+// EncoderStats defines the structure of the encoder_stats portion of the smi output.
+type EncoderStats struct {
+ SessionCount string `xml:"session_count"` // int
+ AverageFPS string `xml:"average_fps"` // int
+ AverageLatency string `xml:"average_latency"` // int
+}
+
+// FBCStats defines the structure of the fbc_stats portion of the smi output.
+type FBCStats struct {
+ SessionCount string `xml:"session_count"` // int
+ AverageFPS string `xml:"average_fps"` // int
+ AverageLatency string `xml:"average_latency"` // int
+}
- // If the line is empty return an emptyline error
- return tags, fields, fmt.Errorf("Different number of metrics returned (%d) than expeced (%d)", len(met), len(metricNames))
+// ClockStats defines the structure of the clocks portion of the smi output.
+type ClockStats struct {
+ Graphics string `xml:"graphics_clock"` // int
+ SM string `xml:"sm_clock"` // int
+ Memory string `xml:"mem_clock"` // int
+ Video string `xml:"video_clock"` // int
}
diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go
index 87785fe874950..3c191e609ade4 100644
--- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go
+++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go
@@ -1,44 +1,147 @@
package nvidia_smi
import (
+ "io/ioutil"
+ "path/filepath"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
-func TestParseLineStandard(t *testing.T) {
- line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1, 0.0\n"
- tags, fields, err := parseLine(line)
- if err != nil {
- t.Fail()
+func TestGatherValidXML(t *testing.T) {
+ tests := []struct {
+ name string
+ filename string
+ expected []telegraf.Metric
+ }{
+ {
+ name: "GeForce GTX 1070 Ti",
+ filename: "gtx-1070-ti.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "nvidia_smi",
+ map[string]string{
+ "name": "GeForce GTX 1070 Ti",
+ "compute_mode": "Default",
+ "index": "0",
+ "pstate": "P8",
+ "uuid": "GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665",
+ },
+ map[string]interface{}{
+ "clocks_current_graphics": 135,
+ "clocks_current_memory": 405,
+ "clocks_current_sm": 135,
+ "clocks_current_video": 405,
+ "encoder_stats_average_fps": 0,
+ "encoder_stats_average_latency": 0,
+ "encoder_stats_session_count": 0,
+ "fan_speed": 100,
+ "memory_free": 4054,
+ "memory_total": 4096,
+ "memory_used": 42,
+ "pcie_link_gen_current": 1,
+ "pcie_link_width_current": 16,
+ "temperature_gpu": 39,
+ "utilization_gpu": 0,
+ "utilization_memory": 0,
+ },
+ time.Unix(0, 0)),
+ },
+ },
+ {
+ name: "GeForce GTX 1660 Ti",
+ filename: "gtx-1660-ti.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "nvidia_smi",
+ map[string]string{
+ "compute_mode": "Default",
+ "index": "0",
+ "name": "Graphics Device",
+ "pstate": "P8",
+ "uuid": "GPU-304a277d-3545-63b8-3a36-dfde3c992989",
+ },
+ map[string]interface{}{
+ "clocks_current_graphics": 300,
+ "clocks_current_memory": 405,
+ "clocks_current_sm": 300,
+ "clocks_current_video": 540,
+ "encoder_stats_average_fps": 0,
+ "encoder_stats_average_latency": 0,
+ "encoder_stats_session_count": 0,
+ "fbc_stats_average_fps": 0,
+ "fbc_stats_average_latency": 0,
+ "fbc_stats_session_count": 0,
+ "fan_speed": 0,
+ "memory_free": 5912,
+ "memory_total": 5912,
+ "memory_used": 0,
+ "pcie_link_gen_current": 1,
+ "pcie_link_width_current": 16,
+ "power_draw": 8.93,
+ "temperature_gpu": 40,
+ "utilization_gpu": 0,
+ "utilization_memory": 1,
+ "utilization_encoder": 0,
+ "utilization_decoder": 0,
+ },
+ time.Unix(0, 0)),
+ },
+ },
+ {
+ name: "Quadro P400",
+ filename: "quadro-p400.xml",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "nvidia_smi",
+ map[string]string{
+ "compute_mode": "Default",
+ "index": "0",
+ "name": "Quadro P400",
+ "pstate": "P8",
+ "uuid": "GPU-8f750be4-dfbc-23b9-b33f-da729a536494",
+ },
+ map[string]interface{}{
+ "clocks_current_graphics": 139,
+ "clocks_current_memory": 405,
+ "clocks_current_sm": 139,
+ "clocks_current_video": 544,
+ "encoder_stats_average_fps": 0,
+ "encoder_stats_average_latency": 0,
+ "encoder_stats_session_count": 0,
+ "fbc_stats_average_fps": 0,
+ "fbc_stats_average_latency": 0,
+ "fbc_stats_session_count": 0,
+ "fan_speed": 34,
+ "memory_free": 1998,
+ "memory_total": 1998,
+ "memory_used": 0,
+ "pcie_link_gen_current": 1,
+ "pcie_link_width_current": 16,
+ "temperature_gpu": 33,
+ "utilization_gpu": 0,
+ "utilization_memory": 3,
+ "utilization_encoder": 0,
+ "utilization_decoder": 0,
+ },
+ time.Unix(0, 0)),
+ },
+ },
}
- if tags["name"] != "GeForce GTX 1070 Ti" {
- t.Fail()
- }
- if temp, ok := fields["temperature_gpu"].(int); ok && temp == 61 {
- t.Fail()
- }
-}
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var acc testutil.Accumulator
-func TestParseLineEmptyLine(t *testing.T) {
- line := "\n"
- _, _, err := parseLine(line)
- if err == nil {
- t.Fail()
- }
-}
+ octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename))
+ require.NoError(t, err)
-func TestParseLineBad(t *testing.T) {
- line := "the quick brown fox jumped over the lazy dog"
- _, _, err := parseLine(line)
- if err == nil {
- t.Fail()
- }
-}
+ err = gatherNvidiaSMI(octets, &acc)
+ require.NoError(t, err)
-func TestParseLineNotSupported(t *testing.T) {
- line := "[Not Supported], 7606, 0, 7606, P0, 38, Tesla P4, GPU-xxx, Default, 0, 0, 0, 0.0\n"
- _, fields, err := parseLine(line)
- require.NoError(t, err)
- require.Equal(t, nil, fields["fan_speed"])
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+ })
+ }
}
diff --git a/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml b/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml
new file mode 100644
index 0000000000000..3e3e3ec870585
--- /dev/null
+++ b/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml
@@ -0,0 +1,47 @@
+
+
+
+
+ GeForce GTX 1070 Ti
+ GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665
+
+
+
+ 1
+
+
+ 16x
+
+
+
+ 100 %
+ P8
+
+ 4096 MiB
+ 42 MiB
+ 4054 MiB
+
+ Default
+
+ 0 %
+ 0 %
+
+
+ 0
+ 0
+ 0
+
+
+ 39 C
+
+
+ N/A
+
+
+ 135 MHz
+ 135 MHz
+ 405 MHz
+ 405 MHz
+
+
+
diff --git a/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml b/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml
new file mode 100644
index 0000000000000..1a6c7d0891935
--- /dev/null
+++ b/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml
@@ -0,0 +1,189 @@
+
+
+ Fri Mar 29 19:19:44 2019
+ 418.43
+ 10.1
+ 1
+
+ Graphics Device
+ GeForce
+ Disabled
+ Disabled
+ Disabled
+ Disabled
+ 4000
+
+ N/A
+ N/A
+
+ N/A
+ GPU-304a277d-3545-63b8-3a36-dfde3c992989
+ 0
+ 90.16.25.00.4C
+ No
+ 0x4300
+ N/A
+
+ G001.0000.02.04
+ 1.1
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ None
+
+
+ N/A
+
+
+ 43
+ 00
+ 0000
+ 218410DE
+ 00000000:43:00.0
+ 3FC81458
+
+
+ 3
+ 1
+
+
+ 16x
+ 16x
+
+
+
+ N/A
+ N/A
+
+ 0
+ 0
+ 0 KB/s
+ 0 KB/s
+
+ 0 %
+ P8
+
+ Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+
+
+ 5912 MiB
+ 0 MiB
+ 5912 MiB
+
+
+ 256 MiB
+ 2 MiB
+ 254 MiB
+
+ Default
+
+ 0 %
+ 1 %
+ 0 %
+ 0 %
+
+
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+
+
+ N/A
+ N/A
+
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+
+
+ 40 C
+ 96 C
+ 93 C
+ 91 C
+ N/A
+ N/A
+
+
+ P8
+ Supported
+ 8.93 W
+ 130.00 W
+ 130.00 W
+ 130.00 W
+ 70.00 W
+ 130.00 W
+
+
+ 300 MHz
+ 300 MHz
+ 405 MHz
+ 540 MHz
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ 2145 MHz
+ 2145 MHz
+ 4001 MHz
+ 1950 MHz
+
+
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+
+
+
+
+
+
+
diff --git a/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml b/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml
new file mode 100644
index 0000000000000..ca9e2191ec94f
--- /dev/null
+++ b/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml
@@ -0,0 +1,447 @@
+
+
+ Mon Mar 11 17:03:27 2019
+ 418.43
+ 10.1
+ 1
+
+ Quadro P400
+ Quadro
+ Disabled
+ Disabled
+ Disabled
+ Disabled
+ 4000
+
+ N/A
+ N/A
+
+ 0424418054852
+ GPU-8f750be4-dfbc-23b9-b33f-da729a536494
+ 0
+ 86.07.3B.00.4A
+ No
+ 0x4300
+ 900-5G212-1701-000
+
+ G212.0500.00.01
+ 1.1
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ None
+
+
+ N/A
+
+
+ 43
+ 00
+ 0000
+ 1CB310DE
+ 00000000:43:00.0
+ 11BE10DE
+
+
+ 3
+ 1
+
+
+ 16x
+ 16x
+
+
+
+ N/A
+ N/A
+
+ 0
+ 0
+ 0 KB/s
+ 0 KB/s
+
+ 34 %
+ P8
+
+ Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+
+
+ 1998 MiB
+ 0 MiB
+ 1998 MiB
+
+
+ 256 MiB
+ 2 MiB
+ 254 MiB
+
+ Default
+
+ 0 %
+ 3 %
+ 0 %
+ 0 %
+
+
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+
+
+ N/A
+ N/A
+
+
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+
+
+ 33 C
+ 103 C
+ 100 C
+ N/A
+ N/A
+ N/A
+
+
+ P8
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ 139 MHz
+ 139 MHz
+ 405 MHz
+ 544 MHz
+
+
+ 1227 MHz
+ 2005 MHz
+
+
+ 1227 MHz
+ 2005 MHz
+
+
+ 1252 MHz
+ 1252 MHz
+ 2005 MHz
+ 1126 MHz
+
+
+ 1252 MHz
+
+
+ N/A
+ N/A
+
+
+
+ 2005 MHz
+ 1252 MHz
+ 1240 MHz
+ 1227 MHz
+ 1215 MHz
+ 1202 MHz
+ 1189 MHz
+ 1177 MHz
+ 1164 MHz
+ 1151 MHz
+ 1139 MHz
+ 1126 MHz
+ 1113 MHz
+ 1101 MHz
+ 1088 MHz
+ 1075 MHz
+ 1063 MHz
+ 1050 MHz
+ 1037 MHz
+ 1025 MHz
+ 1012 MHz
+ 999 MHz
+ 987 MHz
+ 974 MHz
+ 961 MHz
+ 949 MHz
+ 936 MHz
+ 923 MHz
+ 911 MHz
+ 898 MHz
+ 885 MHz
+ 873 MHz
+ 860 MHz
+ 847 MHz
+ 835 MHz
+ 822 MHz
+ 810 MHz
+ 797 MHz
+ 784 MHz
+ 772 MHz
+ 759 MHz
+ 746 MHz
+ 734 MHz
+ 721 MHz
+ 708 MHz
+ 696 MHz
+ 683 MHz
+ 670 MHz
+ 658 MHz
+ 645 MHz
+ 632 MHz
+ 620 MHz
+ 607 MHz
+ 594 MHz
+ 582 MHz
+ 569 MHz
+ 556 MHz
+ 544 MHz
+ 531 MHz
+ 518 MHz
+ 506 MHz
+ 493 MHz
+ 480 MHz
+ 468 MHz
+ 455 MHz
+ 442 MHz
+ 430 MHz
+ 417 MHz
+ 405 MHz
+ 392 MHz
+ 379 MHz
+ 367 MHz
+ 354 MHz
+ 341 MHz
+ 329 MHz
+ 316 MHz
+ 303 MHz
+ 291 MHz
+ 278 MHz
+ 265 MHz
+ 253 MHz
+ 240 MHz
+ 227 MHz
+ 215 MHz
+ 202 MHz
+ 189 MHz
+ 177 MHz
+ 164 MHz
+ 151 MHz
+ 139 MHz
+
+
+ 810 MHz
+ 1252 MHz
+ 1240 MHz
+ 1227 MHz
+ 1215 MHz
+ 1202 MHz
+ 1189 MHz
+ 1177 MHz
+ 1164 MHz
+ 1151 MHz
+ 1139 MHz
+ 1126 MHz
+ 1113 MHz
+ 1101 MHz
+ 1088 MHz
+ 1075 MHz
+ 1063 MHz
+ 1050 MHz
+ 1037 MHz
+ 1025 MHz
+ 1012 MHz
+ 999 MHz
+ 987 MHz
+ 974 MHz
+ 961 MHz
+ 949 MHz
+ 936 MHz
+ 923 MHz
+ 911 MHz
+ 898 MHz
+ 885 MHz
+ 873 MHz
+ 860 MHz
+ 847 MHz
+ 835 MHz
+ 822 MHz
+ 810 MHz
+ 797 MHz
+ 784 MHz
+ 772 MHz
+ 759 MHz
+ 746 MHz
+ 734 MHz
+ 721 MHz
+ 708 MHz
+ 696 MHz
+ 683 MHz
+ 670 MHz
+ 658 MHz
+ 645 MHz
+ 632 MHz
+ 620 MHz
+ 607 MHz
+ 594 MHz
+ 582 MHz
+ 569 MHz
+ 556 MHz
+ 544 MHz
+ 531 MHz
+ 518 MHz
+ 506 MHz
+ 493 MHz
+ 480 MHz
+ 468 MHz
+ 455 MHz
+ 442 MHz
+ 430 MHz
+ 417 MHz
+ 405 MHz
+ 392 MHz
+ 379 MHz
+ 367 MHz
+ 354 MHz
+ 341 MHz
+ 329 MHz
+ 316 MHz
+ 303 MHz
+ 291 MHz
+ 278 MHz
+ 265 MHz
+ 253 MHz
+ 240 MHz
+ 227 MHz
+ 215 MHz
+ 202 MHz
+ 189 MHz
+ 177 MHz
+ 164 MHz
+ 151 MHz
+ 139 MHz
+
+
+ 405 MHz
+ 607 MHz
+ 594 MHz
+ 582 MHz
+ 569 MHz
+ 556 MHz
+ 544 MHz
+ 531 MHz
+ 518 MHz
+ 506 MHz
+ 493 MHz
+ 480 MHz
+ 468 MHz
+ 455 MHz
+ 442 MHz
+ 430 MHz
+ 417 MHz
+ 405 MHz
+ 392 MHz
+ 379 MHz
+ 367 MHz
+ 354 MHz
+ 341 MHz
+ 329 MHz
+ 316 MHz
+ 303 MHz
+ 291 MHz
+ 278 MHz
+ 265 MHz
+ 253 MHz
+ 240 MHz
+ 227 MHz
+ 215 MHz
+ 202 MHz
+ 189 MHz
+ 177 MHz
+ 164 MHz
+ 151 MHz
+ 139 MHz
+
+
+
+
+
+
+
+
+
diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md
new file mode 100644
index 0000000000000..173d98b6fac98
--- /dev/null
+++ b/plugins/inputs/opcua/README.md
@@ -0,0 +1,80 @@
+# OPC UA Client Input Plugin
+
+The `opcua` plugin retrieves data from OPC UA client devices.
+
+Telegraf minimum version: Telegraf 1.16
+Plugin minimum tested version: 1.16
+
+### Configuration:
+
+```toml
+[[inputs.opcua]]
+ ## Device name
+ # name = "localhost"
+ #
+ ## OPC UA Endpoint URL
+ # endpoint = "opc.tcp://localhost:4840"
+ #
+ ## Maximum time allowed to establish a connect to the endpoint.
+ # connect_timeout = "10s"
+ #
+ ## Maximum time allowed for a request over the estabilished connection.
+ # request_timeout = "5s"
+ #
+ ## Security policy, one of "None", "Basic128Rsa15", "Basic256",
+ ## "Basic256Sha256", or "auto"
+ # security_policy = "auto"
+ #
+ ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto"
+ # security_mode = "auto"
+ #
+ ## Path to cert.pem. Required when security mode or policy isn't "None".
+ ## If cert path is not supplied, self-signed cert and key will be generated.
+ # certificate = "/etc/telegraf/cert.pem"
+ #
+ ## Path to private key.pem. Required when security mode or policy isn't "None".
+ ## If key path is not supplied, self-signed cert and key will be generated.
+ # private_key = "/etc/telegraf/key.pem"
+ #
+ ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
+ ## authenticate using a specific ID, select 'Certificate' or 'UserName'
+ # auth_method = "Anonymous"
+ #
+ ## Username. Required for auth_method = "UserName"
+ # username = ""
+ #
+ ## Password. Required for auth_method = "UserName"
+ # password = ""
+ #
+ ## Node ID configuration
+ ## name - the variable name
+ ## namespace - integer value 0 thru 3
+ ## identifier_type - s=string, i=numeric, g=guid, b=opaque
+ ## identifier - tag as shown in opcua browser
+ ## data_type - boolean, byte, short, int, uint, uint16, int16,
+ ## uint32, int32, float, double, string, datetime, number
+ ## Example:
+ ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"}
+ nodes = [
+ {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
+ {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
+ ]
+```
+
+### Example Node Configuration
+An OPC UA node ID may resemble: "n=3,s=Temperature". In this example:
+- n=3 is indicating the `namespace` is 3
+- s=Temperature is indicting that the `identifier_type` is a string and `identifier` value is 'Temperature'
+- This example temperature node has a value of 79.0, which makes the `data_type` a 'float'.
+To gather data from this node enter the following line into the 'nodes' property above:
+```
+{name="LabelName", namespace="3", identifier_type="s", identifier="Temperature", data_type="float", description="Description of node"},
+```
+
+
+### Example Output
+
+```
+opcua,host=3c70aee0901e,name=Random,type=double Random=0.018158170305814902 1597820490000000000
+
+```
diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go
new file mode 100644
index 0000000000000..87647e2b9d5f8
--- /dev/null
+++ b/plugins/inputs/opcua/opcua_client.go
@@ -0,0 +1,424 @@
+package opcua_client
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/gopcua/opcua"
+ "github.com/gopcua/opcua/ua"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// OpcUA type
+type OpcUA struct {
+ Name string `toml:"name"`
+ Endpoint string `toml:"endpoint"`
+ SecurityPolicy string `toml:"security_policy"`
+ SecurityMode string `toml:"security_mode"`
+ Certificate string `toml:"certificate"`
+ PrivateKey string `toml:"private_key"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ AuthMethod string `toml:"auth_method"`
+ ConnectTimeout config.Duration `toml:"connect_timeout"`
+ RequestTimeout config.Duration `toml:"request_timeout"`
+ NodeList []OPCTag `toml:"nodes"`
+
+ Nodes []string `toml:"-"`
+ NodeData []OPCData `toml:"-"`
+ NodeIDs []*ua.NodeID `toml:"-"`
+ NodeIDerror []error `toml:"-"`
+ state ConnectionState
+
+ // status
+ ReadSuccess int `toml:"-"`
+ ReadError int `toml:"-"`
+ NumberOfTags int `toml:"-"`
+
+ // internal values
+ client *opcua.Client
+ req *ua.ReadRequest
+ opts []opcua.Option
+}
+
+// OPCTag type
+type OPCTag struct {
+ Name string `toml:"name"`
+ Namespace string `toml:"namespace"`
+ IdentifierType string `toml:"identifier_type"`
+ Identifier string `toml:"identifier"`
+ DataType string `toml:"data_type"`
+ Description string `toml:"description"`
+}
+
+// OPCData type
+type OPCData struct {
+ TagName string
+ Value interface{}
+ Quality ua.StatusCode
+ TimeStamp string
+ Time string
+ DataType ua.TypeID
+}
+
+// ConnectionState used for constants
+type ConnectionState int
+
+const (
+ //Disconnected constant state 0
+ Disconnected ConnectionState = iota
+ //Connecting constant state 1
+ Connecting
+ //Connected constant state 2
+ Connected
+)
+
+const description = `Retrieve data from OPCUA devices`
+const sampleConfig = `
+[[inputs.opcua]]
+ ## Device name
+ # name = "localhost"
+ #
+ ## OPC UA Endpoint URL
+ # endpoint = "opc.tcp://localhost:4840"
+ #
+ ## Maximum time allowed to establish a connect to the endpoint.
+ # connect_timeout = "10s"
+ #
+ ## Maximum time allowed for a request over the estabilished connection.
+ # request_timeout = "5s"
+ #
+ ## Security policy, one of "None", "Basic128Rsa15", "Basic256",
+ ## "Basic256Sha256", or "auto"
+ # security_policy = "auto"
+ #
+ ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto"
+ # security_mode = "auto"
+ #
+ ## Path to cert.pem. Required when security mode or policy isn't "None".
+ ## If cert path is not supplied, self-signed cert and key will be generated.
+ # certificate = "/etc/telegraf/cert.pem"
+ #
+ ## Path to private key.pem. Required when security mode or policy isn't "None".
+ ## If key path is not supplied, self-signed cert and key will be generated.
+ # private_key = "/etc/telegraf/key.pem"
+ #
+ ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
+ ## authenticate using a specific ID, select 'Certificate' or 'UserName'
+ # auth_method = "Anonymous"
+ #
+ ## Username. Required for auth_method = "UserName"
+ # username = ""
+ #
+ ## Password. Required for auth_method = "UserName"
+ # password = ""
+ #
+ ## Node ID configuration
+ ## name - the variable name
+ ## namespace - integer value 0 thru 3
+ ## identifier_type - s=string, i=numeric, g=guid, b=opaque
+ ## identifier - tag as shown in opcua browser
+ ## data_type - boolean, byte, short, int, uint, uint16, int16,
+ ## uint32, int32, float, double, string, datetime, number
+ ## Example:
+ ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"}
+ nodes = [
+ {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
+ {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
+ ]
+`
+
+// Description will appear directly above the plugin definition in the config file
+func (o *OpcUA) Description() string {
+ return description
+}
+
+// SampleConfig will populate the sample configuration portion of the plugin's configuration
+func (o *OpcUA) SampleConfig() string {
+ return sampleConfig
+}
+
+// Init will initialize all tags
+func (o *OpcUA) Init() error {
+ o.state = Disconnected
+
+ err := o.validateEndpoint()
+ if err != nil {
+ return err
+ }
+
+ err = o.InitNodes()
+ if err != nil {
+ return err
+ }
+ o.NumberOfTags = len(o.NodeList)
+
+ o.setupOptions()
+
+ return nil
+
+}
+
+func (o *OpcUA) validateEndpoint() error {
+ if o.Name == "" {
+ return fmt.Errorf("device name is empty")
+ }
+
+ if o.Endpoint == "" {
+ return fmt.Errorf("endpoint url is empty")
+ }
+
+ _, err := url.Parse(o.Endpoint)
+ if err != nil {
+ return fmt.Errorf("endpoint url is invalid")
+ }
+
+ //search security policy type
+ switch o.SecurityPolicy {
+ case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto":
+ break
+ default:
+ return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.Name)
+ }
+ //search security mode type
+ switch o.SecurityMode {
+ case "None", "Sign", "SignAndEncrypt", "auto":
+ break
+ default:
+ return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.Name)
+ }
+ return nil
+}
+
+//InitNodes Method on OpcUA
+func (o *OpcUA) InitNodes() error {
+ if len(o.NodeList) == 0 {
+ return nil
+ }
+
+ err := o.validateOPCTags()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (o *OpcUA) validateOPCTags() error {
+ nameEncountered := map[string]bool{}
+ for i, item := range o.NodeList {
+ //check empty name
+ if item.Name == "" {
+ return fmt.Errorf("empty name in '%s'", item.Name)
+ }
+ //search name duplicate
+ if nameEncountered[item.Name] {
+ return fmt.Errorf("name '%s' is duplicated in '%s'", item.Name, item.Name)
+ } else {
+ nameEncountered[item.Name] = true
+ }
+ //search identifier type
+ switch item.IdentifierType {
+ case "s", "i", "g", "b":
+ break
+ default:
+ return fmt.Errorf("invalid identifier type '%s' in '%s'", item.IdentifierType, item.Name)
+ }
+ // search data type
+ switch item.DataType {
+ case "boolean", "byte", "short", "int", "uint", "uint16", "int16", "uint32", "int32", "float", "double", "string", "datetime", "number":
+ break
+ default:
+ return fmt.Errorf("invalid data type '%s' in '%s'", item.DataType, item.Name)
+ }
+
+ // build nodeid
+ o.Nodes = append(o.Nodes, BuildNodeID(item))
+
+ //parse NodeIds and NodeIds errors
+ nid, niderr := ua.ParseNodeID(o.Nodes[i])
+ // build NodeIds and Errors
+ o.NodeIDs = append(o.NodeIDs, nid)
+ o.NodeIDerror = append(o.NodeIDerror, niderr)
+ // Grow NodeData for later input
+ o.NodeData = append(o.NodeData, OPCData{})
+ }
+ return nil
+}
+
+// BuildNodeID build node ID from OPC tag
+func BuildNodeID(tag OPCTag) string {
+ return "ns=" + tag.Namespace + ";" + tag.IdentifierType + "=" + tag.Identifier
+}
+
+// Connect to a OPCUA device
+func Connect(o *OpcUA) error {
+ u, err := url.Parse(o.Endpoint)
+ if err != nil {
+ return err
+ }
+
+ switch u.Scheme {
+ case "opc.tcp":
+ o.state = Connecting
+
+ if o.client != nil {
+ o.client.CloseSession()
+ }
+
+ o.client = opcua.NewClient(o.Endpoint, o.opts...)
+ ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout))
+ defer cancel()
+ if err := o.client.Connect(ctx); err != nil {
+ return fmt.Errorf("Error in Client Connection: %s", err)
+ }
+
+ regResp, err := o.client.RegisterNodes(&ua.RegisterNodesRequest{
+ NodesToRegister: o.NodeIDs,
+ })
+ if err != nil {
+ return fmt.Errorf("RegisterNodes failed: %v", err)
+ }
+
+ o.req = &ua.ReadRequest{
+ MaxAge: 2000,
+ NodesToRead: readvalues(regResp.RegisteredNodeIDs),
+ TimestampsToReturn: ua.TimestampsToReturnBoth,
+ }
+
+ err = o.getData()
+ if err != nil {
+ return fmt.Errorf("Get Data Failed: %v", err)
+ }
+
+ default:
+ return fmt.Errorf("unsupported scheme %q in endpoint. Expected opc.tcp", u.Scheme)
+ }
+ return nil
+}
+
+func (o *OpcUA) setupOptions() error {
+
+ // Get a list of the endpoints for our target server
+ endpoints, err := opcua.GetEndpoints(o.Endpoint)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if o.Certificate == "" && o.PrivateKey == "" {
+ if o.SecurityPolicy != "None" || o.SecurityMode != "None" {
+ o.Certificate, o.PrivateKey = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, (365 * 24 * time.Hour))
+ }
+ }
+
+ o.opts = generateClientOpts(endpoints, o.Certificate, o.PrivateKey, o.SecurityPolicy, o.SecurityMode, o.AuthMethod, o.Username, o.Password, time.Duration(o.RequestTimeout))
+
+ return nil
+}
+
+func (o *OpcUA) getData() error {
+ resp, err := o.client.Read(o.req)
+ if err != nil {
+ o.ReadError++
+ return fmt.Errorf("RegisterNodes Read failed: %v", err)
+ }
+ o.ReadSuccess++
+ for i, d := range resp.Results {
+ if d.Status != ua.StatusOK {
+ return fmt.Errorf("Status not OK: %v", d.Status)
+ }
+ o.NodeData[i].TagName = o.NodeList[i].Name
+ if d.Value != nil {
+ o.NodeData[i].Value = d.Value.Value()
+ o.NodeData[i].DataType = d.Value.Type()
+ }
+ o.NodeData[i].Quality = d.Status
+ o.NodeData[i].TimeStamp = d.ServerTimestamp.String()
+ o.NodeData[i].Time = d.SourceTimestamp.String()
+ }
+ return nil
+}
+
+func readvalues(ids []*ua.NodeID) []*ua.ReadValueID {
+ rvids := make([]*ua.ReadValueID, len(ids))
+ for i, v := range ids {
+ rvids[i] = &ua.ReadValueID{NodeID: v}
+ }
+ return rvids
+}
+
+func disconnect(o *OpcUA) error {
+ u, err := url.Parse(o.Endpoint)
+ if err != nil {
+ return err
+ }
+
+ o.ReadError = 0
+ o.ReadSuccess = 0
+
+ switch u.Scheme {
+ case "opc.tcp":
+ o.state = Disconnected
+ o.client.Close()
+ return nil
+ default:
+ return fmt.Errorf("invalid controller")
+ }
+}
+
+// Gather defines what data the plugin will gather.
+func (o *OpcUA) Gather(acc telegraf.Accumulator) error {
+ if o.state == Disconnected {
+ o.state = Connecting
+ err := Connect(o)
+ if err != nil {
+ o.state = Disconnected
+ return err
+ }
+ }
+
+ o.state = Connected
+
+ err := o.getData()
+ if err != nil && o.state == Connected {
+ o.state = Disconnected
+ disconnect(o)
+ return err
+ }
+
+ for i, n := range o.NodeList {
+ fields := make(map[string]interface{})
+ tags := map[string]string{
+ "name": n.Name,
+ "id": BuildNodeID(n),
+ }
+
+ fields[o.NodeData[i].TagName] = o.NodeData[i].Value
+ fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.NodeData[i].Quality))
+ acc.AddFields(o.Name, fields, tags)
+ }
+ return nil
+}
+
+// Add this plugin to telegraf
+func init() {
+ inputs.Add("opcua", func() telegraf.Input {
+ return &OpcUA{
+ Name: "localhost",
+ Endpoint: "opc.tcp://localhost:4840",
+ SecurityPolicy: "auto",
+ SecurityMode: "auto",
+ RequestTimeout: config.Duration(5 * time.Second),
+ ConnectTimeout: config.Duration(10 * time.Second),
+ Certificate: "/etc/telegraf/cert.pem",
+ PrivateKey: "/etc/telegraf/key.pem",
+ AuthMethod: "Anonymous",
+ }
+ })
+}
diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go
new file mode 100644
index 0000000000000..637ac87bc0afa
--- /dev/null
+++ b/plugins/inputs/opcua/opcua_client_test.go
@@ -0,0 +1,110 @@
+package opcua_client
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/config"
+ "github.com/stretchr/testify/require"
+)
+
+type OPCTags struct {
+ Name string
+ Namespace string
+ IdentifierType string
+ Identifier string
+ DataType string
+ Want string
+}
+
+func TestClient1(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping integration test in short mode")
+ }
+
+ var testopctags = []OPCTags{
+ {"ProductName", "0", "i", "2261", "string", "open62541 OPC UA Server"},
+ {"ProductUri", "0", "i", "2262", "string", "http://open62541.org"},
+ {"ManufacturerName", "0", "i", "2263", "string", "open62541"},
+ }
+
+ var o OpcUA
+ var err error
+
+ o.Name = "testing"
+ o.Endpoint = "opc.tcp://opcua.rocks:4840"
+ o.AuthMethod = "Anonymous"
+ o.ConnectTimeout = config.Duration(10 * time.Second)
+ o.RequestTimeout = config.Duration(1 * time.Second)
+ o.SecurityPolicy = "None"
+ o.SecurityMode = "None"
+ for _, tags := range testopctags {
+ o.NodeList = append(o.NodeList, MapOPCTag(tags))
+ }
+ err = o.Init()
+ if err != nil {
+ t.Errorf("Initialize Error: %s", err)
+ }
+ err = Connect(&o)
+ if err != nil {
+ t.Fatalf("Connect Error: %s", err)
+ }
+
+ for i, v := range o.NodeData {
+ if v.Value != nil {
+ types := reflect.TypeOf(v.Value)
+ value := reflect.ValueOf(v.Value)
+ compare := fmt.Sprintf("%v", value.Interface())
+ if compare != testopctags[i].Want {
+ t.Errorf("Tag %s: Values %v for type %s does not match record", o.NodeList[i].Name, value.Interface(), types)
+ }
+ } else {
+ t.Errorf("Tag: %s has value: %v", o.NodeList[i].Name, v.Value)
+ }
+ }
+}
+
+func MapOPCTag(tags OPCTags) (out OPCTag) {
+ out.Name = tags.Name
+ out.Namespace = tags.Namespace
+ out.IdentifierType = tags.IdentifierType
+ out.Identifier = tags.Identifier
+ out.DataType = tags.DataType
+ return out
+}
+
+func TestConfig(t *testing.T) {
+ toml := `
+[[inputs.opcua]]
+name = "localhost"
+endpoint = "opc.tcp://localhost:4840"
+connect_timeout = "10s"
+request_timeout = "5s"
+security_policy = "auto"
+security_mode = "auto"
+certificate = "/etc/telegraf/cert.pem"
+private_key = "/etc/telegraf/key.pem"
+auth_method = "Anonymous"
+username = ""
+password = ""
+nodes = [
+ {name="name", namespace="", identifier_type="", identifier="", data_type="", description=""},
+ {name="name2", namespace="", identifier_type="", identifier="", data_type="", description=""},
+]
+`
+
+ c := config.NewConfig()
+ err := c.LoadConfigData([]byte(toml))
+ require.NoError(t, err)
+
+ require.Len(t, c.Inputs, 1)
+
+ o, ok := c.Inputs[0].Input.(*OpcUA)
+ require.True(t, ok)
+
+ require.Len(t, o.NodeList, 2)
+ require.Equal(t, o.NodeList[0].Name, "name")
+ require.Equal(t, o.NodeList[1].Name, "name2")
+}
diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go
new file mode 100644
index 0000000000000..c0eac2483eb22
--- /dev/null
+++ b/plugins/inputs/opcua/opcua_util.go
@@ -0,0 +1,332 @@
+package opcua_client
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/big"
+ "net"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/gopcua/opcua"
+ "github.com/gopcua/opcua/debug"
+ "github.com/gopcua/opcua/ua"
+ "github.com/pkg/errors"
+)
+
+// SELF SIGNED CERT FUNCTIONS
+
+func newTempDir() (string, error) {
+ dir, err := ioutil.TempDir("", "ssc")
+ return dir, err
+}
+
+func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.Duration) (string, string) {
+
+ dir, _ := newTempDir()
+
+ if len(host) == 0 {
+ log.Fatalf("Missing required host parameter")
+ }
+ if rsaBits == 0 {
+ rsaBits = 2048
+ }
+ if len(certFile) == 0 {
+ certFile = fmt.Sprintf("%s/cert.pem", dir)
+ }
+ if len(keyFile) == 0 {
+ keyFile = fmt.Sprintf("%s/key.pem", dir)
+ }
+
+ priv, err := rsa.GenerateKey(rand.Reader, rsaBits)
+ if err != nil {
+ log.Fatalf("failed to generate private key: %s", err)
+ }
+
+ notBefore := time.Now()
+ notAfter := notBefore.Add(dur)
+
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+ if err != nil {
+ log.Fatalf("failed to generate serial number: %s", err)
+ }
+
+ template := x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{
+ Organization: []string{"Telegraf OPC UA client"},
+ },
+ NotBefore: notBefore,
+ NotAfter: notAfter,
+
+ KeyUsage: x509.KeyUsageContentCommitment | x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageDataEncipherment | x509.KeyUsageCertSign,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
+ BasicConstraintsValid: true,
+ }
+
+ hosts := strings.Split(host, ",")
+ for _, h := range hosts {
+ if ip := net.ParseIP(h); ip != nil {
+ template.IPAddresses = append(template.IPAddresses, ip)
+ } else {
+ template.DNSNames = append(template.DNSNames, h)
+ }
+ if uri, err := url.Parse(h); err == nil {
+ template.URIs = append(template.URIs, uri)
+ }
+ }
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv)
+ if err != nil {
+ log.Fatalf("Failed to create certificate: %s", err)
+ }
+
+ certOut, err := os.Create(certFile)
+ if err != nil {
+ log.Fatalf("failed to open %s for writing: %s", certFile, err)
+ }
+ if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
+ log.Fatalf("failed to write data to %s: %s", certFile, err)
+ }
+ if err := certOut.Close(); err != nil {
+ log.Fatalf("error closing %s: %s", certFile, err)
+ }
+
+ keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+ if err != nil {
+ log.Printf("failed to open %s for writing: %s", keyFile, err)
+ return "", ""
+ }
+ if err := pem.Encode(keyOut, pemBlockForKey(priv)); err != nil {
+ log.Fatalf("failed to write data to %s: %s", keyFile, err)
+ }
+ if err := keyOut.Close(); err != nil {
+ log.Fatalf("error closing %s: %s", keyFile, err)
+ }
+
+ return certFile, keyFile
+}
+
+func publicKey(priv interface{}) interface{} {
+ switch k := priv.(type) {
+ case *rsa.PrivateKey:
+ return &k.PublicKey
+ case *ecdsa.PrivateKey:
+ return &k.PublicKey
+ default:
+ return nil
+ }
+}
+
+func pemBlockForKey(priv interface{}) *pem.Block {
+ switch k := priv.(type) {
+ case *rsa.PrivateKey:
+ return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
+ case *ecdsa.PrivateKey:
+ b, err := x509.MarshalECPrivateKey(k)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err)
+ os.Exit(2)
+ }
+ return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
+ default:
+ return nil
+ }
+}
+
+// OPT FUNCTIONS
+
+func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, policy, mode, auth, username, password string, requestTimeout time.Duration) []opcua.Option {
+ opts := []opcua.Option{}
+ appuri := "urn:telegraf:gopcua:client"
+ appname := "Telegraf"
+
+ // ApplicationURI is automatically read from the cert so is not required if a cert if provided
+ opts = append(opts, opcua.ApplicationURI(appuri))
+ opts = append(opts, opcua.ApplicationName(appname))
+
+ opts = append(opts, opcua.RequestTimeout(requestTimeout))
+
+ if certFile == "" && keyFile == "" {
+ if policy != "None" || mode != "None" {
+ certFile, keyFile = generateCert(appuri, 2048, certFile, keyFile, (365 * 24 * time.Hour))
+ }
+ }
+
+ var cert []byte
+ if certFile != "" && keyFile != "" {
+ debug.Printf("Loading cert/key from %s/%s", certFile, keyFile)
+ c, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ log.Printf("Failed to load certificate: %s", err)
+ } else {
+ pk, ok := c.PrivateKey.(*rsa.PrivateKey)
+ if !ok {
+ log.Fatalf("Invalid private key")
+ }
+ cert = c.Certificate[0]
+ opts = append(opts, opcua.PrivateKey(pk), opcua.Certificate(cert))
+ }
+ }
+
+ var secPolicy string
+ switch {
+ case policy == "auto":
+ // set it later
+ case strings.HasPrefix(policy, ua.SecurityPolicyURIPrefix):
+ secPolicy = policy
+ policy = ""
+ case policy == "None" || policy == "Basic128Rsa15" || policy == "Basic256" || policy == "Basic256Sha256" || policy == "Aes128_Sha256_RsaOaep" || policy == "Aes256_Sha256_RsaPss":
+ secPolicy = ua.SecurityPolicyURIPrefix + policy
+ policy = ""
+ default:
+ log.Fatalf("Invalid security policy: %s", policy)
+ }
+
+ // Select the most appropriate authentication mode from server capabilities and user input
+ authMode, authOption := generateAuth(auth, cert, username, password)
+ opts = append(opts, authOption)
+
+ var secMode ua.MessageSecurityMode
+ switch strings.ToLower(mode) {
+ case "auto":
+ case "none":
+ secMode = ua.MessageSecurityModeNone
+ mode = ""
+ case "sign":
+ secMode = ua.MessageSecurityModeSign
+ mode = ""
+ case "signandencrypt":
+ secMode = ua.MessageSecurityModeSignAndEncrypt
+ mode = ""
+ default:
+ log.Fatalf("Invalid security mode: %s", mode)
+ }
+
+ // Allow input of only one of sec-mode,sec-policy when choosing 'None'
+ if secMode == ua.MessageSecurityModeNone || secPolicy == ua.SecurityPolicyURINone {
+ secMode = ua.MessageSecurityModeNone
+ secPolicy = ua.SecurityPolicyURINone
+ }
+
+ // Find the best endpoint based on our input and server recommendation (highest SecurityMode+SecurityLevel)
+ var serverEndpoint *ua.EndpointDescription
+ switch {
+ case mode == "auto" && policy == "auto": // No user selection, choose best
+ for _, e := range endpoints {
+ if serverEndpoint == nil || (e.SecurityMode >= serverEndpoint.SecurityMode && e.SecurityLevel >= serverEndpoint.SecurityLevel) {
+ serverEndpoint = e
+ }
+ }
+
+ case mode != "auto" && policy == "auto": // User only cares about mode, select highest securitylevel with that mode
+ for _, e := range endpoints {
+ if e.SecurityMode == secMode && (serverEndpoint == nil || e.SecurityLevel >= serverEndpoint.SecurityLevel) {
+ serverEndpoint = e
+ }
+ }
+
+ case mode == "auto" && policy != "auto": // User only cares about policy, select highest securitylevel with that policy
+ for _, e := range endpoints {
+ if e.SecurityPolicyURI == secPolicy && (serverEndpoint == nil || e.SecurityLevel >= serverEndpoint.SecurityLevel) {
+ serverEndpoint = e
+ }
+ }
+
+ default: // User cares about both
+ for _, e := range endpoints {
+ if e.SecurityPolicyURI == secPolicy && e.SecurityMode == secMode && (serverEndpoint == nil || e.SecurityLevel >= serverEndpoint.SecurityLevel) {
+ serverEndpoint = e
+ }
+ }
+ }
+
+ if serverEndpoint == nil { // Didn't find an endpoint with matching policy and mode.
+ log.Printf("unable to find suitable server endpoint with selected sec-policy and sec-mode")
+ log.Fatalf("quitting")
+ }
+
+ secPolicy = serverEndpoint.SecurityPolicyURI
+ secMode = serverEndpoint.SecurityMode
+
+ // Check that the selected endpoint is a valid combo
+ err := validateEndpointConfig(endpoints, secPolicy, secMode, authMode)
+ if err != nil {
+ log.Fatalf("error validating input: %s", err)
+ }
+
+ opts = append(opts, opcua.SecurityFromEndpoint(serverEndpoint, authMode))
+ return opts
+}
+
+func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option) {
+ var err error
+
+ var authMode ua.UserTokenType
+ var authOption opcua.Option
+ switch strings.ToLower(a) {
+ case "anonymous":
+ authMode = ua.UserTokenTypeAnonymous
+ authOption = opcua.AuthAnonymous()
+
+ case "username":
+ authMode = ua.UserTokenTypeUserName
+
+ if un == "" {
+ if err != nil {
+ log.Fatalf("error reading username input: %s", err)
+ }
+ }
+
+ if pw == "" {
+ if err != nil {
+ log.Fatalf("error reading username input: %s", err)
+ }
+ }
+
+ authOption = opcua.AuthUsername(un, pw)
+
+ case "certificate":
+ authMode = ua.UserTokenTypeCertificate
+ authOption = opcua.AuthCertificate(cert)
+
+ case "issuedtoken":
+ // todo: this is unsupported, fail here or fail in the opcua package?
+ authMode = ua.UserTokenTypeIssuedToken
+ authOption = opcua.AuthIssuedToken([]byte(nil))
+
+ default:
+ log.Printf("unknown auth-mode, defaulting to Anonymous")
+ authMode = ua.UserTokenTypeAnonymous
+ authOption = opcua.AuthAnonymous()
+
+ }
+
+ return authMode, authOption
+}
+
+func validateEndpointConfig(endpoints []*ua.EndpointDescription, secPolicy string, secMode ua.MessageSecurityMode, authMode ua.UserTokenType) error {
+ for _, e := range endpoints {
+ if e.SecurityMode == secMode && e.SecurityPolicyURI == secPolicy {
+ for _, t := range e.UserIdentityTokens {
+ if t.TokenType == authMode {
+ return nil
+ }
+ }
+ }
+ }
+
+ err := errors.Errorf("server does not support an endpoint with security : %s , %s", secPolicy, secMode)
+ return err
+}
diff --git a/plugins/inputs/openldap/README.md b/plugins/inputs/openldap/README.md
index 48f29cb60d77f..fcb175bd430f8 100644
--- a/plugins/inputs/openldap/README.md
+++ b/plugins/inputs/openldap/README.md
@@ -1,4 +1,4 @@
-# Openldap Input Plugin
+# OpenLDAP Input Plugin
This plugin gathers metrics from OpenLDAP's cn=Monitor backend.
@@ -35,7 +35,9 @@ To use this plugin you must enable the [slapd monitoring](https://www.openldap.o
All **monitorCounter**, **monitoredInfo**, **monitorOpInitiated**, and **monitorOpCompleted** attributes are gathered based on this LDAP query:
-```(|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject))```
+```
+(|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject))
+```
Metric names are based on their entry DN with the cn=Monitor base removed. If `reverse_metric_names` is not set, metrics are based on their DN. If `reverse_metric_names` is set to `true`, the names are reversed. This is recommended as it allows the names to sort more naturally.
diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go
index 9e69c8a211924..bafb5db892414 100644
--- a/plugins/inputs/openldap/openldap.go
+++ b/plugins/inputs/openldap/openldap.go
@@ -5,11 +5,10 @@ import (
"strconv"
"strings"
- "gopkg.in/ldap.v2"
-
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
+ "gopkg.in/ldap.v3"
)
type Openldap struct {
@@ -57,6 +56,12 @@ var attrTranslate = map[string]string{
"monitoredInfo": "",
"monitorOpInitiated": "_initiated",
"monitorOpCompleted": "_completed",
+ "olmMDBPagesMax": "_mdb_pages_max",
+ "olmMDBPagesUsed": "_mdb_pages_used",
+ "olmMDBPagesFree": "_mdb_pages_free",
+ "olmMDBReadersMax": "_mdb_readers_max",
+ "olmMDBReadersUsed": "_mdb_readers_used",
+ "olmMDBEntries": "_mdb_entries",
}
func (o *Openldap) SampleConfig() string {
diff --git a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go
index 10835896fbbc8..76d9cc3a9dd42 100644
--- a/plugins/inputs/openldap/openldap_test.go
+++ b/plugins/inputs/openldap/openldap_test.go
@@ -4,11 +4,10 @@ import (
"strconv"
"testing"
- "gopkg.in/ldap.v2"
-
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "gopkg.in/ldap.v3"
)
func TestOpenldapMockResult(t *testing.T) {
diff --git a/plugins/inputs/openntpd/README.md b/plugins/inputs/openntpd/README.md
new file mode 100644
index 0000000000000..877c3a46092b1
--- /dev/null
+++ b/plugins/inputs/openntpd/README.md
@@ -0,0 +1,93 @@
+# OpenNTPD Input Plugin
+
+Get standard NTP query metrics from [OpenNTPD][] using the ntpctl command.
+
+[OpenNTPD]: http://www.openntpd.org/
+
+Below is the documentation of the various headers returned from the NTP query
+command when running `ntpctl -s peers`.
+
+- remote – The remote peer or server being synced to.
+- wt – the peer weight
+- tl – the peer trust level
+- st (stratum) – The remote peer or server Stratum
+- next – number of seconds until the next poll
+- poll – polling interval in seconds
+- delay – Round trip communication delay to the remote peer
+or server (milliseconds);
+- offset – Mean offset (phase) in the times reported between this local host and
+the remote peer or server (RMS, milliseconds);
+- jitter – Mean deviation (jitter) in the time reported for that remote peer or
+server (RMS of difference of multiple time samples, milliseconds);
+
+### Configuration
+
+```toml
+[[inputs.openntpd]]
+ ## Run ntpctl binary with sudo.
+ # use_sudo = false
+
+ ## Location of the ntpctl binary.
+ # binary = "/usr/sbin/ntpctl"
+
+ ## Maximum time the ntpctl binary is allowed to run.
+ # timeout = "5ms"
+```
+
+### Metrics
+
+- ntpctl
+ - tags:
+ - remote
+ - stratum
+ - fields:
+ - delay (float, milliseconds)
+ - jitter (float, milliseconds)
+ - offset (float, milliseconds)
+ - poll (int, seconds)
+ - next (int, seconds)
+ - wt (int)
+ - tl (int)
+
+### Permissions
+
+It's important to note that this plugin references ntpctl, which may require
+additional permissions to execute successfully.
+Depending on the user/group permissions of the telegraf user executing this
+plugin, you may need to alter the group membership, set facls, or use sudo.
+
+**Group membership (Recommended)**:
+```bash
+$ groups telegraf
+telegraf : telegraf
+
+$ usermod -a -G ntpd telegraf
+
+$ groups telegraf
+telegraf : telegraf ntpd
+```
+
+**Sudo privileges**:
+If you use this method, you will need the following in your telegraf config:
+```toml
+[[inputs.openntpd]]
+ use_sudo = true
+```
+
+You will also need to update your sudoers file:
+```bash
+$ visudo
+# Add the following lines:
+Cmnd_Alias NTPCTL = /usr/sbin/ntpctl
+telegraf ALL=(ALL) NOPASSWD: NTPCTL
+Defaults!NTPCTL !logfile, !syslog, !pam_session
+```
+
+Please use the solution you see as most appropriate.
+
+### Example Output
+
+```
+openntpd,remote=194.57.169.1,stratum=2,host=localhost tl=10i,poll=1007i,
+offset=2.295,jitter=3.896,delay=53.766,next=266i,wt=1i 1514454299000000000
+```
diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go
new file mode 100644
index 0000000000000..e7723b480a581
--- /dev/null
+++ b/plugins/inputs/openntpd/openntpd.go
@@ -0,0 +1,223 @@
+package openntpd
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "os/exec"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/filter"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Mapping of ntpctl header names to tag keys
+var tagHeaders = map[string]string{
+ "st": "stratum",
+}
+
+// Mapping of the ntpctl tag key to the index in the command output
+var tagI = map[string]int{
+ "stratum": 2,
+}
+
+// Mapping of float metrics to their index in the command output
+var floatI = map[string]int{
+ "offset": 5,
+ "delay": 6,
+ "jitter": 7,
+}
+
+// Mapping of int metrics to their index in the command output
+var intI = map[string]int{
+ "wt": 0,
+ "tl": 1,
+ "next": 3,
+ "poll": 4,
+}
+
+type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error)
+
+// Openntpd is used to store configuration values
+type Openntpd struct {
+ Binary string
+ Timeout internal.Duration
+ UseSudo bool
+
+ filter filter.Filter
+ run runner
+}
+
+var defaultBinary = "/usr/sbin/ntpctl"
+var defaultTimeout = internal.Duration{Duration: 5 * time.Second}
+
+func (n *Openntpd) Description() string {
+ return "Get standard NTP query metrics from OpenNTPD."
+}
+
+func (n *Openntpd) SampleConfig() string {
+ return `
+ ## Run ntpctl binary with sudo.
+ # use_sudo = false
+
+ ## Location of the ntpctl binary.
+ # binary = "/usr/sbin/ntpctl"
+
+ ## Maximum time the ntpctl binary is allowed to run.
+ # timeout = "5ms"
+ `
+}
+
+// Shell out to ntpctl and return the output
+func openntpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) {
+ cmdArgs := []string{"-s", "peers"}
+
+ cmd := exec.Command(cmdName, cmdArgs...)
+
+ if UseSudo {
+ cmdArgs = append([]string{cmdName}, cmdArgs...)
+ cmd = exec.Command("sudo", cmdArgs...)
+ }
+
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err := internal.RunTimeout(cmd, Timeout.Duration)
+ if err != nil {
+ return &out, fmt.Errorf("error running ntpctl: %s", err)
+ }
+
+ return &out, nil
+}
+
+func (n *Openntpd) Gather(acc telegraf.Accumulator) error {
+ out, err := n.run(n.Binary, n.Timeout, n.UseSudo)
+ if err != nil {
+ return fmt.Errorf("error gathering metrics: %s", err)
+ }
+
+ lineCounter := 0
+ scanner := bufio.NewScanner(out)
+ for scanner.Scan() {
+ // skip first (peer) and second (field list) line
+ if lineCounter < 2 {
+ lineCounter++
+ continue
+ }
+
+ line := scanner.Text()
+
+ fields := strings.Fields(line)
+
+ mFields := make(map[string]interface{})
+ tags := make(map[string]string)
+
+ // Even line ---> ntp server info
+ if lineCounter%2 == 0 {
+ // DNS resolution error ---> keep DNS name as remote name
+ if fields[0] != "not" {
+ tags["remote"] = fields[0]
+ } else {
+ tags["remote"] = fields[len(fields)-1]
+ }
+ }
+
+ // Read next line - Odd line ---> ntp server stats
+ scanner.Scan()
+ line = scanner.Text()
+ lineCounter++
+
+ fields = strings.Fields(line)
+
+ // if there is an ntpctl state prefix, remove it and make it it's own tag
+ if strings.ContainsAny(string(fields[0]), "*") {
+ tags["state_prefix"] = string(fields[0])
+ fields = fields[1:]
+ }
+
+ // Get tags from output
+ for key, index := range tagI {
+ if index >= len(fields) {
+ continue
+ }
+ tags[key] = fields[index]
+ }
+
+ // Get integer metrics from output
+ for key, index := range intI {
+ if index >= len(fields) {
+ continue
+ }
+ if fields[index] == "-" {
+ continue
+ }
+
+ if key == "next" || key == "poll" {
+
+ m, err := strconv.ParseInt(strings.TrimSuffix(fields[index], "s"), 10, 64)
+ if err != nil {
+ acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index]))
+ continue
+ }
+ mFields[key] = m
+
+ } else {
+
+ m, err := strconv.ParseInt(fields[index], 10, 64)
+ if err != nil {
+ acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index]))
+ continue
+ }
+ mFields[key] = m
+ }
+ }
+
+ // get float metrics from output
+ for key, index := range floatI {
+ if len(fields) <= index {
+ continue
+ }
+ if fields[index] == "-" || fields[index] == "----" || fields[index] == "peer" || fields[index] == "not" || fields[index] == "valid" {
+ continue
+ }
+
+ if key == "offset" || key == "delay" || key == "jitter" {
+
+ m, err := strconv.ParseFloat(strings.TrimSuffix(fields[index], "ms"), 64)
+ if err != nil {
+ acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index]))
+ continue
+ }
+ mFields[key] = m
+
+ } else {
+
+ m, err := strconv.ParseFloat(fields[index], 64)
+ if err != nil {
+ acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index]))
+ continue
+ }
+ mFields[key] = m
+
+ }
+ }
+ acc.AddFields("openntpd", mFields, tags)
+
+ lineCounter++
+ }
+ return nil
+}
+
+func init() {
+ inputs.Add("openntpd", func() telegraf.Input {
+ return &Openntpd{
+ run: openntpdRunner,
+ Binary: defaultBinary,
+ Timeout: defaultTimeout,
+ UseSudo: false,
+ }
+ })
+}
diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go
new file mode 100644
index 0000000000000..d629949a533c4
--- /dev/null
+++ b/plugins/inputs/openntpd/openntpd_test.go
@@ -0,0 +1,329 @@
+package openntpd
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+)
+
+var TestTimeout = internal.Duration{Duration: time.Second}
+
+func OpenntpdCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) {
+ return func(string, internal.Duration, bool) (*bytes.Buffer, error) {
+ return bytes.NewBuffer([]byte(output)), nil
+ }
+}
+
+func TestParseSimpleOutput(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ v := &Openntpd{
+ run: OpenntpdCTL(simpleOutput, TestTimeout, false),
+ }
+ err := v.Gather(acc)
+
+ assert.NoError(t, err)
+ assert.True(t, acc.HasMeasurement("openntpd"))
+ assert.Equal(t, acc.NMetrics(), uint64(1))
+
+ assert.Equal(t, acc.NFields(), 7)
+
+ firstpeerfields := map[string]interface{}{
+ "wt": int64(1),
+ "tl": int64(10),
+ "next": int64(56),
+ "poll": int64(63),
+ "offset": float64(9.271),
+ "delay": float64(44.662),
+ "jitter": float64(2.678),
+ }
+
+ firstpeertags := map[string]string{
+ "remote": "212.129.9.36",
+ "stratum": "3",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags)
+}
+
+func TestParseSimpleOutputwithStatePrefix(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ v := &Openntpd{
+ run: OpenntpdCTL(simpleOutputwithStatePrefix, TestTimeout, false),
+ }
+ err := v.Gather(acc)
+
+ assert.NoError(t, err)
+ assert.True(t, acc.HasMeasurement("openntpd"))
+ assert.Equal(t, acc.NMetrics(), uint64(1))
+
+ assert.Equal(t, acc.NFields(), 7)
+
+ firstpeerfields := map[string]interface{}{
+ "wt": int64(1),
+ "tl": int64(10),
+ "next": int64(45),
+ "poll": int64(980),
+ "offset": float64(-9.901),
+ "delay": float64(67.573),
+ "jitter": float64(29.350),
+ }
+
+ firstpeertags := map[string]string{
+ "remote": "92.243.6.5",
+ "stratum": "2",
+ "state_prefix": "*",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags)
+}
+
+func TestParseSimpleOutputInvalidPeer(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ v := &Openntpd{
+ run: OpenntpdCTL(simpleOutputInvalidPeer, TestTimeout, false),
+ }
+ err := v.Gather(acc)
+
+ assert.NoError(t, err)
+ assert.True(t, acc.HasMeasurement("openntpd"))
+ assert.Equal(t, acc.NMetrics(), uint64(1))
+
+ assert.Equal(t, acc.NFields(), 4)
+
+ firstpeerfields := map[string]interface{}{
+ "wt": int64(1),
+ "tl": int64(2),
+ "next": int64(203),
+ "poll": int64(300),
+ }
+
+ firstpeertags := map[string]string{
+ "remote": "178.33.111.49",
+ "stratum": "-",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags)
+}
+
+func TestParseSimpleOutputServersDNSError(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ v := &Openntpd{
+ run: OpenntpdCTL(simpleOutputServersDNSError, TestTimeout, false),
+ }
+ err := v.Gather(acc)
+
+ assert.NoError(t, err)
+ assert.True(t, acc.HasMeasurement("openntpd"))
+ assert.Equal(t, acc.NMetrics(), uint64(1))
+
+ assert.Equal(t, acc.NFields(), 4)
+
+ firstpeerfields := map[string]interface{}{
+ "next": int64(2),
+ "poll": int64(15),
+ "wt": int64(1),
+ "tl": int64(2),
+ }
+
+ firstpeertags := map[string]string{
+ "remote": "pool.nl.ntp.org",
+ "stratum": "-",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags)
+
+ secondpeerfields := map[string]interface{}{
+ "next": int64(2),
+ "poll": int64(15),
+ "wt": int64(1),
+ "tl": int64(2),
+ }
+
+ secondpeertags := map[string]string{
+ "remote": "pool.nl.ntp.org",
+ "stratum": "-",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", secondpeerfields, secondpeertags)
+}
+
+func TestParseSimpleOutputServerDNSError(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ v := &Openntpd{
+ run: OpenntpdCTL(simpleOutputServerDNSError, TestTimeout, false),
+ }
+ err := v.Gather(acc)
+
+ assert.NoError(t, err)
+ assert.True(t, acc.HasMeasurement("openntpd"))
+ assert.Equal(t, acc.NMetrics(), uint64(1))
+
+ assert.Equal(t, acc.NFields(), 4)
+
+ firstpeerfields := map[string]interface{}{
+ "next": int64(12),
+ "poll": int64(15),
+ "wt": int64(1),
+ "tl": int64(2),
+ }
+
+ firstpeertags := map[string]string{
+ "remote": "pool.fr.ntp.org",
+ "stratum": "-",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags)
+}
+
+func TestParseFullOutput(t *testing.T) {
+ acc := &testutil.Accumulator{}
+ v := &Openntpd{
+ run: OpenntpdCTL(fullOutput, TestTimeout, false),
+ }
+ err := v.Gather(acc)
+
+ assert.NoError(t, err)
+ assert.True(t, acc.HasMeasurement("openntpd"))
+ assert.Equal(t, acc.NMetrics(), uint64(20))
+
+ assert.Equal(t, acc.NFields(), 113)
+
+ firstpeerfields := map[string]interface{}{
+ "wt": int64(1),
+ "tl": int64(10),
+ "next": int64(56),
+ "poll": int64(63),
+ "offset": float64(9.271),
+ "delay": float64(44.662),
+ "jitter": float64(2.678),
+ }
+
+ firstpeertags := map[string]string{
+ "remote": "212.129.9.36",
+ "stratum": "3",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags)
+
+ secondpeerfields := map[string]interface{}{
+ "wt": int64(1),
+ "tl": int64(10),
+ "next": int64(21),
+ "poll": int64(64),
+ "offset": float64(-0.103),
+ "delay": float64(53.199),
+ "jitter": float64(9.046),
+ }
+
+ secondpeertags := map[string]string{
+ "remote": "163.172.25.19",
+ "stratum": "2",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", secondpeerfields, secondpeertags)
+
+ thirdpeerfields := map[string]interface{}{
+ "wt": int64(1),
+ "tl": int64(10),
+ "next": int64(45),
+ "poll": int64(980),
+ "offset": float64(-9.901),
+ "delay": float64(67.573),
+ "jitter": float64(29.350),
+ }
+
+ thirdpeertags := map[string]string{
+ "remote": "92.243.6.5",
+ "stratum": "2",
+ "state_prefix": "*",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", thirdpeerfields, thirdpeertags)
+
+ fourthpeerfields := map[string]interface{}{
+ "wt": int64(1),
+ "tl": int64(2),
+ "next": int64(203),
+ "poll": int64(300),
+ }
+
+ fourthpeertags := map[string]string{
+ "remote": "178.33.111.49",
+ "stratum": "-",
+ }
+
+ acc.AssertContainsTaggedFields(t, "openntpd", fourthpeerfields, fourthpeertags)
+}
+
+var simpleOutput = `peer
+wt tl st next poll offset delay jitter
+212.129.9.36 from pool 0.debian.pool.ntp.org
+1 10 3 56s 63s 9.271ms 44.662ms 2.678ms`
+
+var simpleOutputwithStatePrefix = `peer
+wt tl st next poll offset delay jitter
+92.243.6.5 from pool 0.debian.pool.ntp.org
+* 1 10 2 45s 980s -9.901ms 67.573ms 29.350ms`
+
+var simpleOutputInvalidPeer = `peer
+wt tl st next poll offset delay jitter
+178.33.111.49 from pool 0.debian.pool.ntp.org
+1 2 - 203s 300s ---- peer not valid ----`
+
+var simpleOutputServersDNSError = `peer
+wt tl st next poll offset delay jitter
+not resolved from pool pool.nl.ntp.org
+1 2 - 2s 15s ---- peer not valid ----
+`
+var simpleOutputServerDNSError = `peer
+wt tl st next poll offset delay jitter
+not resolved pool.fr.ntp.org
+1 2 - 12s 15s ---- peer not valid ----
+`
+
+var fullOutput = `peer
+wt tl st next poll offset delay jitter
+212.129.9.36 from pool 0.debian.pool.ntp.org
+1 10 3 56s 63s 9.271ms 44.662ms 2.678ms
+163.172.25.19 from pool 0.debian.pool.ntp.org
+1 10 2 21s 64s -0.103ms 53.199ms 9.046ms
+92.243.6.5 from pool 0.debian.pool.ntp.org
+* 1 10 2 45s 980s -9.901ms 67.573ms 29.350ms
+178.33.111.49 from pool 0.debian.pool.ntp.org
+1 2 - 203s 300s ---- peer not valid ----
+62.210.122.129 from pool 1.debian.pool.ntp.org
+1 10 3 4s 60s 5.372ms 53.690ms 14.700ms
+163.172.225.159 from pool 1.debian.pool.ntp.org
+1 10 3 38s 61s 12.276ms 40.631ms 1.282ms
+5.196.192.58 from pool 1.debian.pool.ntp.org
+1 2 - 0s 300s ---- peer not valid ----
+129.250.35.250 from pool 1.debian.pool.ntp.org
+1 10 2 28s 63s 11.236ms 43.874ms 1.381ms
+2001:41d0:a:5a7::1 from pool 2.debian.pool.ntp.org
+1 2 - 5s 15s ---- peer not valid ----
+2001:41d0:8:188d::16 from pool 2.debian.pool.ntp.org
+1 2 - 3s 15s ---- peer not valid ----
+2001:4b98:dc0:41:216:3eff:fe69:46e3 from pool 2.debian.pool.ntp.org
+1 2 - 14s 15s ---- peer not valid ----
+2a01:e0d:1:3:58bf:fa61:0:1 from pool 2.debian.pool.ntp.org
+1 2 - 9s 15s ---- peer not valid ----
+163.172.179.38 from pool 2.debian.pool.ntp.org
+1 10 2 51s 65s -19.229ms 85.404ms 48.734ms
+5.135.3.88 from pool 2.debian.pool.ntp.org
+1 2 - 173s 300s ---- peer not valid ----
+195.154.41.195 from pool 2.debian.pool.ntp.org
+1 10 2 84s 1004s -3.956ms 54.549ms 13.658ms
+62.210.81.130 from pool 2.debian.pool.ntp.org
+1 10 2 158s 1043s -42.593ms 124.353ms 94.230ms
+149.202.97.123 from pool 3.debian.pool.ntp.org
+1 2 - 205s 300s ---- peer not valid ----
+51.15.175.224 from pool 3.debian.pool.ntp.org
+1 10 2 9s 64s 8.861ms 46.640ms 0.668ms
+37.187.5.167 from pool 3.debian.pool.ntp.org
+1 2 - 105s 300s ---- peer not valid ----
+194.57.169.1 from pool 3.debian.pool.ntp.org
+1 10 2 32s 63s 6.589ms 52.051ms 2.057ms`
diff --git a/plugins/inputs/opensmtpd/README.md b/plugins/inputs/opensmtpd/README.md
index 4c1949869b12e..5bbd4be89658a 100644
--- a/plugins/inputs/opensmtpd/README.md
+++ b/plugins/inputs/opensmtpd/README.md
@@ -12,7 +12,7 @@ This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server-
## The default location of the smtpctl binary can be overridden with:
binary = "/usr/sbin/smtpctl"
- # The default timeout of 1s can be overriden with:
+ # The default timeout of 1s can be overridden with:
#timeout = "1s"
```
diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go
index 1c0e5690d1197..c3f76f2efa850 100644
--- a/plugins/inputs/opensmtpd/opensmtpd.go
+++ b/plugins/inputs/opensmtpd/opensmtpd.go
@@ -37,7 +37,7 @@ var sampleConfig = `
## The default location of the smtpctl binary can be overridden with:
binary = "/usr/sbin/smtpctl"
- ## The default timeout of 1000ms can be overriden with (in milliseconds):
+ ## The default timeout of 1000ms can be overridden with (in milliseconds):
timeout = 1000
`
diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md
new file mode 100644
index 0000000000000..85803f76ab046
--- /dev/null
+++ b/plugins/inputs/openweathermap/README.md
@@ -0,0 +1,82 @@
+# OpenWeatherMap Input Plugin
+
+Collect current weather and forecast data from OpenWeatherMap.
+
+To use this plugin you will need an [api key][] (app_id).
+
+City identifiers can be found in the [city list][]. Alternately you
+can [search][] by name; the `city_id` can be found as the last digits
+of the URL: https://openweathermap.org/city/2643743. Language
+identifiers can be found in the [lang list][]. Documentation for
+condition ID, icon, and main is at [weather conditions][].
+
+### Configuration
+
+```toml
+[[inputs.openweathermap]]
+ ## OpenWeatherMap API key.
+ app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+ ## City ID's to collect weather data from.
+ city_id = ["5391959"]
+
+ ## Language of the description field. Can be one of "ar", "bg",
+ ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
+ ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
+ ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
+ # lang = "en"
+
+ ## APIs to fetch; can contain "weather" or "forecast".
+ fetch = ["weather", "forecast"]
+
+ ## OpenWeatherMap base URL
+ # base_url = "https://api.openweathermap.org/"
+
+ ## Timeout for HTTP response.
+ # response_timeout = "5s"
+
+ ## Preferred unit system for temperature and wind speed. Can be one of
+ ## "metric", "imperial", or "standard".
+ # units = "metric"
+
+ ## Query interval; OpenWeatherMap weather data is updated every 10
+ ## minutes.
+ interval = "10m"
+```
+
+### Metrics
+
+- weather
+ - tags:
+ - city_id
+ - forecast
+ - condition_id
+ - condition_main
+ - fields:
+ - cloudiness (int, percent)
+ - humidity (int, percent)
+ - pressure (float, atmospheric pressure hPa)
+ - rain (float, rain volume for the last 1-3 hours (depending on API response) in mm)
+ - sunrise (int, nanoseconds since unix epoch)
+ - sunset (int, nanoseconds since unix epoch)
+ - temperature (float, degrees)
+ - visibility (int, meters, not available on forecast data)
+ - wind_degrees (float, wind direction in degrees)
+ - wind_speed (float, wind speed in meters/sec or miles/sec)
+ - condition_description (string, localized long description)
+ - condition_icon
+
+
+### Example Output
+
+```
+> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=* cloudiness=1i,condition_description="clear sky",condition_icon="01d",humidity=35i,pressure=1012,rain=0,sunrise=1570630329000000000i,sunset=1570671689000000000i,temperature=21.52,visibility=16093i,wind_degrees=280,wind_speed=5.7 1570659256000000000
+> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=3h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=41i,pressure=1010,rain=0,temperature=22.34,wind_degrees=249.393,wind_speed=2.085 1570665600000000000
+> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=6h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=50i,pressure=1012,rain=0,temperature=17.09,wind_degrees=310.754,wind_speed=3.009 1570676400000000000
+```
+
+[api key]: https://openweathermap.org/appid
+[city list]: http://bulk.openweathermap.org/sample/city.list.json.gz
+[search]: https://openweathermap.org/find
+[lang list]: https://openweathermap.org/current#multi
+[weather conditions]: https://openweathermap.org/weather-conditions
diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go
new file mode 100644
index 0000000000000..94055a6f8bb6a
--- /dev/null
+++ b/plugins/inputs/openweathermap/openweathermap.go
@@ -0,0 +1,363 @@
+package openweathermap
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+const (
+ // https://openweathermap.org/current#severalid
+ // Call for several city IDs
+ // The limit of locations is 20.
+ owmRequestSeveralCityId int = 20
+
+ defaultBaseUrl = "https://api.openweathermap.org/"
+ defaultResponseTimeout time.Duration = time.Second * 5
+ defaultUnits string = "metric"
+ defaultLang string = "en"
+)
+
+type OpenWeatherMap struct {
+ AppId string `toml:"app_id"`
+ CityId []string `toml:"city_id"`
+ Lang string `toml:"lang"`
+ Fetch []string `toml:"fetch"`
+ BaseUrl string `toml:"base_url"`
+ ResponseTimeout internal.Duration `toml:"response_timeout"`
+ Units string `toml:"units"`
+
+ client *http.Client
+ baseUrl *url.URL
+}
+
+var sampleConfig = `
+ ## OpenWeatherMap API key.
+ app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+ ## City ID's to collect weather data from.
+ city_id = ["5391959"]
+
+ ## Language of the description field. Can be one of "ar", "bg",
+ ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
+ ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
+ ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
+ # lang = "en"
+
+ ## APIs to fetch; can contain "weather" or "forecast".
+ fetch = ["weather", "forecast"]
+
+ ## OpenWeatherMap base URL
+ # base_url = "https://api.openweathermap.org/"
+
+ ## Timeout for HTTP response.
+ # response_timeout = "5s"
+
+ ## Preferred unit system for temperature and wind speed. Can be one of
+ ## "metric", "imperial", or "standard".
+ # units = "metric"
+
+ ## Query interval; OpenWeatherMap updates their weather data every 10
+ ## minutes.
+ interval = "10m"
+`
+
+func (n *OpenWeatherMap) SampleConfig() string {
+ return sampleConfig
+}
+
+func (n *OpenWeatherMap) Description() string {
+ return "Read current weather and forecasts data from openweathermap.org"
+}
+
+func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error {
+ var wg sync.WaitGroup
+ var strs []string
+
+ for _, fetch := range n.Fetch {
+ if fetch == "forecast" {
+ for _, city := range n.CityId {
+ addr := n.formatURL("/data/2.5/forecast", city)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ status, err := n.gatherUrl(addr)
+ if err != nil {
+ acc.AddError(err)
+ return
+ }
+
+ gatherForecast(acc, status)
+ }()
+ }
+ } else if fetch == "weather" {
+ j := 0
+ for j < len(n.CityId) {
+ strs = make([]string, 0)
+ for i := 0; j < len(n.CityId) && i < owmRequestSeveralCityId; i++ {
+ strs = append(strs, n.CityId[j])
+ j++
+ }
+ cities := strings.Join(strs, ",")
+
+ addr := n.formatURL("/data/2.5/group", cities)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ status, err := n.gatherUrl(addr)
+ if err != nil {
+ acc.AddError(err)
+ return
+ }
+
+ gatherWeather(acc, status)
+ }()
+ }
+
+ }
+ }
+
+ wg.Wait()
+ return nil
+}
+
+func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) {
+ if n.ResponseTimeout.Duration < time.Second {
+ n.ResponseTimeout.Duration = defaultResponseTimeout
+ }
+
+ client := &http.Client{
+ Transport: &http.Transport{},
+ Timeout: n.ResponseTimeout.Duration,
+ }
+
+ return client, nil
+}
+
+func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) {
+ resp, err := n.client.Get(addr)
+ if err != nil {
+ return nil, fmt.Errorf("error making HTTP request to %s: %s", addr, err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("%s returned HTTP status %s", addr, resp.Status)
+ }
+
+ mediaType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, err
+ }
+
+ if mediaType != "application/json" {
+ return nil, fmt.Errorf("%s returned unexpected content type %s", addr, mediaType)
+ }
+
+ return gatherWeatherUrl(resp.Body)
+}
+
+type WeatherEntry struct {
+ Dt int64 `json:"dt"`
+ Clouds struct {
+ All int64 `json:"all"`
+ } `json:"clouds"`
+ Main struct {
+ Humidity int64 `json:"humidity"`
+ Pressure float64 `json:"pressure"`
+ Temp float64 `json:"temp"`
+ } `json:"main"`
+ Rain struct {
+ Rain1 float64 `json:"1h"`
+ Rain3 float64 `json:"3h"`
+ } `json:"rain"`
+ Sys struct {
+ Country string `json:"country"`
+ Sunrise int64 `json:"sunrise"`
+ Sunset int64 `json:"sunset"`
+ } `json:"sys"`
+ Wind struct {
+ Deg float64 `json:"deg"`
+ Speed float64 `json:"speed"`
+ } `json:"wind"`
+ Id int64 `json:"id"`
+ Name string `json:"name"`
+ Coord struct {
+ Lat float64 `json:"lat"`
+ Lon float64 `json:"lon"`
+ } `json:"coord"`
+ Visibility int64 `json:"visibility"`
+ Weather []struct {
+ ID int64 `json:"id"`
+ Main string `json:"main"`
+ Description string `json:"description"`
+ Icon string `json:"icon"`
+ } `json:"weather"`
+}
+
+type Status struct {
+ City struct {
+ Coord struct {
+ Lat float64 `json:"lat"`
+ Lon float64 `json:"lon"`
+ } `json:"coord"`
+ Country string `json:"country"`
+ Id int64 `json:"id"`
+ Name string `json:"name"`
+ } `json:"city"`
+ List []WeatherEntry `json:"list"`
+}
+
+func gatherWeatherUrl(r io.Reader) (*Status, error) {
+ dec := json.NewDecoder(r)
+ status := &Status{}
+ if err := dec.Decode(status); err != nil {
+ return nil, fmt.Errorf("error while decoding JSON response: %s", err)
+ }
+ return status, nil
+}
+
+func gatherRain(e WeatherEntry) float64 {
+ if e.Rain.Rain1 > 0 {
+ return e.Rain.Rain1
+ }
+ return e.Rain.Rain3
+}
+
+func gatherWeather(acc telegraf.Accumulator, status *Status) {
+ for _, e := range status.List {
+ tm := time.Unix(e.Dt, 0)
+
+ fields := map[string]interface{}{
+ "cloudiness": e.Clouds.All,
+ "humidity": e.Main.Humidity,
+ "pressure": e.Main.Pressure,
+ "rain": gatherRain(e),
+ "sunrise": time.Unix(e.Sys.Sunrise, 0).UnixNano(),
+ "sunset": time.Unix(e.Sys.Sunset, 0).UnixNano(),
+ "temperature": e.Main.Temp,
+ "visibility": e.Visibility,
+ "wind_degrees": e.Wind.Deg,
+ "wind_speed": e.Wind.Speed,
+ }
+ tags := map[string]string{
+ "city": e.Name,
+ "city_id": strconv.FormatInt(e.Id, 10),
+ "country": e.Sys.Country,
+ "forecast": "*",
+ }
+
+ if len(e.Weather) > 0 {
+ fields["condition_description"] = e.Weather[0].Description
+ fields["condition_icon"] = e.Weather[0].Icon
+ tags["condition_id"] = strconv.FormatInt(e.Weather[0].ID, 10)
+ tags["condition_main"] = e.Weather[0].Main
+ }
+
+ acc.AddFields("weather", fields, tags, tm)
+ }
+}
+
+func gatherForecast(acc telegraf.Accumulator, status *Status) {
+ tags := map[string]string{
+ "city_id": strconv.FormatInt(status.City.Id, 10),
+ "forecast": "*",
+ "city": status.City.Name,
+ "country": status.City.Country,
+ }
+ for i, e := range status.List {
+ tm := time.Unix(e.Dt, 0)
+ fields := map[string]interface{}{
+ "cloudiness": e.Clouds.All,
+ "humidity": e.Main.Humidity,
+ "pressure": e.Main.Pressure,
+ "rain": gatherRain(e),
+ "temperature": e.Main.Temp,
+ "wind_degrees": e.Wind.Deg,
+ "wind_speed": e.Wind.Speed,
+ }
+ if len(e.Weather) > 0 {
+ fields["condition_description"] = e.Weather[0].Description
+ fields["condition_icon"] = e.Weather[0].Icon
+ tags["condition_id"] = strconv.FormatInt(e.Weather[0].ID, 10)
+ tags["condition_main"] = e.Weather[0].Main
+ }
+ tags["forecast"] = fmt.Sprintf("%dh", (i+1)*3)
+ acc.AddFields("weather", fields, tags, tm)
+ }
+}
+
+func init() {
+ inputs.Add("openweathermap", func() telegraf.Input {
+ tmout := internal.Duration{
+ Duration: defaultResponseTimeout,
+ }
+ return &OpenWeatherMap{
+ ResponseTimeout: tmout,
+ BaseUrl: defaultBaseUrl,
+ }
+ })
+}
+
+func (n *OpenWeatherMap) Init() error {
+ var err error
+ n.baseUrl, err = url.Parse(n.BaseUrl)
+ if err != nil {
+ return err
+ }
+
+ // Create an HTTP client that is re-used for each
+ // collection interval
+ n.client, err = n.createHttpClient()
+ if err != nil {
+ return err
+ }
+
+ switch n.Units {
+ case "imperial", "standard", "metric":
+ case "":
+ n.Units = defaultUnits
+ default:
+ return fmt.Errorf("unknown units: %s", n.Units)
+ }
+
+ switch n.Lang {
+ case "ar", "bg", "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl",
+ "hr", "hu", "it", "ja", "kr", "la", "lt", "mk", "nl", "pl",
+ "pt", "ro", "ru", "se", "sk", "sl", "es", "tr", "ua", "vi",
+ "zh_cn", "zh_tw":
+ case "":
+ n.Lang = defaultLang
+ default:
+ return fmt.Errorf("unknown language: %s", n.Lang)
+ }
+
+ return nil
+}
+
+func (n *OpenWeatherMap) formatURL(path string, city string) string {
+ v := url.Values{
+ "id": []string{city},
+ "APPID": []string{n.AppId},
+ "lang": []string{n.Lang},
+ "units": []string{n.Units},
+ }
+
+ relative := &url.URL{
+ Path: path,
+ RawQuery: v.Encode(),
+ }
+
+ return n.baseUrl.ResolveReference(relative).String()
+}
diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go
new file mode 100644
index 0000000000000..9bee1d2e96199
--- /dev/null
+++ b/plugins/inputs/openweathermap/openweathermap_test.go
@@ -0,0 +1,830 @@
+package openweathermap
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+const sampleNoContent = `
+{
+}
+`
+
+const sampleStatusResponse = `
+{
+ "city": {
+ "coord": {
+ "lat": 48.8534,
+ "lon": 2.3488
+ },
+ "country": "FR",
+ "id": 2988507,
+ "name": "Paris"
+ },
+ "cnt": 40,
+ "cod": "200",
+ "list": [
+ {
+ "clouds": {
+ "all": 88
+ },
+ "dt": 1543622400,
+ "dt_txt": "2018-12-01 00:00:00",
+ "main": {
+ "grnd_level": 1018.65,
+ "humidity": 91,
+ "pressure": 1018.65,
+ "sea_level": 1030.99,
+ "temp": 6.71,
+ "temp_kf": -2.14
+ },
+ "rain": {
+ "3h": 0.035
+ },
+ "sys": {
+ "pod": "n"
+ },
+ "weather": [
+ {
+ "description": "light rain",
+ "icon": "10n",
+ "id": 500,
+ "main": "Rain"
+ }
+ ],
+ "wind": {
+ "deg": 228.501,
+ "speed": 3.76
+ }
+ },
+ {
+ "clouds": {
+ "all": 92
+ },
+ "dt": 1544043600,
+ "dt_txt": "2018-12-05 21:00:00",
+ "main": {
+ "grnd_level": 1032.18,
+ "humidity": 98,
+ "pressure": 1032.18,
+ "sea_level": 1044.78,
+ "temp": 6.38,
+ "temp_kf": 0
+ },
+ "rain": {
+ "3h": 0.049999999999997
+ },
+ "sys": {
+ "pod": "n"
+ },
+ "weather": [
+ {
+ "description": "light rain",
+ "icon": "10n",
+ "id": 500,
+ "main": "Rain"
+ }
+ ],
+ "wind": {
+ "deg": 335.005,
+ "speed": 2.66
+ }
+ }
+ ],
+ "message": 0.0025
+}
+`
+
+const groupWeatherResponse = `
+{
+ "cnt": 1,
+ "list": [{
+ "clouds": {
+ "all": 0
+ },
+ "coord": {
+ "lat": 48.85,
+ "lon": 2.35
+ },
+ "dt": 1544194800,
+ "id": 2988507,
+ "main": {
+ "humidity": 87,
+ "pressure": 1007,
+ "temp": 9.25
+ },
+ "name": "Paris",
+ "sys": {
+ "country": "FR",
+ "id": 6550,
+ "message": 0.002,
+ "sunrise": 1544167818,
+ "sunset": 1544198047,
+ "type": 1
+ },
+ "visibility": 10000,
+ "weather": [
+ {
+ "description": "light intensity drizzle",
+ "icon": "09d",
+ "id": 300,
+ "main": "Drizzle"
+ }
+ ],
+ "wind": {
+ "deg": 290,
+ "speed": 8.7
+ }
+ }]
+}
+`
+
+const rainWeatherResponse = `
+{
+ "cnt": 2,
+ "list": [{
+ "dt": 1544194800,
+ "id": 111,
+ "main": {
+ "humidity": 87,
+ "pressure": 1007,
+ "temp": 9.25
+ },
+ "name": "Paris",
+ "sys": {
+ "country": "FR",
+ "id": 6550,
+ "message": 0.002,
+ "sunrise": 1544167818,
+ "sunset": 1544198047,
+ "type": 1
+ },
+ "visibility": 10000,
+ "weather": [
+ {
+ "description": "light intensity drizzle",
+ "icon": "09d",
+ "id": 300,
+ "main": "Drizzle"
+ }
+ ],
+ "rain": {
+ "1h": 1.000
+ },
+ "wind": {
+ "deg": 290,
+ "speed": 8.7
+ }
+ },
+ {
+ "dt": 1544194800,
+ "id": 222,
+ "main": {
+ "humidity": 87,
+ "pressure": 1007,
+ "temp": 9.25
+ },
+ "name": "Paris",
+ "sys": {
+ "country": "FR",
+ "id": 6550,
+ "message": 0.002,
+ "sunrise": 1544167818,
+ "sunset": 1544198047,
+ "type": 1
+ },
+ "visibility": 10000,
+ "weather": [
+ {
+ "description": "light intensity drizzle",
+ "icon": "09d",
+ "id": 300,
+ "main": "Drizzle"
+ }
+ ],
+ "rain": {
+ "3h": 3.000
+ },
+ "wind": {
+ "deg": 290,
+ "speed": 8.7
+ }
+ },
+ {
+ "dt": 1544194800,
+ "id": 333,
+ "main": {
+ "humidity": 87,
+ "pressure": 1007,
+ "temp": 9.25
+ },
+ "name": "Paris",
+ "sys": {
+ "country": "FR",
+ "id": 6550,
+ "message": 0.002,
+ "sunrise": 1544167818,
+ "sunset": 1544198047,
+ "type": 1
+ },
+ "visibility": 10000,
+ "weather": [
+ {
+ "description": "light intensity drizzle",
+ "icon": "09d",
+ "id": 300,
+ "main": "Drizzle"
+ }
+ ],
+ "rain": {
+ "1h": 1.300,
+ "3h": 999
+ },
+ "wind": {
+ "deg": 290,
+ "speed": 8.7
+ }
+ },
+ {
+ "dt": 1544194800,
+ "id": 444,
+ "main": {
+ "humidity": 87,
+ "pressure": 1007,
+ "temp": 9.25
+ },
+ "name": "Paris",
+ "sys": {
+ "country": "FR",
+ "id": 6550,
+ "message": 0.002,
+ "sunrise": 1544167818,
+ "sunset": 1544198047,
+ "type": 1
+ },
+ "visibility": 10000,
+ "weather": [
+ {
+ "description": "light intensity drizzle",
+ "icon": "09d",
+ "id": 300,
+ "main": "Drizzle"
+ }
+ ],
+ "wind": {
+ "deg": 290,
+ "speed": 8.7
+ }
+ }]
+}
+`
+const batchWeatherResponse = `
+{
+ "cnt": 3,
+ "list": [{
+ "coord": {
+ "lon": 37.62,
+ "lat": 55.75
+ },
+ "sys": {
+ "type": 1,
+ "id": 9029,
+ "message": 0.0061,
+ "country": "RU",
+ "sunrise": 1556416455,
+ "sunset": 1556470779
+ },
+ "weather": [{
+ "id": 802,
+ "main": "Clouds",
+ "description": "scattered clouds",
+ "icon": "03d"
+ }],
+ "main": {
+ "temp": 9.57,
+ "pressure": 1014,
+ "humidity": 46
+ },
+ "visibility": 10000,
+ "wind": {
+ "speed": 5,
+ "deg": 60
+ },
+ "clouds": {
+ "all": 40
+ },
+ "dt": 1556444155,
+ "id": 524901,
+ "name": "Moscow"
+ }, {
+ "coord": {
+ "lon": 30.52,
+ "lat": 50.43
+ },
+ "sys": {
+ "type": 1,
+ "id": 8903,
+ "message": 0.0076,
+ "country": "UA",
+ "sunrise": 1556419155,
+ "sunset": 1556471486
+ },
+ "weather": [{
+ "id": 520,
+ "main": "Rain",
+ "description": "light intensity shower rain",
+ "icon": "09d"
+ }],
+ "main": {
+ "temp": 19.29,
+ "pressure": 1009,
+ "humidity": 63
+ },
+ "visibility": 10000,
+ "wind": {
+ "speed": 1
+ },
+ "clouds": {
+ "all": 0
+ },
+ "dt": 1556444155,
+ "id": 703448,
+ "name": "Kiev"
+ }, {
+ "coord": {
+ "lon": -0.13,
+ "lat": 51.51
+ },
+ "sys": {
+ "type": 1,
+ "id": 1414,
+ "message": 0.0088,
+ "country": "GB",
+ "sunrise": 1556426319,
+ "sunset": 1556479032
+ },
+ "weather": [{
+ "id": 803,
+ "main": "Clouds",
+ "description": "broken clouds",
+ "icon": "04d"
+ }],
+ "main": {
+ "temp": 10.62,
+ "pressure": 1019,
+ "humidity": 66
+ },
+ "visibility": 10000,
+ "wind": {
+ "speed": 6.2,
+ "deg": 290
+ },
+ "rain": {
+ "3h": 0.072
+ },
+ "clouds": {
+ "all": 75
+ },
+ "dt": 1556444155,
+ "id": 2643743,
+ "name": "London"
+ }]
+}
+`
+
+func TestForecastGeneratesMetrics(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var rsp string
+ if r.URL.Path == "/data/2.5/forecast" {
+ rsp = sampleStatusResponse
+ w.Header()["Content-Type"] = []string{"application/json"}
+ } else if r.URL.Path == "/data/2.5/group" {
+ rsp = sampleNoContent
+ } else {
+ panic("Cannot handle request")
+ }
+
+ fmt.Fprintln(w, rsp)
+ }))
+ defer ts.Close()
+
+ n := &OpenWeatherMap{
+ BaseUrl: ts.URL,
+ AppId: "noappid",
+ CityId: []string{"2988507"},
+ Fetch: []string{"weather", "forecast"},
+ Units: "metric",
+ }
+ n.Init()
+
+ var acc testutil.Accumulator
+
+ err := n.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "2988507",
+ "forecast": "3h",
+ "city": "Paris",
+ "country": "FR",
+ "condition_id": "500",
+ "condition_main": "Rain",
+ },
+ map[string]interface{}{
+ "cloudiness": int64(88),
+ "humidity": int64(91),
+ "pressure": 1018.65,
+ "temperature": 6.71,
+ "rain": 0.035,
+ "wind_degrees": 228.501,
+ "wind_speed": 3.76,
+ "condition_description": "light rain",
+ "condition_icon": "10n",
+ },
+ time.Unix(1543622400, 0),
+ ),
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "2988507",
+ "forecast": "6h",
+ "city": "Paris",
+ "country": "FR",
+ "condition_id": "500",
+ "condition_main": "Rain",
+ },
+ map[string]interface{}{
+ "cloudiness": int64(92),
+ "humidity": int64(98),
+ "pressure": 1032.18,
+ "temperature": 6.38,
+ "rain": 0.049999999999997,
+ "wind_degrees": 335.005,
+ "wind_speed": 2.66,
+ "condition_description": "light rain",
+ "condition_icon": "10n",
+ },
+ time.Unix(1544043600, 0),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t,
+ expected, acc.GetTelegrafMetrics(),
+ testutil.SortMetrics())
+}
+
+func TestWeatherGeneratesMetrics(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var rsp string
+ if r.URL.Path == "/data/2.5/group" {
+ rsp = groupWeatherResponse
+ w.Header()["Content-Type"] = []string{"application/json"}
+ } else if r.URL.Path == "/data/2.5/forecast" {
+ rsp = sampleNoContent
+ } else {
+ panic("Cannot handle request")
+ }
+
+ fmt.Fprintln(w, rsp)
+ }))
+ defer ts.Close()
+
+ n := &OpenWeatherMap{
+ BaseUrl: ts.URL,
+ AppId: "noappid",
+ CityId: []string{"2988507"},
+ Fetch: []string{"weather"},
+ Units: "metric",
+ }
+ n.Init()
+
+ var acc testutil.Accumulator
+
+ err := n.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "2988507",
+ "forecast": "*",
+ "city": "Paris",
+ "country": "FR",
+ "condition_id": "300",
+ "condition_main": "Drizzle",
+ },
+ map[string]interface{}{
+ "cloudiness": int64(0),
+ "humidity": int64(87),
+ "pressure": 1007.0,
+ "temperature": 9.25,
+ "rain": 0.0,
+ "sunrise": int64(1544167818000000000),
+ "sunset": int64(1544198047000000000),
+ "wind_degrees": 290.0,
+ "wind_speed": 8.7,
+ "visibility": 10000,
+ "condition_description": "light intensity drizzle",
+ "condition_icon": "09d",
+ },
+ time.Unix(1544194800, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
+}
+
+// Ensure that results containing "1h", "3h", both, or no rain values are parsed correctly
+func TestRainMetrics(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var rsp string
+ if r.URL.Path == "/data/2.5/group" {
+ rsp = rainWeatherResponse
+ w.Header()["Content-Type"] = []string{"application/json"}
+ } else {
+ panic("Cannot handle request")
+ }
+
+ fmt.Fprintln(w, rsp)
+ }))
+ defer ts.Close()
+
+ n := &OpenWeatherMap{
+ BaseUrl: ts.URL,
+ AppId: "noappid",
+ CityId: []string{"111", "222", "333", "444"},
+ Fetch: []string{"weather"},
+ Units: "metric",
+ }
+ n.Init()
+
+ var acc testutil.Accumulator
+
+ err := n.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ // City with 1h rain value
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "111",
+ "forecast": "*",
+ "city": "Paris",
+ "country": "FR",
+ "condition_id": "300",
+ "condition_main": "Drizzle",
+ },
+ map[string]interface{}{
+ "cloudiness": int64(0),
+ "humidity": int64(87),
+ "pressure": 1007.0,
+ "temperature": 9.25,
+ "rain": 1.0,
+ "sunrise": int64(1544167818000000000),
+ "sunset": int64(1544198047000000000),
+ "wind_degrees": 290.0,
+ "wind_speed": 8.7,
+ "visibility": 10000,
+ "condition_description": "light intensity drizzle",
+ "condition_icon": "09d",
+ },
+ time.Unix(1544194800, 0),
+ ),
+ // City with 3h rain value
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "222",
+ "forecast": "*",
+ "city": "Paris",
+ "country": "FR",
+ "condition_id": "300",
+ "condition_main": "Drizzle",
+ },
+ map[string]interface{}{
+ "cloudiness": int64(0),
+ "humidity": int64(87),
+ "pressure": 1007.0,
+ "temperature": 9.25,
+ "rain": 3.0,
+ "sunrise": int64(1544167818000000000),
+ "sunset": int64(1544198047000000000),
+ "wind_degrees": 290.0,
+ "wind_speed": 8.7,
+ "visibility": 10000,
+ "condition_description": "light intensity drizzle",
+ "condition_icon": "09d",
+ },
+ time.Unix(1544194800, 0),
+ ),
+ // City with both 1h and 3h rain values, prefer the 1h value
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "333",
+ "forecast": "*",
+ "city": "Paris",
+ "country": "FR",
+ "condition_id": "300",
+ "condition_main": "Drizzle",
+ },
+ map[string]interface{}{
+ "cloudiness": int64(0),
+ "humidity": int64(87),
+ "pressure": 1007.0,
+ "temperature": 9.25,
+ "rain": 1.3,
+ "sunrise": int64(1544167818000000000),
+ "sunset": int64(1544198047000000000),
+ "wind_degrees": 290.0,
+ "wind_speed": 8.7,
+ "visibility": 10000,
+ "condition_description": "light intensity drizzle",
+ "condition_icon": "09d",
+ },
+ time.Unix(1544194800, 0),
+ ),
+ // City with no rain values
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "444",
+ "forecast": "*",
+ "city": "Paris",
+ "country": "FR",
+ "condition_id": "300",
+ "condition_main": "Drizzle",
+ },
+ map[string]interface{}{
+ "cloudiness": int64(0),
+ "humidity": int64(87),
+ "pressure": 1007.0,
+ "temperature": 9.25,
+ "rain": 0.0,
+ "sunrise": int64(1544167818000000000),
+ "sunset": int64(1544198047000000000),
+ "wind_degrees": 290.0,
+ "wind_speed": 8.7,
+ "visibility": 10000,
+ "condition_description": "light intensity drizzle",
+ "condition_icon": "09d",
+ },
+ time.Unix(1544194800, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
+}
+
+func TestBatchWeatherGeneratesMetrics(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var rsp string
+ if r.URL.Path == "/data/2.5/group" {
+ rsp = batchWeatherResponse
+ w.Header()["Content-Type"] = []string{"application/json"}
+ } else if r.URL.Path == "/data/2.5/forecast" {
+ rsp = sampleNoContent
+ } else {
+ panic("Cannot handle request")
+ }
+
+ fmt.Fprintln(w, rsp)
+ }))
+ defer ts.Close()
+
+ n := &OpenWeatherMap{
+ BaseUrl: ts.URL,
+ AppId: "noappid",
+ CityId: []string{"524901", "703448", "2643743"},
+ Fetch: []string{"weather"},
+ Units: "metric",
+ }
+ n.Init()
+
+ var acc testutil.Accumulator
+
+ err := n.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "524901",
+ "forecast": "*",
+ "city": "Moscow",
+ "country": "RU",
+ "condition_id": "802",
+ "condition_main": "Clouds",
+ },
+ map[string]interface{}{
+ "cloudiness": 40,
+ "humidity": int64(46),
+ "pressure": 1014.0,
+ "temperature": 9.57,
+ "wind_degrees": 60.0,
+ "wind_speed": 5.0,
+ "rain": 0.0,
+ "sunrise": int64(1556416455000000000),
+ "sunset": int64(1556470779000000000),
+ "visibility": 10000,
+ "condition_description": "scattered clouds",
+ "condition_icon": "03d",
+ },
+ time.Unix(1556444155, 0),
+ ),
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "703448",
+ "forecast": "*",
+ "city": "Kiev",
+ "country": "UA",
+ "condition_id": "520",
+ "condition_main": "Rain",
+ },
+ map[string]interface{}{
+ "cloudiness": 0,
+ "humidity": int64(63),
+ "pressure": 1009.0,
+ "temperature": 19.29,
+ "wind_degrees": 0.0,
+ "wind_speed": 1.0,
+ "rain": 0.0,
+ "sunrise": int64(1556419155000000000),
+ "sunset": int64(1556471486000000000),
+ "visibility": 10000,
+ "condition_description": "light intensity shower rain",
+ "condition_icon": "09d",
+ },
+ time.Unix(1556444155, 0),
+ ),
+ testutil.MustMetric(
+ "weather",
+ map[string]string{
+ "city_id": "2643743",
+ "forecast": "*",
+ "city": "London",
+ "country": "GB",
+ "condition_id": "803",
+ "condition_main": "Clouds",
+ },
+ map[string]interface{}{
+ "cloudiness": 75,
+ "humidity": int64(66),
+ "pressure": 1019.0,
+ "temperature": 10.62,
+ "wind_degrees": 290.0,
+ "wind_speed": 6.2,
+ "rain": 0.072,
+ "sunrise": int64(1556426319000000000),
+ "sunset": int64(1556479032000000000),
+ "visibility": 10000,
+ "condition_description": "broken clouds",
+ "condition_icon": "04d",
+ },
+ time.Unix(1556444155, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t,
+ expected, acc.GetTelegrafMetrics(),
+ testutil.SortMetrics())
+}
+
+func TestFormatURL(t *testing.T) {
+ n := &OpenWeatherMap{
+ AppId: "appid",
+ Units: "units",
+ Lang: "lang",
+ BaseUrl: "http://foo.com",
+ }
+ n.Init()
+
+ require.Equal(t,
+ "http://foo.com/data/2.5/forecast?APPID=appid&id=12345&lang=lang&units=units",
+ n.formatURL("/data/2.5/forecast", "12345"))
+}
+
+func TestDefaultUnits(t *testing.T) {
+ n := &OpenWeatherMap{}
+ n.Init()
+
+ require.Equal(t, "metric", n.Units)
+}
+
+func TestDefaultLang(t *testing.T) {
+ n := &OpenWeatherMap{}
+ n.Init()
+
+ require.Equal(t, "en", n.Lang)
+}
diff --git a/plugins/inputs/pf/README.md b/plugins/inputs/pf/README.md
index 2e70de5b79ab5..83215d8f62f3a 100644
--- a/plugins/inputs/pf/README.md
+++ b/plugins/inputs/pf/README.md
@@ -1,8 +1,8 @@
-# PF Plugin
+# PF Input Plugin
-The pf plugin gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrive information about the state table: the number of current entries in the table, and counters for the number of searches, inserts, and removals to the table.
+The pf plugin gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrieve information about the state table: the number of current entries in the table, and counters for the number of searches, inserts, and removals to the table.
-The pf plugin retrives this information by invoking the `pfstat` command. The `pfstat` command requires read access to the device file `/dev/pf`. You have several options to permit telegraf to run `pfctl`:
+The pf plugin retrieves this information by invoking the `pfstat` command. The `pfstat` command requires read access to the device file `/dev/pf`. You have several options to permit telegraf to run `pfctl`:
* Run telegraf as root. This is strongly discouraged.
* Change the ownership and permissions for /dev/pf such that the user telegraf runs at can read the /dev/pf device file. This is probably not that good of an idea either.
diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md
index 2a841c45aada0..53737a81ad098 100644
--- a/plugins/inputs/pgbouncer/README.md
+++ b/plugins/inputs/pgbouncer/README.md
@@ -1,21 +1,82 @@
-# PgBouncer plugin
+# PgBouncer Input Plugin
-This PgBouncer plugin provides metrics for your PgBouncer load balancer.
+The `pgbouncer` plugin provides metrics for your PgBouncer load balancer.
-More information about the meaning of these metrics can be found in the [PgBouncer Documentation](https://pgbouncer.github.io/usage.html)
+More information about the meaning of these metrics can be found in the
+[PgBouncer Documentation](https://pgbouncer.github.io/usage.html).
-## Configuration
-Specify address via a url matching:
+- PgBouncer minimum tested version: 1.5
- `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]`
+### Configuration example
+
+```toml
+[[inputs.pgbouncer]]
+ ## specify address via a url matching:
+ ## postgres://[pqgotest[:password]]@host:port[/dbname]\
+ ## ?sslmode=[disable|verify-ca|verify-full]
+ ## or a simple string:
+ ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production
+ ##
+ ## All connection parameters are optional.
+ ##
+ address = "host=localhost user=pgbouncer sslmode=disable"
+```
+
+#### `address`
+
+Specify address via a postgresql connection string:
+
+ `host=/run/postgresql port=6432 user=telegraf database=pgbouncer`
+
+Or via an url matching:
+
+ `postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full]`
All connection parameters are optional.
Without the dbname parameter, the driver will default to a database with the same name as the user.
This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for.
-### Configuration example
+### Metrics
+
+- pgbouncer
+ - tags:
+ - db
+ - server
+ - fields:
+ - avg_query_count
+ - avg_query_time
+ - avg_wait_time
+ - avg_xact_count
+ - avg_xact_time
+ - total_query_count
+ - total_query_time
+ - total_received
+ - total_sent
+ - total_wait_time
+ - total_xact_count
+ - total_xact_time
+
++ pgbouncer_pools
+ - tags:
+ - db
+ - pool_mode
+ - server
+ - user
+ - fields:
+ - cl_active
+ - cl_waiting
+ - maxwait
+ - maxwait_us
+ - sv_active
+ - sv_idle
+ - sv_login
+ - sv_tested
+ - sv_used
+
+### Example Output
+
```
-[[inputs.pgbouncer]]
- address = "postgres://telegraf@localhost/pgbouncer"
+pgbouncer,db=pgbouncer,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ avg_query_count=0i,avg_query_time=0i,avg_wait_time=0i,avg_xact_count=0i,avg_xact_time=0i,total_query_count=26i,total_query_time=0i,total_received=0i,total_sent=0i,total_wait_time=0i,total_xact_count=26i,total_xact_time=0i 1581569936000000000
+pgbouncer_pools,db=pgbouncer,pool_mode=statement,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ ,user=pgbouncer cl_active=1i,cl_waiting=0i,maxwait=0i,maxwait_us=0i,sv_active=0i,sv_idle=0i,sv_login=0i,sv_tested=0i,sv_used=0i 1581569936000000000
```
diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go
index 722648c48edc1..0b8c8c16acd02 100644
--- a/plugins/inputs/pgbouncer/pgbouncer.go
+++ b/plugins/inputs/pgbouncer/pgbouncer.go
@@ -2,14 +2,13 @@ package pgbouncer
import (
"bytes"
- "github.com/influxdata/telegraf/plugins/inputs/postgresql"
-
- // register in driver.
- _ "github.com/jackc/pgx/stdlib"
+ "strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
+ _ "github.com/jackc/pgx/stdlib" // register driver
)
type PgBouncer struct {
@@ -25,7 +24,7 @@ var sampleConfig = `
## postgres://[pqgotest[:password]]@localhost[/dbname]\
## ?sslmode=[disable|verify-ca|verify-full]
## or a simple string:
- ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
+ ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
##
## All connection parameters are optional.
##
@@ -71,13 +70,32 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error {
fields := make(map[string]interface{})
for col, val := range columnMap {
_, ignore := ignoredColumns[col]
- if !ignore {
- fields[col] = *val
+ if ignore {
+ continue
+ }
+
+ switch v := (*val).(type) {
+ case int64:
+ // Integer fields are returned in pgbouncer 1.5 through 1.9
+ fields[col] = v
+ case string:
+ // Integer fields are returned in pgbouncer 1.12
+ integer, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return err
+ }
+
+ fields[col] = integer
}
}
acc.AddFields("pgbouncer", fields, tags)
}
+ err = rows.Err()
+ if err != nil {
+ return err
+ }
+
query = `SHOW POOLS`
poolRows, err := p.DB.Query(query)
@@ -98,12 +116,16 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error {
return err
}
- if s, ok := (*columnMap["user"]).(string); ok && s != "" {
- tags["user"] = s
+ if user, ok := columnMap["user"]; ok {
+ if s, ok := (*user).(string); ok && s != "" {
+ tags["user"] = s
+ }
}
- if s, ok := (*columnMap["pool_mode"]).(string); ok && s != "" {
- tags["pool_mode"] = s
+ if poolMode, ok := columnMap["pool_mode"]; ok {
+ if s, ok := (*poolMode).(string); ok && s != "" {
+ tags["pool_mode"] = s
+ }
}
fields := make(map[string]interface{})
diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md
index 531edae241f32..b31f4b7e427bd 100644
--- a/plugins/inputs/phpfpm/README.md
+++ b/plugins/inputs/phpfpm/README.md
@@ -19,6 +19,8 @@ Get phpfpm stats using either HTTP status page or fpm socket.
## "/var/run/php5-fpm.sock"
## or using a custom fpm status path:
## "/var/run/php5-fpm.sock:fpm-custom-status-path"
+ ## glob patterns are also supported:
+ ## "/var/run/php*.sock"
##
## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
## "fcgi://10.0.0.12:9000/status"
@@ -27,6 +29,16 @@ Get phpfpm stats using either HTTP status page or fpm socket.
## Example of multiple gathering from local socket and remote host
## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
urls = ["http://localhost/status"]
+
+ ## Duration allowed to complete HTTP requests.
+ # timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
```
When using `unixsocket`, you have to ensure that telegraf runs on same
diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go
index 5a4d200192a90..9b42d91bd961a 100644
--- a/plugins/inputs/phpfpm/fcgi_client.go
+++ b/plugins/inputs/phpfpm/fcgi_client.go
@@ -59,7 +59,7 @@ func (client *conn) Request(
rec := &record{}
var err1 error
- // recive until EOF or FCGI_END_REQUEST
+ // receive until EOF or FCGI_END_REQUEST
READ_LOOP:
for {
err1 = rec.read(client.rwc)
diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go
index e40dae174fa83..d6b3681209272 100644
--- a/plugins/inputs/phpfpm/phpfpm.go
+++ b/plugins/inputs/phpfpm/phpfpm.go
@@ -13,12 +13,16 @@ import (
"sync"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/globpath"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
const (
PF_POOL = "pool"
PF_PROCESS_MANAGER = "process manager"
+ PF_START_SINCE = "start since"
PF_ACCEPTED_CONN = "accepted conn"
PF_LISTEN_QUEUE = "listen queue"
PF_MAX_LISTEN_QUEUE = "max listen queue"
@@ -35,7 +39,9 @@ type metric map[string]int64
type poolStat map[string]metric
type phpfpm struct {
- Urls []string
+ Urls []string
+ Timeout internal.Duration
+ tls.ClientConfig
client *http.Client
}
@@ -58,33 +64,63 @@ var sampleConfig = `
## "fcgi://10.0.0.12:9000/status"
## "cgi://10.0.10.12:9001/status"
##
- ## Example of multiple gathering from local socket and remove host
+ ## Example of multiple gathering from local socket and remote host
## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
urls = ["http://localhost/status"]
+
+ ## Duration allowed to complete HTTP requests.
+ # timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
`
-func (r *phpfpm) SampleConfig() string {
+func (p *phpfpm) SampleConfig() string {
return sampleConfig
}
-func (r *phpfpm) Description() string {
+func (p *phpfpm) Description() string {
return "Read metrics of phpfpm, via HTTP status page or socket"
}
+func (p *phpfpm) Init() error {
+ tlsCfg, err := p.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ p.client = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ },
+ Timeout: p.Timeout.Duration,
+ }
+ return nil
+}
+
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
-func (g *phpfpm) Gather(acc telegraf.Accumulator) error {
- if len(g.Urls) == 0 {
- return g.gatherServer("http://127.0.0.1/status", acc)
+func (p *phpfpm) Gather(acc telegraf.Accumulator) error {
+ if len(p.Urls) == 0 {
+ return p.gatherServer("http://127.0.0.1/status", acc)
}
var wg sync.WaitGroup
- for _, serv := range g.Urls {
+ urls, err := expandUrls(p.Urls)
+ if err != nil {
+ return err
+ }
+
+ for _, serv := range urls {
wg.Add(1)
go func(serv string) {
defer wg.Done()
- acc.AddError(g.gatherServer(serv, acc))
+ acc.AddError(p.gatherServer(serv, acc))
}(serv)
}
@@ -94,14 +130,9 @@ func (g *phpfpm) Gather(acc telegraf.Accumulator) error {
}
// Request status page to get stat raw data and import it
-func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
- if g.client == nil {
- client := &http.Client{}
- g.client = client
- }
-
+func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") {
- return g.gatherHttp(addr, acc)
+ return p.gatherHttp(addr, acc)
}
var (
@@ -129,18 +160,10 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
statusPath = "status"
}
} else {
- socketAddr := strings.Split(addr, ":")
- if len(socketAddr) >= 2 {
- socketPath = socketAddr[0]
- statusPath = socketAddr[1]
- } else {
- socketPath = socketAddr[0]
+ socketPath, statusPath = unixSocketPaths(addr)
+ if statusPath == "" {
statusPath = "status"
}
-
- if _, err := os.Stat(socketPath); os.IsNotExist(err) {
- return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err)
- }
fcgi, err = newFcgiClient("unix", socketPath)
}
@@ -148,11 +171,11 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
return err
}
- return g.gatherFcgi(fcgi, statusPath, acc, addr)
+ return p.gatherFcgi(fcgi, statusPath, acc, addr)
}
// Gather stat using fcgi protocol
-func (g *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumulator, addr string) error {
+func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumulator, addr string) error {
fpmOutput, fpmErr, err := fcgi.Request(map[string]string{
"SCRIPT_NAME": "/" + statusPath,
"SCRIPT_FILENAME": statusPath,
@@ -172,7 +195,7 @@ func (g *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula
}
// Gather stat using http protocol
-func (g *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error {
+func (p *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error {
u, err := url.Parse(addr)
if err != nil {
return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
@@ -180,7 +203,7 @@ func (g *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error {
req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme,
u.Host, u.Path), nil)
- res, err := g.client.Do(req)
+ res, err := p.client.Do(req)
if err != nil {
return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v",
addr, err)
@@ -219,7 +242,8 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) (poolStat,
// Start to parse metric for current pool
switch fieldName {
- case PF_ACCEPTED_CONN,
+ case PF_START_SINCE,
+ PF_ACCEPTED_CONN,
PF_LISTEN_QUEUE,
PF_MAX_LISTEN_QUEUE,
PF_LISTEN_QUEUE_LEN,
@@ -252,6 +276,70 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) (poolStat,
return stats, nil
}
+func expandUrls(urls []string) ([]string, error) {
+ addrs := make([]string, 0, len(urls))
+ for _, url := range urls {
+ if isNetworkURL(url) {
+ addrs = append(addrs, url)
+ continue
+ }
+ paths, err := globUnixSocket(url)
+ if err != nil {
+ return nil, err
+ }
+ addrs = append(addrs, paths...)
+ }
+ return addrs, nil
+}
+
+func globUnixSocket(url string) ([]string, error) {
+ pattern, status := unixSocketPaths(url)
+ glob, err := globpath.Compile(pattern)
+ if err != nil {
+ return nil, fmt.Errorf("could not compile glob %q: %v", pattern, err)
+ }
+ paths := glob.Match()
+ if len(paths) == 0 {
+ if _, err := os.Stat(paths[0]); err != nil {
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("Socket doesn't exist '%s': %s", pattern, err)
+ }
+ return nil, err
+ }
+ return nil, nil
+ }
+
+ addrs := make([]string, 0, len(paths))
+
+ for _, path := range paths {
+ if status != "" {
+ path = path + ":" + status
+ }
+ addrs = append(addrs, path)
+ }
+
+ return addrs, nil
+}
+
+func unixSocketPaths(addr string) (string, string) {
+ var socketPath, statusPath string
+
+ socketAddr := strings.Split(addr, ":")
+ if len(socketAddr) >= 2 {
+ socketPath = socketAddr[0]
+ statusPath = socketAddr[1]
+ } else {
+ socketPath = socketAddr[0]
+ statusPath = ""
+ }
+
+ return socketPath, statusPath
+}
+
+func isNetworkURL(addr string) bool {
+ return strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") || strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://")
+}
+
func init() {
inputs.Add("phpfpm", func() telegraf.Input {
return &phpfpm{}
diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go
index ba24b0f36a6d2..5f68b07f5dbae 100644
--- a/plugins/inputs/phpfpm/phpfpm_test.go
+++ b/plugins/inputs/phpfpm/phpfpm_test.go
@@ -33,9 +33,12 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) {
Urls: []string{ts.URL},
}
+ err := r.Init()
+ require.NoError(t, err)
+
var acc testutil.Accumulator
- err := acc.GatherError(r.Gather)
+ err = acc.GatherError(r.Gather)
require.NoError(t, err)
tags := map[string]string{
@@ -44,6 +47,7 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) {
}
fields := map[string]interface{}{
+ "start_since": int64(1991),
"accepted_conn": int64(3),
"listen_queue": int64(1),
"max_listen_queue": int64(0),
@@ -75,6 +79,9 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) {
Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"},
}
+ err = r.Init()
+ require.NoError(t, err)
+
var acc testutil.Accumulator
err = acc.GatherError(r.Gather)
require.NoError(t, err)
@@ -85,6 +92,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) {
}
fields := map[string]interface{}{
+ "start_since": int64(1991),
"accepted_conn": int64(3),
"listen_queue": int64(1),
"max_listen_queue": int64(0),
@@ -119,6 +127,9 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) {
Urls: []string{tcp.Addr().String()},
}
+ err = r.Init()
+ require.NoError(t, err)
+
var acc testutil.Accumulator
err = acc.GatherError(r.Gather)
@@ -130,6 +141,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) {
}
fields := map[string]interface{}{
+ "start_since": int64(1991),
"accepted_conn": int64(3),
"listen_queue": int64(1),
"max_listen_queue": int64(0),
@@ -145,6 +157,74 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) {
acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags)
}
+func TestPhpFpmGeneratesMetrics_From_Multiple_Sockets_With_Glob(t *testing.T) {
+ // Create a socket in /tmp because we always have write permission and if the
+ // removing of socket fail when system restart /tmp is clear so
+ // we don't have junk files around
+ var randomNumber int64
+ binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
+ socket1 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)
+ tcp1, err := net.Listen("unix", socket1)
+ if err != nil {
+ t.Fatal("Cannot initialize server on port ")
+ }
+ defer tcp1.Close()
+
+ binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
+ socket2 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)
+ tcp2, err := net.Listen("unix", socket2)
+ if err != nil {
+ t.Fatal("Cannot initialize server on port ")
+ }
+ defer tcp2.Close()
+
+ s := statServer{}
+ go fcgi.Serve(tcp1, s)
+ go fcgi.Serve(tcp2, s)
+
+ r := &phpfpm{
+ Urls: []string{"/tmp/test-fpm[\\-0-9]*.sock"},
+ }
+
+ err = r.Init()
+ require.NoError(t, err)
+
+ var acc1, acc2 testutil.Accumulator
+
+ err = acc1.GatherError(r.Gather)
+ require.NoError(t, err)
+
+ err = acc2.GatherError(r.Gather)
+ require.NoError(t, err)
+
+ tags1 := map[string]string{
+ "pool": "www",
+ "url": socket1,
+ }
+
+ tags2 := map[string]string{
+ "pool": "www",
+ "url": socket2,
+ }
+
+ fields := map[string]interface{}{
+ "start_since": int64(1991),
+ "accepted_conn": int64(3),
+ "listen_queue": int64(1),
+ "max_listen_queue": int64(0),
+ "listen_queue_len": int64(0),
+ "idle_processes": int64(1),
+ "active_processes": int64(1),
+ "total_processes": int64(2),
+ "max_active_processes": int64(1),
+ "max_children_reached": int64(2),
+ "slow_requests": int64(1),
+ }
+
+ acc1.AssertContainsTaggedFields(t, "phpfpm", fields, tags1)
+ acc2.AssertContainsTaggedFields(t, "phpfpm", fields, tags2)
+}
+
func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) {
// Create a socket in /tmp because we always have write permission. If the
// removing of socket fail we won't have junk files around. Cuz when system
@@ -164,6 +244,9 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) {
Urls: []string{tcp.Addr().String() + ":custom-status-path"},
}
+ err = r.Init()
+ require.NoError(t, err)
+
var acc testutil.Accumulator
err = acc.GatherError(r.Gather)
@@ -175,6 +258,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) {
}
fields := map[string]interface{}{
+ "start_since": int64(1991),
"accepted_conn": int64(3),
"listen_queue": int64(1),
"max_listen_queue": int64(0),
@@ -195,9 +279,12 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) {
func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) {
r := &phpfpm{}
+ err := r.Init()
+ require.NoError(t, err)
+
var acc testutil.Accumulator
- err := acc.GatherError(r.Gather)
+ err = acc.GatherError(r.Gather)
require.Error(t, err)
assert.Contains(t, err.Error(), "127.0.0.1/status")
}
@@ -207,11 +294,15 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t
Urls: []string{"http://aninvalidone"},
}
+ err := r.Init()
+ require.NoError(t, err)
+
var acc testutil.Accumulator
- err := acc.GatherError(r.Gather)
+ err = acc.GatherError(r.Gather)
require.Error(t, err)
- assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone': Get http://aninvalidone: dial tcp: lookup aninvalidone`)
+ assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone'`)
+ assert.Contains(t, err.Error(), `lookup aninvalidone`)
}
func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) {
@@ -219,11 +310,14 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi
Urls: []string{"/tmp/invalid.sock"},
}
+ err := r.Init()
+ require.NoError(t, err)
+
var acc testutil.Accumulator
- err := acc.GatherError(r.Gather)
+ err = acc.GatherError(r.Gather)
require.Error(t, err)
- assert.Equal(t, `Socket doesn't exist '/tmp/invalid.sock': stat /tmp/invalid.sock: no such file or directory`, err.Error())
+ assert.Equal(t, `dial unix /tmp/invalid.sock: connect: no such file or directory`, err.Error())
}
diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md
index 5d3904e929c38..91af1b2ae33ed 100644
--- a/plugins/inputs/ping/README.md
+++ b/plugins/inputs/ping/README.md
@@ -2,67 +2,142 @@
Sends a ping message by executing the system ping command and reports the results.
+This plugin has two main methods of operation: `exec` and `native`. The
+recommended method is `native`, which has greater system compatibility and
+performance. However, for backwards compatibility the `exec` method is the
+default.
+
+When using `method = "exec"`, the systems ping utility is executed to send the
+ping packets.
+
Most ping command implementations are supported, one notable exception being
-that there is currently no support for GNU Inetutils ping. You may instead
-use the iputils-ping implementation:
+that there is currently no support for GNU Inetutils ping. You may instead use
+the iputils-ping implementation:
```
apt-get install iputils-ping
```
+When using `method = "native"` a ping is sent and the results are reported in
+native Go by the Telegraf process, eliminating the need to execute the system
+`ping` command.
+
### Configuration:
```toml
[[inputs.ping]]
- ## List of urls to ping
+ ## Hosts to send ping packets to.
urls = ["example.org"]
- ## Number of pings to send per collection (ping -c )
+ ## Method used for sending pings, can be either "exec" or "native". When set
+ ## to "exec" the systems ping command will be executed. When set to "native"
+ ## the plugin will send pings directly.
+ ##
+ ## While the default is "exec" for backwards compatibility, new deployments
+ ## are encouraged to use the "native" method for improved compatibility and
+ ## performance.
+ # method = "exec"
+
+ ## Number of ping packets to send per interval. Corresponds to the "-c"
+ ## option of the ping command.
# count = 1
- ## Interval, in s, at which to ping. 0 == default (ping -i )
- ## Not available in Windows.
+ ## Time to wait between sending ping packets in seconds. Operates like the
+ ## "-i" option of the ping command.
# ping_interval = 1.0
- ## Per-ping timeout, in s. 0 == no timeout (ping -W )
+ ## If set, the time to wait for a ping response in seconds. Operates like
+ ## the "-W" option of the ping command.
# timeout = 1.0
- ## Total-ping deadline, in s. 0 == no deadline (ping -w )
+ ## If set, the total ping deadline, in seconds. Operates like the -w option
+ ## of the ping command.
# deadline = 10
- ## Interface or source address to send ping from (ping -I )
- ## on Darwin and Freebsd only source address possible: (ping -S )
+ ## Interface or source address to send ping from. Operates like the -I or -S
+ ## option of the ping command.
# interface = ""
- ## Specify the ping executable binary, default is "ping"
+ ## Specify the ping executable binary.
# binary = "ping"
- ## Arguments for ping command
- ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored
+ ## Arguments for ping command. When arguments is not empty, the command from
+ ## the binary option will be used and other options (ping_interval, timeout,
+ ## etc) will be ignored.
# arguments = ["-c", "3"]
+
+ ## Use only IPv6 addresses when resolving a hostname.
+ # ipv6 = false
```
#### File Limit
-Since this plugin runs the ping command, it may need to open several files per
-host. With a large host list you may receive a `too many open files` error.
+Since this plugin runs the ping command, it may need to open multiple files per
+host. The number of files used is lessened with the `native` option but still
+many files are used. With a large host list you may receive a `too many open
+files` error.
-To increase this limit on platforms using systemd it must be done in the
-service file.
+To increase this limit on platforms using systemd the recommended method is to
+use the "drop-in directory", usually located at
+`/etc/systemd/system/telegraf.service.d`.
+You can create or edit a drop-in file in the correct location using:
+```sh
+$ systemctl edit telegraf
+```
-Find the service unit file:
+Increase the number of open files:
+```ini
+[Service]
+LimitNOFILE=8192
```
-$ systemctl show telegraf.service -p FragmentPath
-FragmentPath=/lib/systemd/system/telegraf.service
+
+Restart Telegraf:
+```sh
+$ systemctl edit telegraf
```
-Set the file number limit:
+#### Linux Permissions
+
+When using `method = "native"`, Telegraf will attempt to use privileged raw
+ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities.
+
+With systemd:
+```sh
+$ systemctl edit telegraf
```
+```ini
[Service]
-LimitNOFILE=4096
+CapabilityBoundingSet=CAP_NET_RAW
+AmbientCapabilities=CAP_NET_RAW
+```
+```sh
+$ systemctl restart telegraf
+```
+
+Without systemd:
+```sh
+$ setcap cap_net_raw=eip /usr/bin/telegraf
```
-### Metrics:
+Reference [`man 7 capabilities`][man 7 capabilities] for more information about
+setting capabilities.
+
+[man 7 capabilities]: http://man7.org/linux/man-pages/man7/capabilities.7.html
+
+When Telegraf cannot listen on a privileged ICMP socket it will attempt to use
+ICMP echo sockets. If you wish to use this method you must ensure Telegraf's
+group, usually `telegraf`, is allowed to use ICMP echo sockets:
+
+```sh
+$ sysctl -w net.ipv4.ping_group_range="GROUP_ID_LOW GROUP_ID_HIGH"
+```
+
+Reference [`man 7 icmp`][man 7 icmp] for more information about ICMP echo
+sockets and the `ping_group_range` setting.
+
+[man 7 icmp]: http://man7.org/linux/man-pages/man7/icmp.7.html
+
+### Metrics
- ping
- tags:
@@ -70,29 +145,30 @@ LimitNOFILE=4096
- fields:
- packets_transmitted (integer)
- packets_received (integer)
- - percent_packets_loss (float)
+ - percent_packet_loss (float)
- ttl (integer, Not available on Windows)
- average_response_ms (integer)
- minimum_response_ms (integer)
- maximum_response_ms (integer)
- - standard_deviation_ms (integer, Not available on Windows)
+ - standard_deviation_ms (integer, Available on Windows only with native ping)
- errors (float, Windows only)
- - reply_received (integer, Windows only)
- - percent_reply_loss (float, Windows only)
+ - reply_received (integer, Windows with method = "exec" only)
+ - percent_reply_loss (float, Windows with method = "exec" only)
- result_code (int, success = 0, no such host = 1, ping error = 2)
##### reply_received vs packets_received
-On Windows systems, "Destination net unreachable" reply will increment `packets_received` but not `reply_received`.
+On Windows systems with `method = "exec"`, the "Destination net unreachable" reply will increment `packets_received` but not `reply_received`*.
-### Example Output:
+##### ttl
-**Windows:**
-```
-ping,url=example.org result_code=0i,average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000
-```
+There is currently no support for TTL on windows with `"native"`; track
+progress at https://github.com/golang/go/issues/7175 and
+https://github.com/golang/go/issues/7174
+
+
+### Example Output
-**Linux:**
```
ping,url=example.org average_response_ms=23.066,ttl=63,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000
```
diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go
index 28e967a85c8e9..008cfceacc5b9 100644
--- a/plugins/inputs/ping/ping.go
+++ b/plugins/inputs/ping/ping.go
@@ -1,20 +1,18 @@
-// +build !windows
-
package ping
import (
+ "context"
"errors"
- "fmt"
+ "log"
+ "math"
"net"
"os/exec"
- "regexp"
"runtime"
- "strconv"
"strings"
"sync"
- "syscall"
"time"
+ "github.com/glinton/ping"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -25,6 +23,10 @@ import (
// for unit test purposes (see ping_test.go)
type HostPinger func(binary string, timeout float64, args ...string) (string, error)
+type HostResolver func(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error)
+
+type IsCorrectNetwork func(ip net.IPAddr) bool
+
type Ping struct {
wg sync.WaitGroup
@@ -34,7 +36,7 @@ type Ping struct {
// Number of pings to send (ping -c )
Count int
- // Ping timeout, in seconds. 0 means no timeout (ping -W )
+ // Per-ping timeout, in seconds. 0 means no timeout (ping -W )
Timeout float64
// Ping deadline, in seconds. 0 means no deadline. (ping -w )
@@ -46,59 +48,99 @@ type Ping struct {
// URLs to ping
Urls []string
+ // Method defines how to ping (native or exec)
+ Method string
+
// Ping executable binary
Binary string
- // Arguments for ping command.
- // when `Arguments` is not empty, other options (ping_interval, timeout, etc) will be ignored
+ // Arguments for ping command. When arguments is not empty, system binary will be used and
+ // other options (ping_interval, timeout, etc) will be ignored
Arguments []string
+ // Whether to resolve addresses using ipv6 or not.
+ IPv6 bool
+
// host ping function
pingHost HostPinger
+
+ // resolve host function
+ resolveHost HostResolver
+
+ // listenAddr is the address associated with the interface defined.
+ listenAddr string
}
-func (_ *Ping) Description() string {
+func (*Ping) Description() string {
return "Ping given url(s) and return statistics"
}
const sampleConfig = `
- ## List of urls to ping
+ ## Hosts to send ping packets to.
urls = ["example.org"]
- ## Number of pings to send per collection (ping -c )
+ ## Method used for sending pings, can be either "exec" or "native". When set
+ ## to "exec" the systems ping command will be executed. When set to "native"
+ ## the plugin will send pings directly.
+ ##
+ ## While the default is "exec" for backwards compatibility, new deployments
+ ## are encouraged to use the "native" method for improved compatibility and
+ ## performance.
+ # method = "exec"
+
+ ## Number of ping packets to send per interval. Corresponds to the "-c"
+ ## option of the ping command.
# count = 1
- ## Interval, in s, at which to ping. 0 == default (ping -i )
- ## Not available in Windows.
+ ## Time to wait between sending ping packets in seconds. Operates like the
+ ## "-i" option of the ping command.
# ping_interval = 1.0
- ## Per-ping timeout, in s. 0 == no timeout (ping -W )
+ ## If set, the time to wait for a ping response in seconds. Operates like
+ ## the "-W" option of the ping command.
# timeout = 1.0
- ## Total-ping deadline, in s. 0 == no deadline (ping -w )
+ ## If set, the total ping deadline, in seconds. Operates like the -w option
+ ## of the ping command.
# deadline = 10
- ## Interface or source address to send ping from (ping -I )
- ## on Darwin and Freebsd only source address possible: (ping -S )
+ ## Interface or source address to send ping from. Operates like the -I or -S
+ ## option of the ping command.
# interface = ""
- ## Specify the ping executable binary, default is "ping"
+ ## Specify the ping executable binary.
# binary = "ping"
- ## Arguments for ping command
- ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored
+ ## Arguments for ping command. When arguments is not empty, the command from
+ ## the binary option will be used and other options (ping_interval, timeout,
+ ## etc) will be ignored.
# arguments = ["-c", "3"]
+
+ ## Use only IPv6 addresses when resolving a hostname.
+ # ipv6 = false
`
-func (_ *Ping) SampleConfig() string {
+func (*Ping) SampleConfig() string {
return sampleConfig
}
func (p *Ping) Gather(acc telegraf.Accumulator) error {
- // Spin off a go routine for each url to ping
- for _, url := range p.Urls {
+ if p.Interface != "" && p.listenAddr == "" {
+ p.listenAddr = getAddr(p.Interface)
+ }
+
+ for _, host := range p.Urls {
p.wg.Add(1)
- go p.pingToURL(url, acc)
+ go func(host string) {
+ defer p.wg.Done()
+
+ switch p.Method {
+ case "native":
+ p.pingToURLNative(host, acc)
+ default:
+ p.pingToURL(host, acc)
+ }
+ }(host)
}
p.wg.Wait()
@@ -106,81 +148,39 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
return nil
}
-func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) {
- defer p.wg.Done()
- tags := map[string]string{"url": u}
- fields := map[string]interface{}{"result_code": 0}
-
- _, err := net.LookupHost(u)
- if err != nil {
- acc.AddError(err)
- fields["result_code"] = 1
- acc.AddFields("ping", fields, tags)
- return
+func getAddr(iface string) string {
+ if addr := net.ParseIP(iface); addr != nil {
+ return addr.String()
}
- args := p.args(u, runtime.GOOS)
- totalTimeout := 60.0
- if len(p.Arguments) == 0 {
- totalTimeout = float64(p.Count)*p.Timeout + float64(p.Count-1)*p.PingInterval
+ ifaces, err := net.Interfaces()
+ if err != nil {
+ return ""
}
- out, err := p.pingHost(p.Binary, totalTimeout, args...)
- if err != nil {
- // Some implementations of ping return a 1 exit code on
- // timeout, if this occurs we will not exit and try to parse
- // the output.
- status := -1
- if exitError, ok := err.(*exec.ExitError); ok {
- if ws, ok := exitError.Sys().(syscall.WaitStatus); ok {
- status = ws.ExitStatus()
- fields["result_code"] = status
+ var ip net.IP
+ for i := range ifaces {
+ if ifaces[i].Name == iface {
+ addrs, err := ifaces[i].Addrs()
+ if err != nil {
+ return ""
}
- }
-
- if status != 1 {
- // Combine go err + stderr output
- out = strings.TrimSpace(out)
- if len(out) > 0 {
- acc.AddError(fmt.Errorf("host %s: %s, %s", u, out, err))
- } else {
- acc.AddError(fmt.Errorf("host %s: %s", u, err))
+ if len(addrs) > 0 {
+ switch v := addrs[0].(type) {
+ case *net.IPNet:
+ ip = v.IP
+ case *net.IPAddr:
+ ip = v.IP
+ }
+ if len(ip) == 0 {
+ return ""
+ }
+ return ip.String()
}
- fields["result_code"] = 2
- acc.AddFields("ping", fields, tags)
- return
}
}
- trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out)
- if err != nil {
- // fatal error
- acc.AddError(fmt.Errorf("%s: %s", err, u))
- fields["result_code"] = 2
- acc.AddFields("ping", fields, tags)
- return
- }
- // Calculate packet loss percentage
- loss := float64(trans-rec) / float64(trans) * 100.0
- fields["packets_transmitted"] = trans
- fields["packets_received"] = rec
- fields["percent_packet_loss"] = loss
- if ttl >= 0 {
- fields["ttl"] = ttl
- }
- if min >= 0 {
- fields["minimum_response_ms"] = min
- }
- if avg >= 0 {
- fields["average_response_ms"] = avg
- }
- if max >= 0 {
- fields["maximum_response_ms"] = max
- }
- if stddev >= 0 {
- fields["standard_deviation_ms"] = stddev
- }
- acc.AddFields("ping", fields, tags)
+ return ""
}
func hostPinger(binary string, timeout float64, args ...string) (string, error) {
@@ -194,147 +194,243 @@ func hostPinger(binary string, timeout float64, args ...string) (string, error)
return string(out), err
}
-// args returns the arguments for the 'ping' executable
-func (p *Ping) args(url string, system string) []string {
- if len(p.Arguments) > 0 {
- return append(p.Arguments, url)
+func filterIPs(addrs []net.IPAddr, filterFunc IsCorrectNetwork) []net.IPAddr {
+ n := 0
+ for _, x := range addrs {
+ if filterFunc(x) {
+ addrs[n] = x
+ n++
+ }
}
+ return addrs[:n]
+}
+
+func hostResolver(ctx context.Context, ipv6 bool, destination string) (*net.IPAddr, error) {
+ resolver := &net.Resolver{}
+ ips, err := resolver.LookupIPAddr(ctx, destination)
- // build the ping command args based on toml config
- args := []string{"-c", strconv.Itoa(p.Count), "-n", "-s", "16"}
- if p.PingInterval > 0 {
- args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', -1, 64))
+ if err != nil {
+ return nil, err
}
- if p.Timeout > 0 {
- switch system {
- case "darwin":
- args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64))
- case "freebsd", "netbsd", "openbsd":
- args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64))
- case "linux":
- args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64))
- default:
- // Not sure the best option here, just assume GNU ping?
- args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64))
- }
+
+ if ipv6 {
+ ips = filterIPs(ips, isV6)
+ } else {
+ ips = filterIPs(ips, isV4)
}
+
+ if len(ips) == 0 {
+ return nil, errors.New("Cannot resolve ip address")
+ }
+ return &ips[0], err
+}
+
+func isV4(ip net.IPAddr) bool {
+ return ip.IP.To4() != nil
+}
+
+func isV6(ip net.IPAddr) bool {
+ return !isV4(ip)
+}
+
+func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) {
+ ctx := context.Background()
+ interval := p.PingInterval
+ if interval < 0.2 {
+ interval = 0.2
+ }
+
+ timeout := p.Timeout
+ if timeout == 0 {
+ timeout = 5
+ }
+
+ tick := time.NewTicker(time.Duration(interval * float64(time.Second)))
+ defer tick.Stop()
+
if p.Deadline > 0 {
- switch system {
- case "darwin", "freebsd", "netbsd", "openbsd":
- args = append(args, "-t", strconv.Itoa(p.Deadline))
- case "linux":
- args = append(args, "-w", strconv.Itoa(p.Deadline))
- default:
- // not sure the best option here, just assume gnu ping?
- args = append(args, "-w", strconv.Itoa(p.Deadline))
- }
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, time.Duration(p.Deadline)*time.Second)
+ defer cancel()
+ }
+
+ host, err := p.resolveHost(ctx, p.IPv6, destination)
+ if err != nil {
+ acc.AddFields(
+ "ping",
+ map[string]interface{}{"result_code": 1},
+ map[string]string{"url": destination},
+ )
+ acc.AddError(err)
+ return
}
- if p.Interface != "" {
- switch system {
- case "darwin":
- args = append(args, "-I", p.Interface)
- case "freebsd", "netbsd", "openbsd":
- args = append(args, "-s", p.Interface)
- case "linux":
- args = append(args, "-I", p.Interface)
- default:
- // not sure the best option here, just assume gnu ping?
- args = append(args, "-i", p.Interface)
+
+ resps := make(chan *ping.Response)
+ rsps := []*ping.Response{}
+
+ r := &sync.WaitGroup{}
+ r.Add(1)
+ go func() {
+ for res := range resps {
+ rsps = append(rsps, res)
}
+ r.Done()
+ }()
+
+ wg := &sync.WaitGroup{}
+ c := ping.Client{}
+
+ var doErr error
+ var packetsSent int
+
+ type sentReq struct {
+ err error
+ sent bool
}
- args = append(args, url)
- return args
-}
+ sents := make(chan sentReq)
-// processPingOutput takes in a string output from the ping command, like:
-//
-// ping www.google.com (173.194.115.84): 56 data bytes
-// 64 bytes from 173.194.115.84: icmp_seq=0 ttl=54 time=52.172 ms
-// 64 bytes from 173.194.115.84: icmp_seq=1 ttl=54 time=34.843 ms
-//
-// --- www.google.com ping statistics ---
-// 2 packets transmitted, 2 packets received, 0.0% packet loss
-// round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms
-//
-// It returns (, , )
-func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) {
- var trans, recv, ttl int = 0, 0, -1
- var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0
- // Set this error to nil if we find a 'transmitted' line
- err := errors.New("Fatal error processing ping output")
- lines := strings.Split(out, "\n")
- for _, line := range lines {
- // Reading only first TTL, ignoring other TTL messages
- if ttl == -1 && strings.Contains(line, "ttl=") {
- ttl, err = getTTL(line)
- } else if strings.Contains(line, "transmitted") &&
- strings.Contains(line, "received") {
- trans, recv, err = getPacketStats(line, trans, recv)
- if err != nil {
- return trans, recv, ttl, min, avg, max, stddev, err
+ r.Add(1)
+ go func() {
+ for sent := range sents {
+ if sent.err != nil {
+ doErr = sent.err
}
- } else if strings.Contains(line, "min/avg/max") {
- min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev)
- if err != nil {
- return trans, recv, ttl, min, avg, max, stddev, err
+ if sent.sent {
+ packetsSent++
}
}
+ r.Done()
+ }()
+
+ for i := 0; i < p.Count; i++ {
+ select {
+ case <-ctx.Done():
+ goto finish
+ case <-tick.C:
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout*float64(time.Second)))
+ defer cancel()
+
+ wg.Add(1)
+ go func(seq int) {
+ defer wg.Done()
+ resp, err := c.Do(ctx, &ping.Request{
+ Dst: net.ParseIP(host.String()),
+ Src: net.ParseIP(p.listenAddr),
+ Seq: seq,
+ })
+
+ sent := sentReq{err: err, sent: true}
+ if err != nil {
+ if strings.Contains(err.Error(), "not permitted") {
+ sent.sent = false
+ }
+ sents <- sent
+ return
+ }
+
+ resps <- resp
+ sents <- sent
+ }(i + 1)
+ }
}
- return trans, recv, ttl, min, avg, max, stddev, err
-}
-func getPacketStats(line string, trans, recv int) (int, int, error) {
- stats := strings.Split(line, ", ")
- // Transmitted packets
- trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0])
- if err != nil {
- return trans, recv, err
+finish:
+ wg.Wait()
+ close(resps)
+ close(sents)
+
+ r.Wait()
+
+ if doErr != nil && strings.Contains(doErr.Error(), "not permitted") {
+ log.Printf("D! [inputs.ping] %s", doErr.Error())
}
- // Received packets
- recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0])
- return trans, recv, err
-}
-func getTTL(line string) (int, error) {
- ttlLine := regexp.MustCompile(`ttl=(\d+)`)
- ttlMatch := ttlLine.FindStringSubmatch(line)
- return strconv.Atoi(ttlMatch[1])
+ tags, fields := onFin(packetsSent, rsps, doErr, destination)
+ acc.AddFields("ping", fields, tags)
}
-func checkRoundTripTimeStats(line string, min, avg, max,
- stddev float64) (float64, float64, float64, float64, error) {
- stats := strings.Split(line, " ")[3]
- data := strings.Split(stats, "/")
+func onFin(packetsSent int, resps []*ping.Response, err error, destination string) (map[string]string, map[string]interface{}) {
+ packetsRcvd := len(resps)
- min, err := strconv.ParseFloat(data[0], 64)
- if err != nil {
- return min, avg, max, stddev, err
- }
- avg, err = strconv.ParseFloat(data[1], 64)
- if err != nil {
- return min, avg, max, stddev, err
+ tags := map[string]string{"url": destination}
+ fields := map[string]interface{}{
+ "result_code": 0,
+ "packets_transmitted": packetsSent,
+ "packets_received": packetsRcvd,
}
- max, err = strconv.ParseFloat(data[2], 64)
- if err != nil {
- return min, avg, max, stddev, err
+
+ if packetsSent == 0 {
+ if err != nil {
+ fields["result_code"] = 2
+ }
+ return tags, fields
}
- if len(data) == 4 {
- stddev, err = strconv.ParseFloat(data[3], 64)
+
+ if packetsRcvd == 0 {
if err != nil {
- return min, avg, max, stddev, err
+ fields["result_code"] = 1
+ }
+ fields["percent_packet_loss"] = float64(100)
+ return tags, fields
+ }
+
+ fields["percent_packet_loss"] = float64(packetsSent-packetsRcvd) / float64(packetsSent) * 100
+ ttl := resps[0].TTL
+
+ var min, max, avg, total time.Duration
+ min = resps[0].RTT
+ max = resps[0].RTT
+
+ for _, res := range resps {
+ if res.RTT < min {
+ min = res.RTT
+ }
+ if res.RTT > max {
+ max = res.RTT
}
+ total += res.RTT
}
- return min, avg, max, stddev, err
+
+ avg = total / time.Duration(packetsRcvd)
+ var sumsquares time.Duration
+ for _, res := range resps {
+ sumsquares += (res.RTT - avg) * (res.RTT - avg)
+ }
+ stdDev := time.Duration(math.Sqrt(float64(sumsquares / time.Duration(packetsRcvd))))
+
+ // Set TTL only on supported platform. See golang.org/x/net/ipv4/payload_cmsg.go
+ switch runtime.GOOS {
+ case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris":
+ fields["ttl"] = ttl
+ }
+
+ fields["minimum_response_ms"] = float64(min.Nanoseconds()) / float64(time.Millisecond)
+ fields["average_response_ms"] = float64(avg.Nanoseconds()) / float64(time.Millisecond)
+ fields["maximum_response_ms"] = float64(max.Nanoseconds()) / float64(time.Millisecond)
+ fields["standard_deviation_ms"] = float64(stdDev.Nanoseconds()) / float64(time.Millisecond)
+
+ return tags, fields
+}
+
+// Init ensures the plugin is configured correctly.
+func (p *Ping) Init() error {
+ if p.Count < 1 {
+ return errors.New("bad number of packets to transmit")
+ }
+
+ return nil
}
func init() {
inputs.Add("ping", func() telegraf.Input {
return &Ping{
pingHost: hostPinger,
+ resolveHost: hostResolver,
PingInterval: 1.0,
Count: 1,
Timeout: 1.0,
Deadline: 10,
+ Method: "exec",
Binary: "ping",
Arguments: []string{},
}
diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go
new file mode 100644
index 0000000000000..a014a8237e8e7
--- /dev/null
+++ b/plugins/inputs/ping/ping_notwindows.go
@@ -0,0 +1,235 @@
+// +build !windows
+
+package ping
+
+import (
+ "errors"
+ "fmt"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/influxdata/telegraf"
+)
+
+func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) {
+ tags := map[string]string{"url": u}
+ fields := map[string]interface{}{"result_code": 0}
+
+ out, err := p.pingHost(p.Binary, 60.0, p.args(u, runtime.GOOS)...)
+ if err != nil {
+ // Some implementations of ping return a non-zero exit code on
+ // timeout, if this occurs we will not exit and try to parse
+ // the output.
+ // Linux iputils-ping returns 1, BSD-derived ping returns 2.
+ status := -1
+ if exitError, ok := err.(*exec.ExitError); ok {
+ if ws, ok := exitError.Sys().(syscall.WaitStatus); ok {
+ status = ws.ExitStatus()
+ fields["result_code"] = status
+ }
+ }
+
+ var timeoutExitCode int
+ switch runtime.GOOS {
+ case "freebsd", "netbsd", "openbsd", "darwin":
+ timeoutExitCode = 2
+ case "linux":
+ timeoutExitCode = 1
+ default:
+ timeoutExitCode = 1
+ }
+
+ if status != timeoutExitCode {
+ // Combine go err + stderr output
+ out = strings.TrimSpace(out)
+ if len(out) > 0 {
+ acc.AddError(fmt.Errorf("host %s: %s, %s", u, out, err))
+ } else {
+ acc.AddError(fmt.Errorf("host %s: %s", u, err))
+ }
+ fields["result_code"] = 2
+ acc.AddFields("ping", fields, tags)
+ return
+ }
+ }
+ trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out)
+ if err != nil {
+ // fatal error
+ acc.AddError(fmt.Errorf("%s: %s", err, u))
+ fields["result_code"] = 2
+ acc.AddFields("ping", fields, tags)
+ return
+ }
+
+ // Calculate packet loss percentage
+ loss := float64(trans-rec) / float64(trans) * 100.0
+
+ fields["packets_transmitted"] = trans
+ fields["packets_received"] = rec
+ fields["percent_packet_loss"] = loss
+ if ttl >= 0 {
+ fields["ttl"] = ttl
+ }
+ if min >= 0 {
+ fields["minimum_response_ms"] = min
+ }
+ if avg >= 0 {
+ fields["average_response_ms"] = avg
+ }
+ if max >= 0 {
+ fields["maximum_response_ms"] = max
+ }
+ if stddev >= 0 {
+ fields["standard_deviation_ms"] = stddev
+ }
+ acc.AddFields("ping", fields, tags)
+}
+
+// args returns the arguments for the 'ping' executable
+func (p *Ping) args(url string, system string) []string {
+ if len(p.Arguments) > 0 {
+ return append(p.Arguments, url)
+ }
+
+ // build the ping command args based on toml config
+ args := []string{"-c", strconv.Itoa(p.Count), "-n", "-s", "16"}
+ if p.PingInterval > 0 {
+ args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', -1, 64))
+ }
+ if p.Timeout > 0 {
+ switch system {
+ case "darwin":
+ args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64))
+ case "freebsd":
+ if strings.Contains(p.Binary, "ping6") {
+ args = append(args, "-x", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64))
+ } else {
+ args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64))
+ }
+ case "netbsd", "openbsd":
+ args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64))
+ case "linux":
+ args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64))
+ default:
+ // Not sure the best option here, just assume GNU ping?
+ args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64))
+ }
+ }
+ if p.Deadline > 0 {
+ switch system {
+ case "freebsd":
+ if strings.Contains(p.Binary, "ping6") {
+ args = append(args, "-X", strconv.Itoa(p.Deadline))
+ } else {
+ args = append(args, "-t", strconv.Itoa(p.Deadline))
+ }
+ case "darwin", "netbsd", "openbsd":
+ args = append(args, "-t", strconv.Itoa(p.Deadline))
+ case "linux":
+ args = append(args, "-w", strconv.Itoa(p.Deadline))
+ default:
+ // not sure the best option here, just assume gnu ping?
+ args = append(args, "-w", strconv.Itoa(p.Deadline))
+ }
+ }
+ if p.Interface != "" {
+ switch system {
+ case "darwin":
+ args = append(args, "-I", p.Interface)
+ case "freebsd", "netbsd", "openbsd":
+ args = append(args, "-S", p.Interface)
+ case "linux":
+ args = append(args, "-I", p.Interface)
+ default:
+ // not sure the best option here, just assume gnu ping?
+ args = append(args, "-i", p.Interface)
+ }
+ }
+ args = append(args, url)
+ return args
+}
+
+// processPingOutput takes in a string output from the ping command, like:
+//
+// ping www.google.com (173.194.115.84): 56 data bytes
+// 64 bytes from 173.194.115.84: icmp_seq=0 ttl=54 time=52.172 ms
+// 64 bytes from 173.194.115.84: icmp_seq=1 ttl=54 time=34.843 ms
+//
+// --- www.google.com ping statistics ---
+// 2 packets transmitted, 2 packets received, 0.0% packet loss
+// round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms
+//
+// It returns (, , )
+func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) {
+ var trans, recv, ttl int = 0, 0, -1
+ var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0
+ // Set this error to nil if we find a 'transmitted' line
+ err := errors.New("Fatal error processing ping output")
+ lines := strings.Split(out, "\n")
+ for _, line := range lines {
+ // Reading only first TTL, ignoring other TTL messages
+ if ttl == -1 && (strings.Contains(line, "ttl=") || strings.Contains(line, "hlim=")) {
+ ttl, err = getTTL(line)
+ } else if strings.Contains(line, "transmitted") &&
+ strings.Contains(line, "received") {
+ trans, recv, err = getPacketStats(line, trans, recv)
+ if err != nil {
+ return trans, recv, ttl, min, avg, max, stddev, err
+ }
+ } else if strings.Contains(line, "min/avg/max") {
+ min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev)
+ if err != nil {
+ return trans, recv, ttl, min, avg, max, stddev, err
+ }
+ }
+ }
+ return trans, recv, ttl, min, avg, max, stddev, err
+}
+
+func getPacketStats(line string, trans, recv int) (int, int, error) {
+ stats := strings.Split(line, ", ")
+ // Transmitted packets
+ trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0])
+ if err != nil {
+ return trans, recv, err
+ }
+ // Received packets
+ recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0])
+ return trans, recv, err
+}
+
+func getTTL(line string) (int, error) {
+ ttlLine := regexp.MustCompile(`(ttl|hlim)=(\d+)`)
+ ttlMatch := ttlLine.FindStringSubmatch(line)
+ return strconv.Atoi(ttlMatch[2])
+}
+
+func checkRoundTripTimeStats(line string, min, avg, max,
+ stddev float64) (float64, float64, float64, float64, error) {
+ stats := strings.Split(line, " ")[3]
+ data := strings.Split(stats, "/")
+
+ min, err := strconv.ParseFloat(data[0], 64)
+ if err != nil {
+ return min, avg, max, stddev, err
+ }
+ avg, err = strconv.ParseFloat(data[1], 64)
+ if err != nil {
+ return min, avg, max, stddev, err
+ }
+ max, err = strconv.ParseFloat(data[2], 64)
+ if err != nil {
+ return min, avg, max, stddev, err
+ }
+ if len(data) == 4 {
+ stddev, err = strconv.ParseFloat(data[3], 64)
+ if err != nil {
+ return min, avg, max, stddev, err
+ }
+ }
+ return min, avg, max, stddev, err
+}
diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go
index 8870d415680af..0c8cfb0939daa 100644
--- a/plugins/inputs/ping/ping_test.go
+++ b/plugins/inputs/ping/ping_test.go
@@ -3,7 +3,9 @@
package ping
import (
+ "context"
"errors"
+ "net"
"reflect"
"sort"
"testing"
@@ -27,6 +29,20 @@ PING www.google.com (216.58.217.36): 56 data bytes
round-trip min/avg/max/stddev = 15.087/20.224/27.263/4.076 ms
`
+// FreeBSD ping6 output
+var freebsdPing6Output = `
+PING6(64=40+8+16 bytes) 2001:db8::1 --> 2a00:1450:4001:824::2004
+24 bytes from 2a00:1450:4001:824::2004, icmp_seq=0 hlim=117 time=93.870 ms
+24 bytes from 2a00:1450:4001:824::2004, icmp_seq=1 hlim=117 time=40.278 ms
+24 bytes from 2a00:1450:4001:824::2004, icmp_seq=2 hlim=120 time=59.077 ms
+24 bytes from 2a00:1450:4001:824::2004, icmp_seq=3 hlim=117 time=37.102 ms
+24 bytes from 2a00:1450:4001:824::2004, icmp_seq=4 hlim=117 time=35.727 ms
+
+--- www.google.com ping6 statistics ---
+5 packets transmitted, 5 packets received, 0.0% packet loss
+round-trip min/avg/max/std-dev = 35.727/53.211/93.870/22.000 ms
+`
+
// Linux ping output
var linuxPingOutput = `
PING www.google.com (216.58.218.164) 56(84) bytes of data.
@@ -65,17 +81,27 @@ func TestProcessPingOutput(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 55, ttl, "ttl value is 55")
assert.Equal(t, 5, trans, "5 packets were transmitted")
- assert.Equal(t, 5, rec, "5 packets were transmitted")
+ assert.Equal(t, 5, rec, "5 packets were received")
assert.InDelta(t, 15.087, min, 0.001)
assert.InDelta(t, 20.224, avg, 0.001)
assert.InDelta(t, 27.263, max, 0.001)
assert.InDelta(t, 4.076, stddev, 0.001)
+ trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(freebsdPing6Output)
+ assert.NoError(t, err)
+ assert.Equal(t, 117, ttl, "ttl value is 117")
+ assert.Equal(t, 5, trans, "5 packets were transmitted")
+ assert.Equal(t, 5, rec, "5 packets were received")
+ assert.InDelta(t, 35.727, min, 0.001)
+ assert.InDelta(t, 53.211, avg, 0.001)
+ assert.InDelta(t, 93.870, max, 0.001)
+ assert.InDelta(t, 22.000, stddev, 0.001)
+
trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(linuxPingOutput)
assert.NoError(t, err)
assert.Equal(t, 63, ttl, "ttl value is 63")
assert.Equal(t, 5, trans, "5 packets were transmitted")
- assert.Equal(t, 5, rec, "5 packets were transmitted")
+ assert.Equal(t, 5, rec, "5 packets were received")
assert.InDelta(t, 35.225, min, 0.001)
assert.InDelta(t, 43.628, avg, 0.001)
assert.InDelta(t, 51.806, max, 0.001)
@@ -85,7 +111,7 @@ func TestProcessPingOutput(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 56, ttl, "ttl value is 56")
assert.Equal(t, 4, trans, "4 packets were transmitted")
- assert.Equal(t, 4, rec, "4 packets were transmitted")
+ assert.Equal(t, 4, rec, "4 packets were received")
assert.InDelta(t, 15.810, min, 0.001)
assert.InDelta(t, 17.611, avg, 0.001)
assert.InDelta(t, 22.559, max, 0.001)
@@ -126,7 +152,7 @@ func TestErrorProcessPingOutput(t *testing.T) {
assert.Error(t, err, "Error was expected from processPingOutput")
}
-// Test that arg lists and created correctly
+// Test that default arg lists are created correctly
func TestArgs(t *testing.T) {
p := Ping{
Count: 2,
@@ -154,6 +180,35 @@ func TestArgs(t *testing.T) {
}
}
+// Test that default arg lists for ping6 are created correctly
+func TestArgs6(t *testing.T) {
+ p := Ping{
+ Count: 2,
+ Interface: "eth0",
+ Timeout: 12.0,
+ Deadline: 24,
+ PingInterval: 1.2,
+ Binary: "ping6",
+ }
+
+ var systemCases = []struct {
+ system string
+ output []string
+ }{
+ {"freebsd", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-x", "12000", "-X", "24", "-S", "eth0", "www.google.com"}},
+ {"linux", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12", "-w", "24", "-I", "eth0", "www.google.com"}},
+ {"anything else", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12", "-w", "24", "-i", "eth0", "www.google.com"}},
+ }
+ for i := range systemCases {
+ actual := p.args("www.google.com", systemCases[i].system)
+ expected := systemCases[i].output
+ sort.Strings(actual)
+ sort.Strings(expected)
+ require.True(t, reflect.DeepEqual(expected, actual),
+ "Expected: %s Actual: %s", expected, actual)
+ }
+}
+
func TestArguments(t *testing.T) {
arguments := []string{"-c", "3"}
expected := append(arguments, "www.google.com")
@@ -180,12 +235,12 @@ func mockHostPinger(binary string, timeout float64, args ...string) (string, err
func TestPingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
- Urls: []string{"www.google.com", "www.reddit.com"},
+ Urls: []string{"localhost", "influxdata.com"},
pingHost: mockHostPinger,
}
acc.GatherError(p.Gather)
- tags := map[string]string{"url": "www.google.com"}
+ tags := map[string]string{"url": "localhost"}
fields := map[string]interface{}{
"packets_transmitted": 5,
"packets_received": 5,
@@ -199,7 +254,7 @@ func TestPingGather(t *testing.T) {
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
- tags = map[string]string{"url": "www.reddit.com"}
+ tags = map[string]string{"url": "influxdata.com"}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
}
@@ -339,3 +394,50 @@ func TestPingBinary(t *testing.T) {
}
acc.GatherError(p.Gather)
}
+
+func mockHostResolver(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) {
+ ipaddr := net.IPAddr{}
+ ipaddr.IP = net.IPv4(127, 0, 0, 1)
+ return &ipaddr, nil
+}
+
+// Test that Gather function works using native ping
+func TestPingGatherNative(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test due to permission requirements.")
+ }
+
+ var acc testutil.Accumulator
+ p := Ping{
+ Urls: []string{"localhost", "127.0.0.2"},
+ Method: "native",
+ Count: 5,
+ resolveHost: mockHostResolver,
+ }
+
+ assert.NoError(t, acc.GatherError(p.Gather))
+ assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5))
+ assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5))
+}
+
+func mockHostResolverError(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) {
+ return nil, errors.New("myMock error")
+}
+
+// Test failed DNS resolutions
+func TestDNSLookupError(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test due to permission requirements.")
+ }
+
+ var acc testutil.Accumulator
+ p := Ping{
+ Urls: []string{"localhost"},
+ Method: "native",
+ IPv6: false,
+ resolveHost: mockHostResolverError,
+ }
+
+ acc.GatherError(p.Gather)
+ assert.True(t, len(acc.Errors) > 0)
+}
diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go
index 6064fabe4b6dc..f53d6f09a7373 100644
--- a/plugins/inputs/ping/ping_windows.go
+++ b/plugins/inputs/ping/ping_windows.go
@@ -5,103 +5,17 @@ package ping
import (
"errors"
"fmt"
- "net"
- "os/exec"
"regexp"
"strconv"
"strings"
- "sync"
- "time"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/plugins/inputs"
)
-// HostPinger is a function that runs the "ping" function using a list of
-// passed arguments. This can be easily switched with a mocked ping function
-// for unit test purposes (see ping_test.go)
-type HostPinger func(binary string, timeout float64, args ...string) (string, error)
-
-type Ping struct {
- wg sync.WaitGroup
-
- // Number of pings to send (ping -c )
- Count int
-
- // Ping timeout, in seconds. 0 means no timeout (ping -W )
- Timeout float64
-
- // URLs to ping
- Urls []string
-
- // Ping executable binary
- Binary string
-
- // Arguments for ping command.
- // when `Arguments` is not empty, other options (ping_interval, timeout, etc) will be ignored
- Arguments []string
-
- // host ping function
- pingHost HostPinger
-}
-
-func (s *Ping) Description() string {
- return "Ping given url(s) and return statistics"
-}
-
-const sampleConfig = `
- ## List of urls to ping
- urls = ["www.google.com"]
-
- ## number of pings to send per collection (ping -n )
- # count = 1
-
- ## Ping timeout, in seconds. 0.0 means default timeout (ping -w )
- # timeout = 0.0
-
- ## Specify the ping executable binary, default is "ping"
- # binary = "ping"
-
- ## Arguments for ping command
- ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored
- # arguments = ["-c", "3"]
-`
-
-func (s *Ping) SampleConfig() string {
- return sampleConfig
-}
-
-func (p *Ping) Gather(acc telegraf.Accumulator) error {
- if p.Count < 1 {
- p.Count = 1
- }
-
- // Spin off a go routine for each url to ping
- for _, url := range p.Urls {
- p.wg.Add(1)
- go p.pingToURL(url, acc)
- }
-
- p.wg.Wait()
-
- return nil
-}
-
func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) {
- defer p.wg.Done()
-
tags := map[string]string{"url": u}
fields := map[string]interface{}{"result_code": 0}
- _, err := net.LookupHost(u)
- if err != nil {
- acc.AddError(err)
- fields["result_code"] = 1
- acc.AddFields("ping", fields, tags)
- return
- }
-
args := p.args(u)
totalTimeout := 60.0
if len(p.Arguments) == 0 {
@@ -151,17 +65,6 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) {
acc.AddFields("ping", fields, tags)
}
-func hostPinger(binary string, timeout float64, args ...string) (string, error) {
- bin, err := exec.LookPath(binary)
- if err != nil {
- return "", err
- }
- c := exec.Command(bin, args...)
- out, err := internal.CombinedOutputTimeout(c,
- time.Second*time.Duration(timeout+1))
- return string(out), err
-}
-
// args returns the arguments for the 'ping' executable
func (p *Ping) args(url string) []string {
if len(p.Arguments) > 0 {
@@ -246,14 +149,3 @@ func (p *Ping) timeout() float64 {
}
return 4 + 1
}
-
-func init() {
- inputs.Add("ping", func() telegraf.Input {
- return &Ping{
- pingHost: hostPinger,
- Count: 1,
- Binary: "ping",
- Arguments: []string{},
- }
- })
-}
diff --git a/plugins/inputs/postfix/README.md b/plugins/inputs/postfix/README.md
index 3dab2b39d2acb..2fdfacd9d193c 100644
--- a/plugins/inputs/postfix/README.md
+++ b/plugins/inputs/postfix/README.md
@@ -2,7 +2,10 @@
The postfix plugin reports metrics on the postfix queues.
-For each of the active, hold, incoming, maildrop, and deferred queues (http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue length (number of items), size (bytes used by items), and age (age of oldest item in seconds).
+For each of the active, hold, incoming, maildrop, and deferred queues
+(http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue
+length (number of items), size (bytes used by items), and age (age of oldest
+item in seconds).
### Configuration
@@ -13,12 +16,15 @@ For each of the active, hold, incoming, maildrop, and deferred queues (http://ww
# queue_directory = "/var/spool/postfix"
```
-#### Permissions:
+#### Permissions
Telegraf will need read access to the files in the queue directory. You may
need to alter the permissions of these directories to provide access to the
telegraf user.
+This can be setup either using standard unix permissions or with Posix ACLs,
+you will only need to use one method:
+
Unix permissions:
```sh
$ sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred}
@@ -29,20 +35,20 @@ $ sudo chmod g+r /var/spool/postfix/maildrop
Posix ACL:
```sh
-$ sudo setfacl -Rdm u:telegraf:rX /var/spool/postfix/{active,hold,incoming,deferred,maildrop}
+$ sudo setfacl -Rm g:telegraf:rX /var/spool/postfix/
+$ sudo setfacl -dm g:telegraf:rX /var/spool/postfix/
```
-### Measurements & Fields:
+### Metrics
- postfix_queue
+ - tags:
+ - queue
+ - fields:
- length (integer)
- size (integer, bytes)
- age (integer, seconds)
-### Tags:
-
-- postfix_queue
- - queue
### Example Output
diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md
index a873ddac0a1e8..627fd2dbbfa88 100644
--- a/plugins/inputs/postgresql/README.md
+++ b/plugins/inputs/postgresql/README.md
@@ -1,4 +1,4 @@
-# PostgreSQL plugin
+# PostgreSQL Input Plugin
This postgresql plugin provides metrics for your postgres database. It currently works with postgres versions 8.1+. It uses data from the built in _pg_stat_database_ and pg_stat_bgwriter views. The metrics recorded depend on your version of postgres. See table:
```
@@ -31,9 +31,13 @@ _* value ignored and therefore not recorded._
More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW)
## Configuration
-Specify address via a url matching:
+Specify address via a postgresql connection string:
- `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]`
+ `host=localhost port=5432 user=telegraf database=telegraf`
+
+Or via an url matching:
+
+ `postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full]`
All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for.
@@ -53,7 +57,7 @@ host=localhost user=pgotest dbname=app_production sslmode=require sslkey=/etc/te
```
### Configuration example
-```
+```toml
[[inputs.postgresql]]
address = "postgres://telegraf@localhost/someDB"
ignored_databases = ["template0", "template1"]
diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go
index e136098f4f304..0911b20ce7184 100644
--- a/plugins/inputs/postgresql/postgresql.go
+++ b/plugins/inputs/postgresql/postgresql.go
@@ -26,7 +26,7 @@ var sampleConfig = `
## postgres://[pqgotest[:password]]@localhost[/dbname]\
## ?sslmode=[disable|verify-ca|verify-full]
## or a simple string:
- ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
+ ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
##
## All connection parameters are optional.
##
@@ -155,7 +155,12 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []str
}
if columnMap["datname"] != nil {
// extract the database name from the column map
- dbname.WriteString((*columnMap["datname"]).(string))
+ if dbNameStr, ok := (*columnMap["datname"]).(string); ok {
+ dbname.WriteString(dbNameStr)
+ } else {
+ // PG 12 adds tracking of global objects to pg_stat_database
+ dbname.WriteString("postgres_global")
+ }
} else {
dbname.WriteString("postgres")
}
diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go
index 9d3ab396317a1..96a9a63175658 100644
--- a/plugins/inputs/postgresql/service.go
+++ b/plugins/inputs/postgresql/service.go
@@ -122,6 +122,13 @@ func (p *Service) Start(telegraf.Accumulator) (err error) {
Name: "int8OID",
OID: pgtype.Int8OID,
})
+ // Newer versions of pgbouncer need this defined. See the discussion here:
+ // https://github.com/jackc/pgx/issues/649
+ info.RegisterDataType(pgtype.DataType{
+ Value: &pgtype.OIDValue{},
+ Name: "numericOID",
+ OID: pgtype.NumericOID,
+ })
return info, nil
},
diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md
index 29c5e36d853e4..abbdd07f43d1b 100644
--- a/plugins/inputs/postgresql_extensible/README.md
+++ b/plugins/inputs/postgresql_extensible/README.md
@@ -1,4 +1,4 @@
-# PostgreSQL plugin
+# PostgreSQL Extensible Input Plugin
This postgresql plugin provides metrics for your postgres database. It has been
designed to parse SQL queries in the plugin section of your `telegraf.conf`.
@@ -11,14 +11,14 @@ The example below has two queries are specified, with the following parameters:
* The name of the measurement
* A list of the columns to be defined as tags
-```
+```toml
[[inputs.postgresql_extensible]]
# specify address via a url matching:
- # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=...
+ # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=...
# or a simple string:
- # host=localhost user=pqotest password=... sslmode=... dbname=app_production
+ # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production
#
- # All connection parameters are optional.
+ # All connection parameters are optional.
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
@@ -44,6 +44,9 @@ The example below has two queries are specified, with the following parameters:
# Be careful that if the withdbname is set to false you don't have to define
# the where clause (aka with the dbname)
#
+ # The script option can be used to specify the .sql file path.
+ # If script and sqlquery options specified at same time, sqlquery will be used
+ #
# the tagvalue field is used to define custom tags (separated by comas).
# the query is expected to return columns which match the names of the
# defined tags. The values in these columns must be of a string-type,
@@ -61,19 +64,19 @@ The example below has two queries are specified, with the following parameters:
withdbname=false
tagvalue=""
[[inputs.postgresql_extensible.query]]
- sqlquery="SELECT * FROM pg_stat_bgwriter"
+ script="your_sql-filepath.sql"
version=901
withdbname=false
tagvalue=""
```
The system can be easily extended using homemade metrics collection tools or
-using postgreql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/))
+using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/))
# Sample Queries :
- telegraf.conf postgresql_extensible queries (assuming that you have configured
correctly your connection)
-```
+```toml
[[inputs.postgresql_extensible.query]]
sqlquery="SELECT * FROM pg_stat_database"
version=901
diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
index c2bcb7b600efc..f91feaf407d49 100644
--- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go
+++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
@@ -3,10 +3,10 @@ package postgresql_extensible
import (
"bytes"
"fmt"
- "log"
+ "io/ioutil"
+ "os"
"strings"
- // register in driver.
_ "github.com/jackc/pgx/stdlib"
"github.com/influxdata/telegraf"
@@ -21,10 +21,13 @@ type Postgresql struct {
AdditionalTags []string
Query query
Debug bool
+
+ Log telegraf.Logger
}
type query []struct {
Sqlquery string
+ Script string
Version int
Withdbname bool
Tagvalue string
@@ -38,7 +41,7 @@ var sampleConfig = `
## postgres://[pqgotest[:password]]@localhost[/dbname]\
## ?sslmode=[disable|verify-ca|verify-full]
## or a simple string:
- ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
+ ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
#
## All connection parameters are optional. #
## Without the dbname parameter, the driver will default to a database
@@ -75,7 +78,10 @@ var sampleConfig = `
## field is used to define custom tags (separated by commas)
## The optional "measurement" value can be used to override the default
## output measurement name ("postgresql").
- #
+ ##
+ ## The script option can be used to specify the .sql file path.
+ ## If script and sqlquery options specified at same time, sqlquery will be used
+ ##
## Structure :
## [[inputs.postgresql_extensible.query]]
## sqlquery string
@@ -96,6 +102,19 @@ var sampleConfig = `
tagvalue="postgresql.stats"
`
+func (p *Postgresql) Init() error {
+ var err error
+ for i := range p.Query {
+ if p.Query[i].Sqlquery == "" {
+ p.Query[i].Sqlquery, err = ReadQueryFromFile(p.Query[i].Script)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
func (p *Postgresql) SampleConfig() string {
return sampleConfig
}
@@ -108,6 +127,20 @@ func (p *Postgresql) IgnoredColumns() map[string]bool {
return ignoredColumns
}
+func ReadQueryFromFile(filePath string) (string, error) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ query, err := ioutil.ReadAll(file)
+ if err != nil {
+ return "", err
+ }
+ return string(query), err
+}
+
func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
var (
err error
@@ -120,7 +153,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
columns []string
)
- // Retreiving the database version
+ // Retrieving the database version
query = `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'`
if err = p.DB.QueryRow(query).Scan(&db_version); err != nil {
db_version = 0
@@ -131,6 +164,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
for i := range p.Query {
sql_query = p.Query[i].Sqlquery
tag_value = p.Query[i].Tagvalue
+
if p.Query[i].Measurement != "" {
meas_name = p.Query[i].Measurement
} else {
@@ -152,7 +186,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
if p.Query[i].Version <= db_version {
rows, err := p.DB.Query(sql_query)
if err != nil {
- acc.AddError(err)
+ p.Log.Error(err.Error())
continue
}
@@ -160,7 +194,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
// grab the column information from the result
if columns, err = rows.Columns(); err != nil {
- acc.AddError(err)
+ p.Log.Error(err.Error())
continue
}
@@ -175,7 +209,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
for rows.Next() {
err = p.accRow(meas_name, rows, acc, columns)
if err != nil {
- acc.AddError(err)
+ p.Log.Error(err.Error())
break
}
}
@@ -238,7 +272,7 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula
fields := make(map[string]interface{})
COLUMN:
for col, val := range columnMap {
- log.Printf("D! postgresql_extensible: column: %s = %T: %v\n", col, *val, *val)
+ p.Log.Debugf("Column: %s = %T: %v\n", col, *val, *val)
_, ignore := ignoredColumns[col]
if ignore || *val == nil {
continue
@@ -256,7 +290,7 @@ COLUMN:
case int64, int32, int:
tags[col] = fmt.Sprintf("%d", v)
default:
- log.Println("failed to add additional tag", col)
+ p.Log.Debugf("Failed to add %q as additional tag", col)
}
continue COLUMN
}
diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go
index 1ed62a1cd62c0..bca009f167cf7 100644
--- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go
+++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go
@@ -13,6 +13,7 @@ import (
func queryRunner(t *testing.T, q query) *testutil.Accumulator {
p := &Postgresql{
+ Log: testutil.Logger{},
Service: postgresql.Service{
Address: fmt.Sprintf(
"host=%s user=postgres sslmode=disable",
@@ -25,7 +26,7 @@ func queryRunner(t *testing.T, q query) *testutil.Accumulator {
}
var acc testutil.Accumulator
p.Start(&acc)
-
+ p.Init()
require.NoError(t, acc.GatherError(p.Gather))
return &acc
}
@@ -201,12 +202,39 @@ func TestPostgresqlFieldOutput(t *testing.T) {
}
}
+func TestPostgresqlSqlScript(t *testing.T) {
+ q := query{{
+ Script: "testdata/test.sql",
+ Version: 901,
+ Withdbname: false,
+ Tagvalue: "",
+ }}
+ p := &Postgresql{
+ Log: testutil.Logger{},
+ Service: postgresql.Service{
+ Address: fmt.Sprintf(
+ "host=%s user=postgres sslmode=disable",
+ testutil.GetLocalHost(),
+ ),
+ IsPgBouncer: false,
+ },
+ Databases: []string{"postgres"},
+ Query: q,
+ }
+ var acc testutil.Accumulator
+ p.Start(&acc)
+ p.Init()
+
+ require.NoError(t, acc.GatherError(p.Gather))
+}
+
func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := &Postgresql{
+ Log: testutil.Logger{},
Service: postgresql.Service{
Address: fmt.Sprintf(
"host=%s user=postgres sslmode=disable",
@@ -226,7 +254,10 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) {
}
func TestAccRow(t *testing.T) {
- p := Postgresql{}
+ p := Postgresql{
+ Log: testutil.Logger{},
+ }
+
var acc testutil.Accumulator
columns := []string{"datname", "cat"}
diff --git a/plugins/inputs/postgresql_extensible/testdata/test.sql b/plugins/inputs/postgresql_extensible/testdata/test.sql
new file mode 100644
index 0000000000000..49ec02b258fe9
--- /dev/null
+++ b/plugins/inputs/postgresql_extensible/testdata/test.sql
@@ -0,0 +1 @@
+select * from pg_stat_database
\ No newline at end of file
diff --git a/plugins/inputs/powerdns/README.md b/plugins/inputs/powerdns/README.md
index 4b1732782e7ec..a6bad660fc37b 100644
--- a/plugins/inputs/powerdns/README.md
+++ b/plugins/inputs/powerdns/README.md
@@ -4,7 +4,7 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket.
### Configuration:
-```
+```toml
# Description
[[inputs.powerdns]]
# An array of sockets to gather stats about.
@@ -14,6 +14,16 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket.
unix_sockets = ["/var/run/pdns.controlsocket"]
```
+#### Permissions
+
+Telegraf will need read access to the powerdns control socket.
+
+On many systems this can be accomplished by adding the `telegraf` user to the
+`pdns` group:
+```
+usermod telegraf -a -G pdns
+```
+
### Measurements & Fields:
- powerdns
diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go
index e53373baf1ce4..3c661990cee4c 100644
--- a/plugins/inputs/powerdns/powerdns.go
+++ b/plugins/inputs/powerdns/powerdns.go
@@ -110,8 +110,8 @@ func parseResponse(metrics string) map[string]interface{} {
i, err := strconv.ParseInt(m[1], 10, 64)
if err != nil {
- log.Printf("E! powerdns: Error parsing integer for metric [%s]: %s",
- metric, err)
+ log.Printf("E! [inputs.powerdns] error parsing integer for metric %q: %s",
+ metric, err.Error())
continue
}
values[m[0]] = i
diff --git a/plugins/inputs/powerdns_recursor/README.md b/plugins/inputs/powerdns_recursor/README.md
new file mode 100644
index 0000000000000..09192db35ad2b
--- /dev/null
+++ b/plugins/inputs/powerdns_recursor/README.md
@@ -0,0 +1,163 @@
+# PowerDNS Recursor Input Plugin
+
+The `powerdns_recursor` plugin gathers metrics about PowerDNS Recursor using
+the unix controlsocket.
+
+### Configuration
+
+```toml
+[[inputs.powerdns_recursor]]
+ ## Path to the Recursor control socket.
+ unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+
+ ## Directory to create receive socket. This default is likely not writable,
+ ## please reference the full plugin documentation for a recommended setup.
+ # socket_dir = "/var/run/"
+ ## Socket permissions for the receive socket.
+ # socket_mode = "0666"
+```
+
+#### Permissions
+
+Telegraf will need read/write access to the control socket and to the
+`socket_dir`. PowerDNS will need to be able to write to the `socket_dir`.
+
+The setup described below was tested on a Debian Stretch system and may need
+adapted for other systems.
+
+First change permissions on the controlsocket in the PowerDNS recursor
+configuration, usually in `/etc/powerdns/recursor.conf`:
+```
+socket-mode = 660
+```
+
+Then place the `telegraf` user into the `pdns` group:
+```
+usermod telegraf -a -G pdns
+```
+
+Since `telegraf` cannot write to to the default `/var/run` socket directory,
+create a subdirectory and adjust permissions for this directory so that both
+users can access it.
+```sh
+$ mkdir /var/run/pdns
+$ chown root:pdns /var/run/pdns
+$ chmod 770 /var/run/pdns
+```
+
+### Metrics
+
+- powerdns_recursor
+ - tags:
+ - server
+ - fields:
+ - all-outqueries
+ - answers-slow
+ - answers0-1
+ - answers1-10
+ - answers10-100
+ - answers100-1000
+ - auth-zone-queries
+ - auth4-answers-slow
+ - auth4-answers0-1
+ - auth4-answers1-10
+ - auth4-answers10-100
+ - auth4-answers100-1000
+ - auth6-answers-slow
+ - auth6-answers0-1
+ - auth6-answers1-10
+ - auth6-answers10-100
+ - auth6-answers100-1000
+ - cache-entries
+ - cache-hits
+ - cache-misses
+ - case-mismatches
+ - chain-resends
+ - client-parse-errors
+ - concurrent-queries
+ - dlg-only-drops
+ - dnssec-queries
+ - dnssec-result-bogus
+ - dnssec-result-indeterminate
+ - dnssec-result-insecure
+ - dnssec-result-nta
+ - dnssec-result-secure
+ - dnssec-validations
+ - dont-outqueries
+ - ecs-queries
+ - ecs-responses
+ - edns-ping-matches
+ - edns-ping-mismatches
+ - failed-host-entries
+ - fd-usage
+ - ignored-packets
+ - ipv6-outqueries
+ - ipv6-questions
+ - malloc-bytes
+ - max-cache-entries
+ - max-mthread-stack
+ - max-packetcache-entries
+ - negcache-entries
+ - no-packet-error
+ - noedns-outqueries
+ - noerror-answers
+ - noping-outqueries
+ - nsset-invalidations
+ - nsspeeds-entries
+ - nxdomain-answers
+ - outgoing-timeouts
+ - outgoing4-timeouts
+ - outgoing6-timeouts
+ - over-capacity-drops
+ - packetcache-entries
+ - packetcache-hits
+ - packetcache-misses
+ - policy-drops
+ - policy-result-custom
+ - policy-result-drop
+ - policy-result-noaction
+ - policy-result-nodata
+ - policy-result-nxdomain
+ - policy-result-truncate
+ - qa-latency
+ - query-pipe-full-drops
+ - questions
+ - real-memory-usage
+ - resource-limits
+ - security-status
+ - server-parse-errors
+ - servfail-answers
+ - spoof-prevents
+ - sys-msec
+ - tcp-client-overflow
+ - tcp-clients
+ - tcp-outqueries
+ - tcp-questions
+ - throttle-entries
+ - throttled-out
+ - throttled-outqueries
+ - too-old-drops
+ - udp-in-errors
+ - udp-noport-errors
+ - udp-recvbuf-errors
+ - udp-sndbuf-errors
+ - unauthorized-tcp
+ - unauthorized-udp
+ - unexpected-packets
+ - unreachables
+ - uptime
+ - user-msec
+ - x-our-latency
+ - x-ourtime-slow
+ - x-ourtime0-1
+ - x-ourtime1-2
+ - x-ourtime16-32
+ - x-ourtime2-4
+ - x-ourtime4-8
+ - x-ourtime8-16
+
+### Example Output
+
+```
+powerdns_recursor,server=/var/run/pdns_recursor.controlsocket all-outqueries=3631810i,answers-slow=36863i,answers0-1=179612i,answers1-10=1223305i,answers10-100=1252199i,answers100-1000=408357i,auth-zone-queries=4i,auth4-answers-slow=44758i,auth4-answers0-1=59721i,auth4-answers1-10=1766787i,auth4-answers10-100=1329638i,auth4-answers100-1000=430372i,auth6-answers-slow=0i,auth6-answers0-1=0i,auth6-answers1-10=0i,auth6-answers10-100=0i,auth6-answers100-1000=0i,cache-entries=296689i,cache-hits=150654i,cache-misses=2949682i,case-mismatches=0i,chain-resends=420004i,client-parse-errors=0i,concurrent-queries=0i,dlg-only-drops=0i,dnssec-queries=152970i,dnssec-result-bogus=0i,dnssec-result-indeterminate=0i,dnssec-result-insecure=0i,dnssec-result-nta=0i,dnssec-result-secure=47i,dnssec-validations=47i,dont-outqueries=62i,ecs-queries=0i,ecs-responses=0i,edns-ping-matches=0i,edns-ping-mismatches=0i,failed-host-entries=21i,fd-usage=32i,ignored-packets=0i,ipv6-outqueries=0i,ipv6-questions=0i,malloc-bytes=0i,max-cache-entries=1000000i,max-mthread-stack=33747i,max-packetcache-entries=500000i,negcache-entries=100019i,no-packet-error=0i,noedns-outqueries=73341i,noerror-answers=25453808i,noping-outqueries=0i,nsset-invalidations=2398i,nsspeeds-entries=3966i,nxdomain-answers=3341302i,outgoing-timeouts=44384i,outgoing4-timeouts=44384i,outgoing6-timeouts=0i,over-capacity-drops=0i,packetcache-entries=78258i,packetcache-hits=25999027i,packetcache-misses=3100179i,policy-drops=0i,policy-result-custom=0i,policy-result-drop=0i,policy-result-noaction=3100336i,policy-result-nodata=0i,policy-result-nxdomain=0i,policy-result-truncate=0i,qa-latency=6553i,query-pipe-full-drops=0i,questions=29099363i,real-memory-usage=280494080i,resource-limits=0i,security-status=1i,server-parse-errors=0i,servfail-answers=304253i,spoof-prevents=0i,sys-msec=1312600i,tcp-client-overflow=0i,tcp-clients=0i,tcp-outqueries=116i,tcp-questions=133i,throttle-entries=21i,throttled-out=13296i,throttled-outqueries=13296i,too-old-drops=2i,udp-in-errors=4i,udp-noport-errors=2918i,udp-recvbuf-errors=0i,udp-sndbuf-errors=0i,unauthorized-tcp=0i,unauthorized-udp=0i,unexpected-packets=0i,unreachables=1708i,uptime=167482i,user-msec=1282640i,x-our-latency=19i,x-ourtime-slow=642i,x-ourtime0-1=3095566i,x-ourtime1-2=3401i,x-ourtime16-32=201i,x-ourtime2-4=304i,x-ourtime4-8=198i,x-ourtime8-16=24i 1533903879000000000
+```
diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go
new file mode 100644
index 0000000000000..d040d8355329d
--- /dev/null
+++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go
@@ -0,0 +1,167 @@
+package powerdns_recursor
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "log"
+ "math/rand"
+ "net"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type PowerdnsRecursor struct {
+ UnixSockets []string `toml:"unix_sockets"`
+ SocketDir string `toml:"socket_dir"`
+ SocketMode string `toml:"socket_mode"`
+
+ mode uint32
+}
+
+var defaultTimeout = 5 * time.Second
+
+var sampleConfig = `
+ ## Path to the Recursor control socket.
+ unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+
+ ## Directory to create receive socket. This default is likely not writable,
+ ## please reference the full plugin documentation for a recommended setup.
+ # socket_dir = "/var/run/"
+ ## Socket permissions for the receive socket.
+ # socket_mode = "0666"
+`
+
+func (p *PowerdnsRecursor) SampleConfig() string {
+ return sampleConfig
+}
+
+func (p *PowerdnsRecursor) Description() string {
+ return "Read metrics from one or many PowerDNS Recursor servers"
+}
+
+func (p *PowerdnsRecursor) Init() error {
+ if p.SocketMode != "" {
+ mode, err := strconv.ParseUint(p.SocketMode, 8, 32)
+ if err != nil {
+ return fmt.Errorf("could not parse socket_mode: %v", err)
+ }
+
+ p.mode = uint32(mode)
+ }
+ return nil
+}
+
+func (p *PowerdnsRecursor) Gather(acc telegraf.Accumulator) error {
+ if len(p.UnixSockets) == 0 {
+ return p.gatherServer("/var/run/pdns_recursor.controlsocket", acc)
+ }
+
+ for _, serverSocket := range p.UnixSockets {
+ if err := p.gatherServer(serverSocket, acc); err != nil {
+ acc.AddError(err)
+ }
+ }
+
+ return nil
+}
+
+func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator) error {
+ randomNumber := rand.Int63()
+ recvSocket := filepath.Join("/", "var", "run", fmt.Sprintf("pdns_recursor_telegraf%d", randomNumber))
+ if p.SocketDir != "" {
+ recvSocket = filepath.Join(p.SocketDir, fmt.Sprintf("pdns_recursor_telegraf%d", randomNumber))
+ }
+
+ laddr, err := net.ResolveUnixAddr("unixgram", recvSocket)
+ if err != nil {
+ return err
+ }
+ defer os.Remove(recvSocket)
+ raddr, err := net.ResolveUnixAddr("unixgram", address)
+ if err != nil {
+ return err
+ }
+ conn, err := net.DialUnix("unixgram", laddr, raddr)
+ if err != nil {
+ return err
+ }
+ if err := os.Chmod(recvSocket, os.FileMode(p.mode)); err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ conn.SetDeadline(time.Now().Add(defaultTimeout))
+
+ // Read and write buffer
+ rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
+
+ // Send command
+ if _, err := fmt.Fprint(rw, "get-all\n"); err != nil {
+ return nil
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+
+ // Read data
+ buf := make([]byte, 16384)
+ n, err := rw.Read(buf)
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return errors.New("no data received")
+ }
+
+ metrics := string(buf)
+
+ // Process data
+ fields := parseResponse(metrics)
+
+ // Add server socket as a tag
+ tags := map[string]string{"server": address}
+
+ acc.AddFields("powerdns_recursor", fields, tags)
+
+ conn.Close()
+
+ return nil
+}
+
+func parseResponse(metrics string) map[string]interface{} {
+ values := make(map[string]interface{})
+
+ s := strings.Split(metrics, "\n")
+
+ for _, metric := range s[:len(s)-1] {
+ m := strings.Split(metric, "\t")
+ if len(m) < 2 {
+ continue
+ }
+
+ i, err := strconv.ParseInt(m[1], 10, 64)
+ if err != nil {
+ log.Printf("E! [inputs.powerdns_recursor] error parsing integer for metric %q: %s",
+ metric, err.Error())
+ continue
+ }
+ values[m[0]] = i
+ }
+
+ return values
+}
+
+func init() {
+ inputs.Add("powerdns_recursor", func() telegraf.Input {
+ return &PowerdnsRecursor{
+ mode: uint32(0666),
+ }
+ })
+}
diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go
new file mode 100644
index 0000000000000..d0f5690cc31cb
--- /dev/null
+++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go
@@ -0,0 +1,561 @@
+package powerdns_recursor
+
+import (
+ "net"
+ "os"
+ "runtime"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type statServer struct{}
+
+var metrics = "all-outqueries\t3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" +
+ "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" +
+ "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" +
+ "auth4-answers100-1000\t424683\nauth6-answers-slow\t0\nauth6-answers0-1\t0\nauth6-answers1-10\t0\n" +
+ "auth6-answers10-100\t0\nauth6-answers100-1000\t0\ncache-entries\t295917\ncache-hits\t148630\n" +
+ "cache-misses\t2916149\ncase-mismatches\t0\nchain-resends\t418602\nclient-parse-errors\t0\n" +
+ "concurrent-queries\t0\ndlg-only-drops\t0\ndnssec-queries\t151536\ndnssec-result-bogus\t0\n" +
+ "dnssec-result-indeterminate\t0\ndnssec-result-insecure\t0\ndnssec-result-nta\t0\n" +
+ "dnssec-result-secure\t46\ndnssec-validations\t46\ndont-outqueries\t62\necs-queries\t0\n" +
+ "ecs-responses\t0\nedns-ping-matches\t0\nedns-ping-mismatches\t0\nfailed-host-entries\t33\n" +
+ "fd-usage\t32\nignored-packets\t0\nipv6-outqueries\t0\nipv6-questions\t0\nmalloc-bytes\t0\n" +
+ "max-cache-entries\t1000000\nmax-mthread-stack\t33747\nmax-packetcache-entries\t500000\n" +
+ "negcache-entries\t100070\nno-packet-error\t0\nnoedns-outqueries\t72409\nnoerror-answers\t25155259\n" +
+ "noping-outqueries\t0\nnsset-invalidations\t2385\nnsspeeds-entries\t3571\nnxdomain-answers\t3307768\n" +
+ "outgoing-timeouts\t43876\noutgoing4-timeouts\t43876\noutgoing6-timeouts\t0\nover-capacity-drops\t0\n" +
+ "packetcache-entries\t80756\npacketcache-hits\t25698497\npacketcache-misses\t3064625\npolicy-drops\t0\n" +
+ "policy-result-custom\t0\npolicy-result-drop\t0\npolicy-result-noaction\t3064779\npolicy-result-nodata\t0\n" +
+ "policy-result-nxdomain\t0\npolicy-result-truncate\t0\nqa-latency\t6587\nquery-pipe-full-drops\t0\n" +
+ "questions\t28763276\nreal-memory-usage\t280465408\nresource-limits\t0\nsecurity-status\t1\n" +
+ "server-parse-errors\t0\nservfail-answers\t300249\nspoof-prevents\t0\nsys-msec\t1296588\n" +
+ "tcp-client-overflow\t0\ntcp-clients\t0\ntcp-outqueries\t116\ntcp-questions\t130\nthrottle-entries\t33\n" +
+ "throttled-out\t13187\nthrottled-outqueries\t13187\ntoo-old-drops\t2\nudp-in-errors\t4\n" +
+ "udp-noport-errors\t2908\nudp-recvbuf-errors\t0\nudp-sndbuf-errors\t0\nunauthorized-tcp\t0\n" +
+ "unauthorized-udp\t0\nunexpected-packets\t0\nunreachables\t1695\nuptime\t165725\nuser-msec\t1266384\n" +
+ "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" +
+ "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n"
+
+// first metric has no "\t"
+var corruptMetrics = "all-outqueries3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" +
+ "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" +
+ "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" +
+ "auth4-answers100-1000\t424683\nauth6-answers-slow\t0\nauth6-answers0-1\t0\nauth6-answers1-10\t0\n" +
+ "auth6-answers10-100\t0\nauth6-answers100-1000\t0\ncache-entries\t295917\ncache-hits\t148630\n" +
+ "cache-misses\t2916149\ncase-mismatches\t0\nchain-resends\t418602\nclient-parse-errors\t0\n" +
+ "concurrent-queries\t0\ndlg-only-drops\t0\ndnssec-queries\t151536\ndnssec-result-bogus\t0\n" +
+ "dnssec-result-indeterminate\t0\ndnssec-result-insecure\t0\ndnssec-result-nta\t0\n" +
+ "dnssec-result-secure\t46\ndnssec-validations\t46\ndont-outqueries\t62\necs-queries\t0\n" +
+ "ecs-responses\t0\nedns-ping-matches\t0\nedns-ping-mismatches\t0\nfailed-host-entries\t33\n" +
+ "fd-usage\t32\nignored-packets\t0\nipv6-outqueries\t0\nipv6-questions\t0\nmalloc-bytes\t0\n" +
+ "max-cache-entries\t1000000\nmax-mthread-stack\t33747\nmax-packetcache-entries\t500000\n" +
+ "negcache-entries\t100070\nno-packet-error\t0\nnoedns-outqueries\t72409\nnoerror-answers\t25155259\n" +
+ "noping-outqueries\t0\nnsset-invalidations\t2385\nnsspeeds-entries\t3571\nnxdomain-answers\t3307768\n" +
+ "outgoing-timeouts\t43876\noutgoing4-timeouts\t43876\noutgoing6-timeouts\t0\nover-capacity-drops\t0\n" +
+ "packetcache-entries\t80756\npacketcache-hits\t25698497\npacketcache-misses\t3064625\npolicy-drops\t0\n" +
+ "policy-result-custom\t0\npolicy-result-drop\t0\npolicy-result-noaction\t3064779\npolicy-result-nodata\t0\n" +
+ "policy-result-nxdomain\t0\npolicy-result-truncate\t0\nqa-latency\t6587\nquery-pipe-full-drops\t0\n" +
+ "questions\t28763276\nreal-memory-usage\t280465408\nresource-limits\t0\nsecurity-status\t1\n" +
+ "server-parse-errors\t0\nservfail-answers\t300249\nspoof-prevents\t0\nsys-msec\t1296588\n" +
+ "tcp-client-overflow\t0\ntcp-clients\t0\ntcp-outqueries\t116\ntcp-questions\t130\nthrottle-entries\t33\n" +
+ "throttled-out\t13187\nthrottled-outqueries\t13187\ntoo-old-drops\t2\nudp-in-errors\t4\n" +
+ "udp-noport-errors\t2908\nudp-recvbuf-errors\t0\nudp-sndbuf-errors\t0\nunauthorized-tcp\t0\n" +
+ "unauthorized-udp\t0\nunexpected-packets\t0\nunreachables\t1695\nuptime\t165725\nuser-msec\t1266384\n" +
+ "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" +
+ "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n"
+
+// integer overflow
+var intOverflowMetrics = "all-outqueries\t18446744073709550195\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" +
+ "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" +
+ "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" +
+ "auth4-answers100-1000\t424683\nauth6-answers-slow\t0\nauth6-answers0-1\t0\nauth6-answers1-10\t0\n" +
+ "auth6-answers10-100\t0\nauth6-answers100-1000\t0\ncache-entries\t295917\ncache-hits\t148630\n" +
+ "cache-misses\t2916149\ncase-mismatches\t0\nchain-resends\t418602\nclient-parse-errors\t0\n" +
+ "concurrent-queries\t0\ndlg-only-drops\t0\ndnssec-queries\t151536\ndnssec-result-bogus\t0\n" +
+ "dnssec-result-indeterminate\t0\ndnssec-result-insecure\t0\ndnssec-result-nta\t0\n" +
+ "dnssec-result-secure\t46\ndnssec-validations\t46\ndont-outqueries\t62\necs-queries\t0\n" +
+ "ecs-responses\t0\nedns-ping-matches\t0\nedns-ping-mismatches\t0\nfailed-host-entries\t33\n" +
+ "fd-usage\t32\nignored-packets\t0\nipv6-outqueries\t0\nipv6-questions\t0\nmalloc-bytes\t0\n" +
+ "max-cache-entries\t1000000\nmax-mthread-stack\t33747\nmax-packetcache-entries\t500000\n" +
+ "negcache-entries\t100070\nno-packet-error\t0\nnoedns-outqueries\t72409\nnoerror-answers\t25155259\n" +
+ "noping-outqueries\t0\nnsset-invalidations\t2385\nnsspeeds-entries\t3571\nnxdomain-answers\t3307768\n" +
+ "outgoing-timeouts\t43876\noutgoing4-timeouts\t43876\noutgoing6-timeouts\t0\nover-capacity-drops\t0\n" +
+ "packetcache-entries\t80756\npacketcache-hits\t25698497\npacketcache-misses\t3064625\npolicy-drops\t0\n" +
+ "policy-result-custom\t0\npolicy-result-drop\t0\npolicy-result-noaction\t3064779\npolicy-result-nodata\t0\n" +
+ "policy-result-nxdomain\t0\npolicy-result-truncate\t0\nqa-latency\t6587\nquery-pipe-full-drops\t0\n" +
+ "questions\t28763276\nreal-memory-usage\t280465408\nresource-limits\t0\nsecurity-status\t1\n" +
+ "server-parse-errors\t0\nservfail-answers\t300249\nspoof-prevents\t0\nsys-msec\t1296588\n" +
+ "tcp-client-overflow\t0\ntcp-clients\t0\ntcp-outqueries\t116\ntcp-questions\t130\nthrottle-entries\t33\n" +
+ "throttled-out\t13187\nthrottled-outqueries\t13187\ntoo-old-drops\t2\nudp-in-errors\t4\n" +
+ "udp-noport-errors\t2908\nudp-recvbuf-errors\t0\nudp-sndbuf-errors\t0\nunauthorized-tcp\t0\n" +
+ "unauthorized-udp\t0\nunexpected-packets\t0\nunreachables\t1695\nuptime\t165725\nuser-msec\t1266384\n" +
+ "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" +
+ "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n"
+
+func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) {
+ if runtime.GOOS == "darwin" {
+ t.Skip("Skipping test on darwin")
+ }
+ // We create a fake server to return test data
+ controlSocket := "/tmp/pdns5724354148158589552.controlsocket"
+ addr, err := net.ResolveUnixAddr("unixgram", controlSocket)
+ if err != nil {
+ t.Fatal("Cannot parse unix socket")
+ }
+ socket, err := net.ListenUnixgram("unixgram", addr)
+ if err != nil {
+ t.Fatal("Cannot initialize server on port")
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer func() {
+ socket.Close()
+ os.Remove(controlSocket)
+ wg.Done()
+ }()
+
+ for {
+ buf := make([]byte, 1024)
+ n, remote, err := socket.ReadFromUnix(buf)
+ if err != nil {
+ socket.Close()
+ return
+ }
+
+ data := buf[:n]
+ if string(data) == "get-all\n" {
+ socket.WriteToUnix([]byte(metrics), remote)
+ socket.Close()
+ }
+
+ time.Sleep(100 * time.Millisecond)
+ }
+ }()
+
+ p := &PowerdnsRecursor{
+ UnixSockets: []string{controlSocket},
+ SocketDir: "/tmp",
+ SocketMode: "0666",
+ }
+ err = p.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+
+ err = acc.GatherError(p.Gather)
+ require.NoError(t, err)
+
+ wg.Wait()
+
+ intMetrics := []string{"all-outqueries", "answers-slow", "answers0-1", "answers1-10",
+ "answers10-100", "answers100-1000", "auth-zone-queries", "auth4-answers-slow",
+ "auth4-answers0-1", "auth4-answers1-10", "auth4-answers10-100", "auth4-answers100-1000",
+ "auth6-answers-slow", "auth6-answers0-1", "auth6-answers1-10", "auth6-answers10-100",
+ "auth6-answers100-1000", "cache-entries", "cache-hits", "cache-misses", "case-mismatches",
+ "chain-resends", "client-parse-errors", "concurrent-queries", "dlg-only-drops", "dnssec-queries",
+ "dnssec-result-bogus", "dnssec-result-indeterminate", "dnssec-result-insecure", "dnssec-result-nta",
+ "dnssec-result-secure", "dnssec-validations", "dont-outqueries", "ecs-queries", "ecs-responses",
+ "edns-ping-matches", "edns-ping-mismatches", "failed-host-entries", "fd-usage", "ignored-packets",
+ "ipv6-outqueries", "ipv6-questions", "malloc-bytes", "max-cache-entries", "max-mthread-stack",
+ "max-packetcache-entries", "negcache-entries", "no-packet-error", "noedns-outqueries",
+ "noerror-answers", "noping-outqueries", "nsset-invalidations", "nsspeeds-entries",
+ "nxdomain-answers", "outgoing-timeouts", "outgoing4-timeouts", "outgoing6-timeouts",
+ "over-capacity-drops", "packetcache-entries", "packetcache-hits", "packetcache-misses",
+ "policy-drops", "policy-result-custom", "policy-result-drop", "policy-result-noaction",
+ "policy-result-nodata", "policy-result-nxdomain", "policy-result-truncate", "qa-latency",
+ "query-pipe-full-drops", "questions", "real-memory-usage", "resource-limits", "security-status",
+ "server-parse-errors", "servfail-answers", "spoof-prevents", "sys-msec", "tcp-client-overflow",
+ "tcp-clients", "tcp-outqueries", "tcp-questions", "throttle-entries", "throttled-out", "throttled-outqueries",
+ "too-old-drops", "udp-in-errors", "udp-noport-errors", "udp-recvbuf-errors", "udp-sndbuf-errors",
+ "unauthorized-tcp", "unauthorized-udp", "unexpected-packets", "unreachables", "uptime", "user-msec",
+ "x-our-latency", "x-ourtime-slow", "x-ourtime0-1", "x-ourtime1-2", "x-ourtime16-32",
+ "x-ourtime2-4", "x-ourtime4-8", "x-ourtime8-16"}
+
+ for _, metric := range intMetrics {
+ assert.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric)
+ }
+}
+
+func TestPowerdnsRecursorParseMetrics(t *testing.T) {
+ values := parseResponse(metrics)
+
+ tests := []struct {
+ key string
+ value int64
+ }{
+ {"all-outqueries", 3591637},
+ {"answers-slow", 36451},
+ {"answers0-1", 177297},
+ {"answers1-10", 1209328},
+ {"answers10-100", 1238786},
+ {"answers100-1000", 402917},
+ {"auth-zone-queries", 4},
+ {"auth4-answers-slow", 44248},
+ {"auth4-answers0-1", 59169},
+ {"auth4-answers1-10", 1747403},
+ {"auth4-answers10-100", 1315621},
+ {"auth4-answers100-1000", 424683},
+ {"auth6-answers-slow", 0},
+ {"auth6-answers0-1", 0},
+ {"auth6-answers1-10", 0},
+ {"auth6-answers10-100", 0},
+ {"auth6-answers100-1000", 0},
+ {"cache-entries", 295917},
+ {"cache-hits", 148630},
+ {"cache-misses", 2916149},
+ {"case-mismatches", 0},
+ {"chain-resends", 418602},
+ {"client-parse-errors", 0},
+ {"concurrent-queries", 0},
+ {"dlg-only-drops", 0},
+ {"dnssec-queries", 151536},
+ {"dnssec-result-bogus", 0},
+ {"dnssec-result-indeterminate", 0},
+ {"dnssec-result-insecure", 0},
+ {"dnssec-result-nta", 0},
+ {"dnssec-result-secure", 46},
+ {"dnssec-validations", 46},
+ {"dont-outqueries", 62},
+ {"ecs-queries", 0},
+ {"ecs-responses", 0},
+ {"edns-ping-matches", 0},
+ {"edns-ping-mismatches", 0},
+ {"failed-host-entries", 33},
+ {"fd-usage", 32},
+ {"ignored-packets", 0},
+ {"ipv6-outqueries", 0},
+ {"ipv6-questions", 0},
+ {"malloc-bytes", 0},
+ {"max-cache-entries", 1000000},
+ {"max-mthread-stack", 33747},
+ {"max-packetcache-entries", 500000},
+ {"negcache-entries", 100070},
+ {"no-packet-error", 0},
+ {"noedns-outqueries", 72409},
+ {"noerror-answers", 25155259},
+ {"noping-outqueries", 0},
+ {"nsset-invalidations", 2385},
+ {"nsspeeds-entries", 3571},
+ {"nxdomain-answers", 3307768},
+ {"outgoing-timeouts", 43876},
+ {"outgoing4-timeouts", 43876},
+ {"outgoing6-timeouts", 0},
+ {"over-capacity-drops", 0},
+ {"packetcache-entries", 80756},
+ {"packetcache-hits", 25698497},
+ {"packetcache-misses", 3064625},
+ {"policy-drops", 0},
+ {"policy-result-custom", 0},
+ {"policy-result-drop", 0},
+ {"policy-result-noaction", 3064779},
+ {"policy-result-nodata", 0},
+ {"policy-result-nxdomain", 0},
+ {"policy-result-truncate", 0},
+ {"qa-latency", 6587},
+ {"query-pipe-full-drops", 0},
+ {"questions", 28763276},
+ {"real-memory-usage", 280465408},
+ {"resource-limits", 0},
+ {"security-status", 1},
+ {"server-parse-errors", 0},
+ {"servfail-answers", 300249},
+ {"spoof-prevents", 0},
+ {"sys-msec", 1296588},
+ {"tcp-client-overflow", 0},
+ {"tcp-clients", 0},
+ {"tcp-outqueries", 116},
+ {"tcp-questions", 130},
+ {"throttle-entries", 33},
+ {"throttled-out", 13187},
+ {"throttled-outqueries", 13187},
+ {"too-old-drops", 2},
+ {"udp-in-errors", 4},
+ {"udp-noport-errors", 2908},
+ {"udp-recvbuf-errors", 0},
+ {"udp-sndbuf-errors", 0},
+ {"unauthorized-tcp", 0},
+ {"unauthorized-udp", 0},
+ {"unexpected-packets", 0},
+ {"unreachables", 1695},
+ {"uptime", 165725},
+ {"user-msec", 1266384},
+ {"x-our-latency", 19},
+ {"x-ourtime-slow", 632},
+ {"x-ourtime0-1", 3060079},
+ {"x-ourtime1-2", 3351},
+ {"x-ourtime16-32", 197},
+ {"x-ourtime2-4", 302},
+ {"x-ourtime4-8", 194},
+ {"x-ourtime8-16", 24},
+ }
+
+ for _, test := range tests {
+ value, ok := values[test.key]
+ if !ok {
+ t.Errorf("Did not find key for metric %s in values", test.key)
+ continue
+ }
+ if value != test.value {
+ t.Errorf("Metric: %s, Expected: %d, actual: %d",
+ test.key, test.value, value)
+ }
+ }
+}
+
+func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) {
+ values := parseResponse(corruptMetrics)
+
+ tests := []struct {
+ key string
+ value int64
+ }{
+ {"answers-slow", 36451},
+ {"answers0-1", 177297},
+ {"answers1-10", 1209328},
+ {"answers10-100", 1238786},
+ {"answers100-1000", 402917},
+ {"auth-zone-queries", 4},
+ {"auth4-answers-slow", 44248},
+ {"auth4-answers0-1", 59169},
+ {"auth4-answers1-10", 1747403},
+ {"auth4-answers10-100", 1315621},
+ {"auth4-answers100-1000", 424683},
+ {"auth6-answers-slow", 0},
+ {"auth6-answers0-1", 0},
+ {"auth6-answers1-10", 0},
+ {"auth6-answers10-100", 0},
+ {"auth6-answers100-1000", 0},
+ {"cache-entries", 295917},
+ {"cache-hits", 148630},
+ {"cache-misses", 2916149},
+ {"case-mismatches", 0},
+ {"chain-resends", 418602},
+ {"client-parse-errors", 0},
+ {"concurrent-queries", 0},
+ {"dlg-only-drops", 0},
+ {"dnssec-queries", 151536},
+ {"dnssec-result-bogus", 0},
+ {"dnssec-result-indeterminate", 0},
+ {"dnssec-result-insecure", 0},
+ {"dnssec-result-nta", 0},
+ {"dnssec-result-secure", 46},
+ {"dnssec-validations", 46},
+ {"dont-outqueries", 62},
+ {"ecs-queries", 0},
+ {"ecs-responses", 0},
+ {"edns-ping-matches", 0},
+ {"edns-ping-mismatches", 0},
+ {"failed-host-entries", 33},
+ {"fd-usage", 32},
+ {"ignored-packets", 0},
+ {"ipv6-outqueries", 0},
+ {"ipv6-questions", 0},
+ {"malloc-bytes", 0},
+ {"max-cache-entries", 1000000},
+ {"max-mthread-stack", 33747},
+ {"max-packetcache-entries", 500000},
+ {"negcache-entries", 100070},
+ {"no-packet-error", 0},
+ {"noedns-outqueries", 72409},
+ {"noerror-answers", 25155259},
+ {"noping-outqueries", 0},
+ {"nsset-invalidations", 2385},
+ {"nsspeeds-entries", 3571},
+ {"nxdomain-answers", 3307768},
+ {"outgoing-timeouts", 43876},
+ {"outgoing4-timeouts", 43876},
+ {"outgoing6-timeouts", 0},
+ {"over-capacity-drops", 0},
+ {"packetcache-entries", 80756},
+ {"packetcache-hits", 25698497},
+ {"packetcache-misses", 3064625},
+ {"policy-drops", 0},
+ {"policy-result-custom", 0},
+ {"policy-result-drop", 0},
+ {"policy-result-noaction", 3064779},
+ {"policy-result-nodata", 0},
+ {"policy-result-nxdomain", 0},
+ {"policy-result-truncate", 0},
+ {"qa-latency", 6587},
+ {"query-pipe-full-drops", 0},
+ {"questions", 28763276},
+ {"real-memory-usage", 280465408},
+ {"resource-limits", 0},
+ {"security-status", 1},
+ {"server-parse-errors", 0},
+ {"servfail-answers", 300249},
+ {"spoof-prevents", 0},
+ {"sys-msec", 1296588},
+ {"tcp-client-overflow", 0},
+ {"tcp-clients", 0},
+ {"tcp-outqueries", 116},
+ {"tcp-questions", 130},
+ {"throttle-entries", 33},
+ {"throttled-out", 13187},
+ {"throttled-outqueries", 13187},
+ {"too-old-drops", 2},
+ {"udp-in-errors", 4},
+ {"udp-noport-errors", 2908},
+ {"udp-recvbuf-errors", 0},
+ {"udp-sndbuf-errors", 0},
+ {"unauthorized-tcp", 0},
+ {"unauthorized-udp", 0},
+ {"unexpected-packets", 0},
+ {"unreachables", 1695},
+ {"uptime", 165725},
+ {"user-msec", 1266384},
+ {"x-our-latency", 19},
+ {"x-ourtime-slow", 632},
+ {"x-ourtime0-1", 3060079},
+ {"x-ourtime1-2", 3351},
+ {"x-ourtime16-32", 197},
+ {"x-ourtime2-4", 302},
+ {"x-ourtime4-8", 194},
+ {"x-ourtime8-16", 24},
+ }
+
+ for _, test := range tests {
+ value, ok := values[test.key]
+ if !ok {
+ t.Errorf("Did not find key for metric %s in values", test.key)
+ continue
+ }
+ if value != test.value {
+ t.Errorf("Metric: %s, Expected: %d, actual: %d",
+ test.key, test.value, value)
+ }
+ }
+}
+
+func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) {
+ values := parseResponse(intOverflowMetrics)
+
+ tests := []struct {
+ key string
+ value int64
+ }{
+ {"answers-slow", 36451},
+ {"answers0-1", 177297},
+ {"answers1-10", 1209328},
+ {"answers10-100", 1238786},
+ {"answers100-1000", 402917},
+ {"auth-zone-queries", 4},
+ {"auth4-answers-slow", 44248},
+ {"auth4-answers0-1", 59169},
+ {"auth4-answers1-10", 1747403},
+ {"auth4-answers10-100", 1315621},
+ {"auth4-answers100-1000", 424683},
+ {"auth6-answers-slow", 0},
+ {"auth6-answers0-1", 0},
+ {"auth6-answers1-10", 0},
+ {"auth6-answers10-100", 0},
+ {"auth6-answers100-1000", 0},
+ {"cache-entries", 295917},
+ {"cache-hits", 148630},
+ {"cache-misses", 2916149},
+ {"case-mismatches", 0},
+ {"chain-resends", 418602},
+ {"client-parse-errors", 0},
+ {"concurrent-queries", 0},
+ {"dlg-only-drops", 0},
+ {"dnssec-queries", 151536},
+ {"dnssec-result-bogus", 0},
+ {"dnssec-result-indeterminate", 0},
+ {"dnssec-result-insecure", 0},
+ {"dnssec-result-nta", 0},
+ {"dnssec-result-secure", 46},
+ {"dnssec-validations", 46},
+ {"dont-outqueries", 62},
+ {"ecs-queries", 0},
+ {"ecs-responses", 0},
+ {"edns-ping-matches", 0},
+ {"edns-ping-mismatches", 0},
+ {"failed-host-entries", 33},
+ {"fd-usage", 32},
+ {"ignored-packets", 0},
+ {"ipv6-outqueries", 0},
+ {"ipv6-questions", 0},
+ {"malloc-bytes", 0},
+ {"max-cache-entries", 1000000},
+ {"max-mthread-stack", 33747},
+ {"max-packetcache-entries", 500000},
+ {"negcache-entries", 100070},
+ {"no-packet-error", 0},
+ {"noedns-outqueries", 72409},
+ {"noerror-answers", 25155259},
+ {"noping-outqueries", 0},
+ {"nsset-invalidations", 2385},
+ {"nsspeeds-entries", 3571},
+ {"nxdomain-answers", 3307768},
+ {"outgoing-timeouts", 43876},
+ {"outgoing4-timeouts", 43876},
+ {"outgoing6-timeouts", 0},
+ {"over-capacity-drops", 0},
+ {"packetcache-entries", 80756},
+ {"packetcache-hits", 25698497},
+ {"packetcache-misses", 3064625},
+ {"policy-drops", 0},
+ {"policy-result-custom", 0},
+ {"policy-result-drop", 0},
+ {"policy-result-noaction", 3064779},
+ {"policy-result-nodata", 0},
+ {"policy-result-nxdomain", 0},
+ {"policy-result-truncate", 0},
+ {"qa-latency", 6587},
+ {"query-pipe-full-drops", 0},
+ {"questions", 28763276},
+ {"real-memory-usage", 280465408},
+ {"resource-limits", 0},
+ {"security-status", 1},
+ {"server-parse-errors", 0},
+ {"servfail-answers", 300249},
+ {"spoof-prevents", 0},
+ {"sys-msec", 1296588},
+ {"tcp-client-overflow", 0},
+ {"tcp-clients", 0},
+ {"tcp-outqueries", 116},
+ {"tcp-questions", 130},
+ {"throttle-entries", 33},
+ {"throttled-out", 13187},
+ {"throttled-outqueries", 13187},
+ {"too-old-drops", 2},
+ {"udp-in-errors", 4},
+ {"udp-noport-errors", 2908},
+ {"udp-recvbuf-errors", 0},
+ {"udp-sndbuf-errors", 0},
+ {"unauthorized-tcp", 0},
+ {"unauthorized-udp", 0},
+ {"unexpected-packets", 0},
+ {"unreachables", 1695},
+ {"uptime", 165725},
+ {"user-msec", 1266384},
+ {"x-our-latency", 19},
+ {"x-ourtime-slow", 632},
+ {"x-ourtime0-1", 3060079},
+ {"x-ourtime1-2", 3351},
+ {"x-ourtime16-32", 197},
+ {"x-ourtime2-4", 302},
+ {"x-ourtime4-8", 194},
+ {"x-ourtime8-16", 24},
+ }
+
+ for _, test := range tests {
+ value, ok := values[test.key]
+ if !ok {
+ t.Errorf("Did not find key for metric %s in values", test.key)
+ continue
+ }
+ if value != test.value {
+ t.Errorf("Metric: %s, Expected: %d, actual: %d",
+ test.key, test.value, value)
+ }
+ }
+}
diff --git a/plugins/inputs/processes/README.md b/plugins/inputs/processes/README.md
index 3c2e27291be98..756326d75246d 100644
--- a/plugins/inputs/processes/README.md
+++ b/plugins/inputs/processes/README.md
@@ -6,7 +6,9 @@ them by status (zombie, sleeping, running, etc.)
On linux this plugin requires access to procfs (/proc), on other OSes
it requires access to execute `ps`.
-### Configuration:
+**Supported Platforms**: Linux, FreeBSD, Darwin
+
+### Configuration
```toml
# Get the number of processes and group them by status
@@ -19,9 +21,10 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info
`docker run -v /proc:/rootfs/proc:ro -e HOST_PROC=/rootfs/proc`
-### Measurements & Fields:
+### Metrics
- processes
+ - fields:
- blocked (aka disk sleep or uninterruptible sleep)
- running
- sleeping
@@ -32,6 +35,7 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info
- wait (freebsd only)
- idle (bsd and Linux 4+ only)
- paging (linux only)
+ - parked (linux only)
- total_threads (linux only)
### Process State Mappings
@@ -52,14 +56,8 @@ Linux FreeBSD Darwin meaning
W W none paging (linux kernel < 2.6 only), wait (freebsd)
```
-### Tags:
-
-None
-
-### Example Output:
+### Example Output
```
-$ telegraf --config ~/ws/telegraf.conf --input-filter processes --test
-* Plugin: processes, Collection 1
-> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042
+processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042
```
diff --git a/plugins/inputs/processes/processes.go b/plugins/inputs/processes/processes.go
index c71d72f505e85..9ee583dbacecf 100644
--- a/plugins/inputs/processes/processes.go
+++ b/plugins/inputs/processes/processes.go
@@ -1,237 +1,7 @@
-// +build !windows
-
package processes
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strconv"
- "syscall"
-
- "github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/plugins/inputs"
- "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs"
-)
-
-type Processes struct {
- execPS func() ([]byte, error)
- readProcFile func(filename string) ([]byte, error)
-
- forcePS bool
- forceProc bool
-}
-
func (p *Processes) Description() string {
return "Get the number of processes and group them by status"
}
func (p *Processes) SampleConfig() string { return "" }
-
-func (p *Processes) Gather(acc telegraf.Accumulator) error {
- // Get an empty map of metric fields
- fields := getEmptyFields()
-
- // Decide if we will use 'ps' to get stats (use procfs otherwise)
- usePS := true
- if runtime.GOOS == "linux" {
- usePS = false
- }
- if p.forcePS {
- usePS = true
- } else if p.forceProc {
- usePS = false
- }
-
- // Gather stats from 'ps' or procfs
- if usePS {
- if err := p.gatherFromPS(fields); err != nil {
- return err
- }
- } else {
- if err := p.gatherFromProc(fields); err != nil {
- return err
- }
- }
-
- acc.AddGauge("processes", fields, nil)
- return nil
-}
-
-// Gets empty fields of metrics based on the OS
-func getEmptyFields() map[string]interface{} {
- fields := map[string]interface{}{
- "blocked": int64(0),
- "zombies": int64(0),
- "stopped": int64(0),
- "running": int64(0),
- "sleeping": int64(0),
- "total": int64(0),
- "unknown": int64(0),
- }
- switch runtime.GOOS {
- case "freebsd":
- fields["idle"] = int64(0)
- fields["wait"] = int64(0)
- case "darwin":
- fields["idle"] = int64(0)
- case "openbsd":
- fields["idle"] = int64(0)
- case "linux":
- fields["dead"] = int64(0)
- fields["paging"] = int64(0)
- fields["total_threads"] = int64(0)
- fields["idle"] = int64(0)
- }
- return fields
-}
-
-// exec `ps` to get all process states
-func (p *Processes) gatherFromPS(fields map[string]interface{}) error {
- out, err := p.execPS()
- if err != nil {
- return err
- }
-
- for i, status := range bytes.Fields(out) {
- if i == 0 && string(status) == "STAT" {
- // This is a header, skip it
- continue
- }
- switch status[0] {
- case 'W':
- fields["wait"] = fields["wait"].(int64) + int64(1)
- case 'U', 'D', 'L':
- // Also known as uninterruptible sleep or disk sleep
- fields["blocked"] = fields["blocked"].(int64) + int64(1)
- case 'Z':
- fields["zombies"] = fields["zombies"].(int64) + int64(1)
- case 'X':
- fields["dead"] = fields["dead"].(int64) + int64(1)
- case 'T':
- fields["stopped"] = fields["stopped"].(int64) + int64(1)
- case 'R':
- fields["running"] = fields["running"].(int64) + int64(1)
- case 'S':
- fields["sleeping"] = fields["sleeping"].(int64) + int64(1)
- case 'I':
- fields["idle"] = fields["idle"].(int64) + int64(1)
- case '?':
- fields["unknown"] = fields["unknown"].(int64) + int64(1)
- default:
- log.Printf("I! processes: Unknown state [ %s ] from ps",
- string(status[0]))
- }
- fields["total"] = fields["total"].(int64) + int64(1)
- }
- return nil
-}
-
-// get process states from /proc/(pid)/stat files
-func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
- filenames, err := filepath.Glob(linux_sysctl_fs.GetHostProc() + "/[0-9]*/stat")
-
- if err != nil {
- return err
- }
-
- for _, filename := range filenames {
- _, err := os.Stat(filename)
- data, err := p.readProcFile(filename)
- if err != nil {
- return err
- }
- if data == nil {
- continue
- }
-
- // Parse out data after ()
- i := bytes.LastIndex(data, []byte(")"))
- if i == -1 {
- continue
- }
- data = data[i+2:]
-
- stats := bytes.Fields(data)
- if len(stats) < 3 {
- return fmt.Errorf("Something is terribly wrong with %s", filename)
- }
- switch stats[0][0] {
- case 'R':
- fields["running"] = fields["running"].(int64) + int64(1)
- case 'S':
- fields["sleeping"] = fields["sleeping"].(int64) + int64(1)
- case 'D':
- fields["blocked"] = fields["blocked"].(int64) + int64(1)
- case 'Z':
- fields["zombies"] = fields["zombies"].(int64) + int64(1)
- case 'X':
- fields["dead"] = fields["dead"].(int64) + int64(1)
- case 'T', 't':
- fields["stopped"] = fields["stopped"].(int64) + int64(1)
- case 'W':
- fields["paging"] = fields["paging"].(int64) + int64(1)
- case 'I':
- fields["idle"] = fields["idle"].(int64) + int64(1)
- default:
- log.Printf("I! processes: Unknown state [ %s ] in file %s",
- string(stats[0][0]), filename)
- }
- fields["total"] = fields["total"].(int64) + int64(1)
-
- threads, err := strconv.Atoi(string(stats[17]))
- if err != nil {
- log.Printf("I! processes: Error parsing thread count: %s", err)
- continue
- }
- fields["total_threads"] = fields["total_threads"].(int64) + int64(threads)
- }
- return nil
-}
-
-func readProcFile(filename string) ([]byte, error) {
- data, err := ioutil.ReadFile(filename)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- // Reading from /proc/ fails with ESRCH if the process has
- // been terminated between open() and read().
- if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ESRCH {
- return nil, nil
- }
-
- return nil, err
- }
-
- return data, nil
-}
-
-func execPS() ([]byte, error) {
- bin, err := exec.LookPath("ps")
- if err != nil {
- return nil, err
- }
-
- out, err := exec.Command(bin, "axo", "state").Output()
- if err != nil {
- return nil, err
- }
-
- return out, err
-}
-
-func init() {
- inputs.Add("processes", func() telegraf.Input {
- return &Processes{
- execPS: execPS,
- readProcFile: readProcFile,
- }
- })
-}
diff --git a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go
new file mode 100644
index 0000000000000..445e7fb9f7255
--- /dev/null
+++ b/plugins/inputs/processes/processes_notwindows.go
@@ -0,0 +1,235 @@
+// +build !windows
+
+package processes
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "syscall"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs"
+)
+
+type Processes struct {
+ execPS func() ([]byte, error)
+ readProcFile func(filename string) ([]byte, error)
+
+ Log telegraf.Logger
+
+ forcePS bool
+ forceProc bool
+}
+
+func (p *Processes) Gather(acc telegraf.Accumulator) error {
+ // Get an empty map of metric fields
+ fields := getEmptyFields()
+
+ // Decide if we will use 'ps' to get stats (use procfs otherwise)
+ usePS := true
+ if runtime.GOOS == "linux" {
+ usePS = false
+ }
+ if p.forcePS {
+ usePS = true
+ } else if p.forceProc {
+ usePS = false
+ }
+
+ // Gather stats from 'ps' or procfs
+ if usePS {
+ if err := p.gatherFromPS(fields); err != nil {
+ return err
+ }
+ } else {
+ if err := p.gatherFromProc(fields); err != nil {
+ return err
+ }
+ }
+
+ acc.AddGauge("processes", fields, nil)
+ return nil
+}
+
+// Gets empty fields of metrics based on the OS
+func getEmptyFields() map[string]interface{} {
+ fields := map[string]interface{}{
+ "blocked": int64(0),
+ "zombies": int64(0),
+ "stopped": int64(0),
+ "running": int64(0),
+ "sleeping": int64(0),
+ "total": int64(0),
+ "unknown": int64(0),
+ }
+ switch runtime.GOOS {
+ case "freebsd":
+ fields["idle"] = int64(0)
+ fields["wait"] = int64(0)
+ case "darwin":
+ fields["idle"] = int64(0)
+ case "openbsd":
+ fields["idle"] = int64(0)
+ case "linux":
+ fields["dead"] = int64(0)
+ fields["paging"] = int64(0)
+ fields["total_threads"] = int64(0)
+ fields["idle"] = int64(0)
+ }
+ return fields
+}
+
+// exec `ps` to get all process states
+func (p *Processes) gatherFromPS(fields map[string]interface{}) error {
+ out, err := p.execPS()
+ if err != nil {
+ return err
+ }
+
+ for i, status := range bytes.Fields(out) {
+ if i == 0 && string(status) == "STAT" {
+ // This is a header, skip it
+ continue
+ }
+ switch status[0] {
+ case 'W':
+ fields["wait"] = fields["wait"].(int64) + int64(1)
+ case 'U', 'D', 'L':
+ // Also known as uninterruptible sleep or disk sleep
+ fields["blocked"] = fields["blocked"].(int64) + int64(1)
+ case 'Z':
+ fields["zombies"] = fields["zombies"].(int64) + int64(1)
+ case 'X':
+ fields["dead"] = fields["dead"].(int64) + int64(1)
+ case 'T':
+ fields["stopped"] = fields["stopped"].(int64) + int64(1)
+ case 'R':
+ fields["running"] = fields["running"].(int64) + int64(1)
+ case 'S':
+ fields["sleeping"] = fields["sleeping"].(int64) + int64(1)
+ case 'I':
+ fields["idle"] = fields["idle"].(int64) + int64(1)
+ case '?':
+ fields["unknown"] = fields["unknown"].(int64) + int64(1)
+ default:
+ p.Log.Infof("Unknown state %q from ps", string(status[0]))
+ }
+ fields["total"] = fields["total"].(int64) + int64(1)
+ }
+ return nil
+}
+
+// get process states from /proc/(pid)/stat files
+func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
+ filenames, err := filepath.Glob(linux_sysctl_fs.GetHostProc() + "/[0-9]*/stat")
+
+ if err != nil {
+ return err
+ }
+
+ for _, filename := range filenames {
+ _, err := os.Stat(filename)
+ data, err := p.readProcFile(filename)
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ continue
+ }
+
+ // Parse out data after ()
+ i := bytes.LastIndex(data, []byte(")"))
+ if i == -1 {
+ continue
+ }
+ data = data[i+2:]
+
+ stats := bytes.Fields(data)
+ if len(stats) < 3 {
+ return fmt.Errorf("Something is terribly wrong with %s", filename)
+ }
+ switch stats[0][0] {
+ case 'R':
+ fields["running"] = fields["running"].(int64) + int64(1)
+ case 'S':
+ fields["sleeping"] = fields["sleeping"].(int64) + int64(1)
+ case 'D':
+ fields["blocked"] = fields["blocked"].(int64) + int64(1)
+ case 'Z':
+ fields["zombies"] = fields["zombies"].(int64) + int64(1)
+ case 'X':
+ fields["dead"] = fields["dead"].(int64) + int64(1)
+ case 'T', 't':
+ fields["stopped"] = fields["stopped"].(int64) + int64(1)
+ case 'W':
+ fields["paging"] = fields["paging"].(int64) + int64(1)
+ case 'I':
+ fields["idle"] = fields["idle"].(int64) + int64(1)
+ case 'P':
+ if _, ok := fields["parked"]; ok {
+ fields["parked"] = fields["parked"].(int64) + int64(1)
+ }
+ fields["parked"] = int64(1)
+ default:
+ p.Log.Infof("Unknown state %q in file %q", string(stats[0][0]), filename)
+ }
+ fields["total"] = fields["total"].(int64) + int64(1)
+
+ threads, err := strconv.Atoi(string(stats[17]))
+ if err != nil {
+ p.Log.Infof("Error parsing thread count: %s", err.Error())
+ continue
+ }
+ fields["total_threads"] = fields["total_threads"].(int64) + int64(threads)
+ }
+ return nil
+}
+
+func readProcFile(filename string) ([]byte, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+
+ // Reading from /proc/ fails with ESRCH if the process has
+ // been terminated between open() and read().
+ if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ESRCH {
+ return nil, nil
+ }
+
+ return nil, err
+ }
+
+ return data, nil
+}
+
+func execPS() ([]byte, error) {
+ bin, err := exec.LookPath("ps")
+ if err != nil {
+ return nil, err
+ }
+
+ out, err := exec.Command(bin, "axo", "state").Output()
+ if err != nil {
+ return nil, err
+ }
+
+ return out, err
+}
+
+func init() {
+ inputs.Add("processes", func() telegraf.Input {
+ return &Processes{
+ execPS: execPS,
+ readProcFile: readProcFile,
+ }
+ })
+}
diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go
index 27fdf76a17858..268cef9135f8c 100644
--- a/plugins/inputs/processes/processes_test.go
+++ b/plugins/inputs/processes/processes_test.go
@@ -6,7 +6,9 @@ import (
"fmt"
"runtime"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -14,6 +16,7 @@ import (
func TestProcesses(t *testing.T) {
processes := &Processes{
+ Log: testutil.Logger{},
execPS: execPS,
readProcFile: readProcFile,
}
@@ -33,6 +36,7 @@ func TestProcesses(t *testing.T) {
func TestFromPS(t *testing.T) {
processes := &Processes{
+ Log: testutil.Logger{},
execPS: testExecPS,
forcePS: true,
}
@@ -54,6 +58,7 @@ func TestFromPS(t *testing.T) {
func TestFromPSError(t *testing.T) {
processes := &Processes{
+ Log: testutil.Logger{},
execPS: testExecPSError,
forcePS: true,
}
@@ -69,6 +74,7 @@ func TestFromProcFiles(t *testing.T) {
}
tester := tester{}
processes := &Processes{
+ Log: testutil.Logger{},
readProcFile: tester.testProcFile,
forceProc: true,
}
@@ -91,6 +97,7 @@ func TestFromProcFilesWithSpaceInCmd(t *testing.T) {
}
tester := tester{}
processes := &Processes{
+ Log: testutil.Logger{},
readProcFile: tester.testProcFile2,
forceProc: true,
}
@@ -107,6 +114,60 @@ func TestFromProcFilesWithSpaceInCmd(t *testing.T) {
acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{})
}
+// Based on `man 5 proc`, parked processes an be found in a
+// limited range of Linux versions:
+//
+// > P Parked (Linux 3.9 to 3.13 only)
+//
+// However, we have had reports of this process state on Ubuntu
+// Bionic w/ Linux 4.15 (#6270)
+func TestParkedProcess(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skip("Parked process test only relevant on linux")
+ }
+ procstat := `88 (watchdog/13) P 2 0 0 0 -1 69238848 0 0 0 0 0 0 0 0 20 0 1 0 20 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 1 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+`
+ plugin := &Processes{
+ Log: testutil.Logger{},
+ readProcFile: func(string) ([]byte, error) {
+ return []byte(procstat), nil
+ },
+ forceProc: true,
+ }
+
+ var acc testutil.Accumulator
+ err := plugin.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "processes",
+ map[string]string{},
+ map[string]interface{}{
+ "blocked": 0,
+ "dead": 0,
+ "idle": 0,
+ "paging": 0,
+ "parked": 1,
+ "running": 0,
+ "sleeping": 0,
+ "stopped": 0,
+ "unknown": 0,
+ "zombies": 0,
+ },
+ time.Unix(0, 0),
+ telegraf.Gauge,
+ ),
+ }
+ actual := acc.GetTelegrafMetrics()
+ for _, a := range actual {
+ a.RemoveField("total")
+ a.RemoveField("total_threads")
+ }
+ testutil.RequireMetricsEqual(t, expected, actual,
+ testutil.IgnoreTime())
+}
+
func testExecPS() ([]byte, error) {
return []byte(testPSOut), nil
}
diff --git a/plugins/inputs/processes/processes_windows.go b/plugins/inputs/processes/processes_windows.go
index 32c73f918165d..567373c7c7260 100644
--- a/plugins/inputs/processes/processes_windows.go
+++ b/plugins/inputs/processes/processes_windows.go
@@ -1,3 +1,27 @@
// +build windows
package processes
+
+import (
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type Processes struct {
+ Log telegraf.Logger
+}
+
+func (e *Processes) Init() error {
+ e.Log.Warn("Current platform is not supported")
+ return nil
+}
+
+func (e *Processes) Gather(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func init() {
+ inputs.Add("processes", func() telegraf.Input {
+ return &Processes{}
+ })
+}
diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md
index dfe95291abd21..3803215697ec7 100644
--- a/plugins/inputs/procstat/README.md
+++ b/plugins/inputs/procstat/README.md
@@ -1,7 +1,7 @@
# Procstat Input Plugin
The procstat plugin can be used to monitor the system resource usage of one or more processes.
-The procstat_lookup metric displays the query information,
+The procstat_lookup metric displays the query information,
specifically the number of PIDs returned on a search
Processes can be selected for monitoring using one of several methods:
@@ -44,9 +44,12 @@ Processes can be selected for monitoring using one of several methods:
## When true add the full cmdline as a tag.
# cmdline_tag = false
- ## Add PID as a tag instead of a field; useful to differentiate between
- ## processes whose tags are otherwise the same. Can create a large number
- ## of series, use judiciously.
+ ## Add the PID as a tag instead of as a field. When collecting multiple
+ ## processes with otherwise matching tags this setting should be enabled to
+ ## ensure each process has a unique identity.
+ ##
+ ## Enabling this option may result in a large number of series, especially
+ ## when processes have a short lifetime.
# pid_tag = false
## Method to use when finding process IDs. Can be one of 'pgrep', or
@@ -85,6 +88,9 @@ implemented as a WMI query. The pattern allows fuzzy matching using only
- cgroup (when defined)
- win_service (when defined)
- fields:
+ - child_major_faults (int)
+ - child_minor_faults (int)
+ - created_at (int) [epoch in nanoseconds]
- cpu_time (int)
- cpu_time_guest (float)
- cpu_time_guest_nice (float)
@@ -94,17 +100,19 @@ implemented as a WMI query. The pattern allows fuzzy matching using only
- cpu_time_nice (float)
- cpu_time_soft_irq (float)
- cpu_time_steal (float)
- - cpu_time_stolen (float)
- cpu_time_system (float)
- cpu_time_user (float)
- cpu_usage (float)
- involuntary_context_switches (int)
+ - major_faults (int)
- memory_data (int)
- memory_locked (int)
- memory_rss (int)
- memory_stack (int)
- memory_swap (int)
+ - memory_usage (float)
- memory_vms (int)
+ - minor_faults (int)
- nice_priority (int)
- num_fds (int, *telegraf* may need to be ran as **root**)
- num_threads (int)
@@ -160,6 +168,6 @@ implemented as a WMI query. The pattern allows fuzzy matching using only
### Example Output:
```
-procstat,pidfile=/var/run/lxc/dnsmasq.pid,process_name=dnsmasq rlimit_file_locks_soft=2147483647i,rlimit_signals_pending_hard=1758i,voluntary_context_switches=478i,read_bytes=307200i,cpu_time_user=0.01,cpu_time_guest=0,memory_swap=0i,memory_locked=0i,rlimit_num_fds_hard=4096i,rlimit_nice_priority_hard=0i,num_fds=11i,involuntary_context_switches=20i,read_count=23i,memory_rss=1388544i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_rss_hard=2147483647i,nice_priority=20i,rlimit_cpu_time_hard=2147483647i,cpu_time=0i,write_bytes=0i,cpu_time_idle=0,cpu_time_nice=0,memory_data=229376i,memory_stack=135168i,rlimit_cpu_time_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_signals_pending_soft=1758i,write_count=11i,cpu_time_iowait=0,cpu_time_steal=0,cpu_time_stolen=0,rlimit_memory_stack_soft=8388608i,cpu_time_system=0.02,cpu_time_guest_nice=0,rlimit_memory_locked_soft=65536i,rlimit_memory_vms_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_realtime_priority_hard=0i,pid=828i,num_threads=1i,cpu_time_soft_irq=0,rlimit_memory_vms_hard=2147483647i,rlimit_realtime_priority_soft=0i,memory_vms=15884288i,rlimit_memory_stack_hard=2147483647i,cpu_time_irq=0,rlimit_memory_data_soft=2147483647i,rlimit_num_fds_soft=1024i,signals_pending=0i,rlimit_nice_priority_soft=0i,realtime_priority=0i
-procstat,exe=influxd,process_name=influxd rlimit_num_fds_hard=16384i,rlimit_signals_pending_hard=1758i,realtime_priority=0i,rlimit_memory_vms_hard=2147483647i,rlimit_signals_pending_soft=1758i,cpu_time_stolen=0,rlimit_memory_stack_hard=2147483647i,rlimit_realtime_priority_hard=0i,cpu_time=0i,pid=500i,voluntary_context_switches=975i,cpu_time_idle=0,memory_rss=3072000i,memory_locked=0i,rlimit_nice_priority_soft=0i,signals_pending=0i,nice_priority=20i,read_bytes=823296i,cpu_time_soft_irq=0,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_soft=65536i,write_count=8i,cpu_time_irq=0,memory_vms=33501184i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,num_fds=29i,memory_data=229376i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_soft=2147483647i,num_threads=1i,write_bytes=0i,cpu_time_steal=0,rlimit_memory_rss_hard=2147483647i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_usage=0,rlimit_memory_locked_hard=65536i,rlimit_file_locks_hard=2147483647i,involuntary_context_switches=38i,read_count=16851i,memory_swap=0i,rlimit_memory_data_soft=2147483647i,cpu_time_user=0.11,rlimit_cpu_time_hard=2147483647i,rlimit_num_fds_soft=16384i,rlimit_realtime_priority_soft=0i,cpu_time_system=0.27,cpu_time_nice=0,memory_stack=135168i,rlimit_memory_rss_soft=2147483647i
+procstat_lookup,host=prash-laptop,pattern=influxd,pid_finder=pgrep,result=success pid_count=1i,running=1i,result_code=0i 1582089700000000000
+procstat,host=prash-laptop,pattern=influxd,process_name=influxd,user=root involuntary_context_switches=151496i,child_minor_faults=1061i,child_major_faults=8i,cpu_time_user=2564.81,cpu_time_idle=0,cpu_time_irq=0,cpu_time_guest=0,pid=32025i,major_faults=8609i,created_at=1580107536000000000i,voluntary_context_switches=1058996i,cpu_time_system=616.98,cpu_time_steal=0,cpu_time_guest_nice=0,memory_swap=0i,memory_locked=0i,memory_usage=1.7797634601593018,num_threads=18i,cpu_time_nice=0,cpu_time_iowait=0,cpu_time_soft_irq=0,memory_rss=148643840i,memory_vms=1435688960i,memory_data=0i,memory_stack=0i,minor_faults=1856550i 1582089700000000000
```
diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go
index 583e56d063ef1..57d9d81c221ce 100644
--- a/plugins/inputs/procstat/native_finder.go
+++ b/plugins/inputs/procstat/native_finder.go
@@ -3,6 +3,7 @@ package procstat
import (
"fmt"
"io/ioutil"
+ "regexp"
"strconv"
"strings"
@@ -55,3 +56,41 @@ func (pg *NativeFinder) PidFile(path string) ([]PID, error) {
return pids, nil
}
+
+//FullPattern matches on the command line when the process was executed
+func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) {
+ var pids []PID
+ regxPattern, err := regexp.Compile(pattern)
+ if err != nil {
+ return pids, err
+ }
+ procs, err := pg.FastProcessList()
+ if err != nil {
+ return pids, err
+ }
+ for _, p := range procs {
+ cmd, err := p.Cmdline()
+ if err != nil {
+ //skip, this can be caused by the pid no longer existing
+ //or you having no permissions to access it
+ continue
+ }
+ if regxPattern.MatchString(cmd) {
+ pids = append(pids, PID(p.Pid))
+ }
+ }
+ return pids, err
+}
+
+func (pg *NativeFinder) FastProcessList() ([]*process.Process, error) {
+ pids, err := process.Pids()
+ if err != nil {
+ return nil, err
+ }
+
+ result := make([]*process.Process, len(pids))
+ for i, pid := range pids {
+ result[i] = &process.Process{Pid: pid}
+ }
+ return result, nil
+}
diff --git a/plugins/inputs/procstat/native_finder_notwindows.go b/plugins/inputs/procstat/native_finder_notwindows.go
index a1683aad389bc..9d7409ba1df8e 100644
--- a/plugins/inputs/procstat/native_finder_notwindows.go
+++ b/plugins/inputs/procstat/native_finder_notwindows.go
@@ -4,8 +4,6 @@ package procstat
import (
"regexp"
-
- "github.com/shirou/gopsutil/process"
)
//Pattern matches on the process name
@@ -15,7 +13,7 @@ func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) {
if err != nil {
return pids, err
}
- procs, err := process.Processes()
+ procs, err := pg.FastProcessList()
if err != nil {
return pids, err
}
@@ -32,28 +30,3 @@ func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) {
}
return pids, err
}
-
-//FullPattern matches on the command line when the process was executed
-func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) {
- var pids []PID
- regxPattern, err := regexp.Compile(pattern)
- if err != nil {
- return pids, err
- }
- procs, err := process.Processes()
- if err != nil {
- return pids, err
- }
- for _, p := range procs {
- cmd, err := p.Cmdline()
- if err != nil {
- //skip, this can be caused by the pid no longer existing
- //or you having no permissions to access it
- continue
- }
- if regxPattern.MatchString(cmd) {
- pids = append(pids, PID(p.Pid))
- }
- }
- return pids, err
-}
diff --git a/plugins/inputs/procstat/native_finder_test.go b/plugins/inputs/procstat/native_finder_test.go
new file mode 100644
index 0000000000000..56d1e578cad88
--- /dev/null
+++ b/plugins/inputs/procstat/native_finder_test.go
@@ -0,0 +1,29 @@
+package procstat
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func BenchmarkPattern(b *testing.B) {
+ f, err := NewNativeFinder()
+ require.NoError(b, err)
+ for n := 0; n < b.N; n++ {
+ _, err := f.Pattern(".*")
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+func BenchmarkFullPattern(b *testing.B) {
+ f, err := NewNativeFinder()
+ require.NoError(b, err)
+ for n := 0; n < b.N; n++ {
+ _, err := f.FullPattern(".*")
+ if err != nil {
+ panic(err)
+ }
+ }
+}
diff --git a/plugins/inputs/procstat/native_finder_windows.go b/plugins/inputs/procstat/native_finder_windows.go
index f9c1013ca4c64..6dcc0575af258 100644
--- a/plugins/inputs/procstat/native_finder_windows.go
+++ b/plugins/inputs/procstat/native_finder_windows.go
@@ -1,34 +1,17 @@
package procstat
import (
- "context"
- "fmt"
"regexp"
- "time"
-
- "github.com/StackExchange/wmi"
- "github.com/shirou/gopsutil/process"
-)
-
-//Timeout is the timeout used when making wmi calls
-var Timeout = 5 * time.Second
-
-type queryType string
-
-const (
- like = queryType("LIKE")
- equals = queryType("=")
- notEqual = queryType("!=")
)
-//Pattern matches on the process name
+// Pattern matches on the process name
func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) {
var pids []PID
regxPattern, err := regexp.Compile(pattern)
if err != nil {
return pids, err
}
- procs, err := process.Processes()
+ procs, err := pg.FastProcessList()
if err != nil {
return pids, err
}
@@ -45,47 +28,3 @@ func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) {
}
return pids, err
}
-
-//FullPattern matches the cmdLine on windows and will find a pattern using a WMI like query
-func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) {
- var pids []PID
- procs, err := getWin32ProcsByVariable("CommandLine", like, pattern, Timeout)
- if err != nil {
- return pids, err
- }
- for _, p := range procs {
- pids = append(pids, PID(p.ProcessID))
- }
- return pids, nil
-}
-
-//GetWin32ProcsByVariable allows you to query any variable with a like query
-func getWin32ProcsByVariable(variable string, qType queryType, value string, timeout time.Duration) ([]process.Win32_Process, error) {
- var dst []process.Win32_Process
- var query string
- // should look like "WHERE CommandLine LIKE "procstat"
- query = fmt.Sprintf("WHERE %s %s %q", variable, qType, value)
- q := wmi.CreateQuery(&dst, query)
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
- err := WMIQueryWithContext(ctx, q, &dst)
- if err != nil {
- return []process.Win32_Process{}, fmt.Errorf("could not get win32Proc: %s", err)
- }
- return dst, nil
-}
-
-// WMIQueryWithContext - wraps wmi.Query with a timed-out context to avoid hanging
-func WMIQueryWithContext(ctx context.Context, query string, dst interface{}, connectServerArgs ...interface{}) error {
- errChan := make(chan error, 1)
- go func() {
- errChan <- wmi.Query(query, dst, connectServerArgs...)
- }()
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- case err := <-errChan:
- return err
- }
-}
diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go
index 703febaa9253a..48bf76ed69e52 100644
--- a/plugins/inputs/procstat/pgrep.go
+++ b/plugins/inputs/procstat/pgrep.go
@@ -10,7 +10,7 @@ import (
"github.com/influxdata/telegraf/internal"
)
-// Implemention of PIDGatherer that execs pgrep to find processes
+// Implementation of PIDGatherer that execs pgrep to find processes
type Pgrep struct {
path string
}
diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go
index 94a57c1924765..042929f0864cf 100644
--- a/plugins/inputs/procstat/process.go
+++ b/plugins/inputs/procstat/process.go
@@ -12,6 +12,7 @@ type Process interface {
PID() PID
Tags() map[string]string
+ PageFaults() (*process.PageFaultsStat, error)
IOCounters() (*process.IOCountersStat, error)
MemoryInfo() (*process.MemoryInfoStat, error)
Name() (string, error)
@@ -20,9 +21,11 @@ type Process interface {
NumFDs() (int32, error)
NumThreads() (int32, error)
Percent(interval time.Duration) (float64, error)
+ MemoryPercent() (float32, error)
Times() (*cpu.TimesStat, error)
RlimitUsage(bool) ([]process.RlimitStat, error)
Username() (string, error)
+ CreateTime() (int64, error)
}
type PIDFinder interface {
diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index 55552bb4af58a..61e575370537b 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -69,9 +69,12 @@ var sampleConfig = `
## When true add the full cmdline as a tag.
# cmdline_tag = false
- ## Add PID as a tag instead of a field; useful to differentiate between
- ## processes whose tags are otherwise the same. Can create a large number
- ## of series, use judiciously.
+ ## Add the PID as a tag instead of as a field. When collecting multiple
+ ## processes with otherwise matching tags this setting should be enabled to
+ ## ensure each process has a unique identity.
+ ##
+ ## Enabling this option may result in a large number of series, especially
+ ## when processes have a short lifetime.
# pid_tag = false
## Method to use when finding process IDs. Can be one of 'pgrep', or
@@ -200,6 +203,14 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) {
fields[prefix+"involuntary_context_switches"] = ctx.Involuntary
}
+ faults, err := proc.PageFaults()
+ if err == nil {
+ fields[prefix+"minor_faults"] = faults.MinorFaults
+ fields[prefix+"major_faults"] = faults.MajorFaults
+ fields[prefix+"child_minor_faults"] = faults.ChildMinorFaults
+ fields[prefix+"child_major_faults"] = faults.ChildMajorFaults
+ }
+
io, err := proc.IOCounters()
if err == nil {
fields[prefix+"read_count"] = io.ReadCount
@@ -208,6 +219,11 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) {
fields[prefix+"write_bytes"] = io.WriteBytes
}
+ createdAt, err := proc.CreateTime() //Returns epoch in ms
+ if err == nil {
+ fields[prefix+"created_at"] = createdAt * 1000000 //Convert ms to ns
+ }
+
cpu_time, err := proc.Times()
if err == nil {
fields[prefix+"cpu_time_user"] = cpu_time.User
@@ -218,7 +234,6 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) {
fields[prefix+"cpu_time_irq"] = cpu_time.Irq
fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq
fields[prefix+"cpu_time_steal"] = cpu_time.Steal
- fields[prefix+"cpu_time_stolen"] = cpu_time.Stolen
fields[prefix+"cpu_time_guest"] = cpu_time.Guest
fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice
}
@@ -238,6 +253,11 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) {
fields[prefix+"memory_locked"] = mem.Locked
}
+ mem_perc, err := proc.MemoryPercent()
+ if err == nil {
+ fields[prefix+"memory_usage"] = mem_perc
+ }
+
rlims, err := proc.RlimitUsage(true)
if err == nil {
for _, rlim := range rlims {
@@ -287,6 +307,10 @@ func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo
for _, pid := range pids {
info, ok := prevInfo[pid]
if ok {
+ // Assumption: if a process has no name, it probably does not exist
+ if name, _ := info.Name(); name == "" {
+ continue
+ }
procs[pid] = info
} else {
proc, err := p.createProcess(pid)
@@ -294,6 +318,10 @@ func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo
// No problem; process may have ended after we found it
continue
}
+ // Assumption: if a process has no name, it probably does not exist
+ if name, _ := proc.Name(); name == "" {
+ continue
+ }
procs[pid] = proc
// Add initial tags
@@ -382,7 +410,7 @@ func (p *Procstat) systemdUnitPIDs() ([]PID, error) {
if !bytes.Equal(kv[0], []byte("MainPID")) {
continue
}
- if len(kv[1]) == 0 {
+ if len(kv[1]) == 0 || bytes.Equal(kv[1], []byte("0")) {
return nil, nil
}
pid, err := strconv.Atoi(string(kv[1]))
diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go
index 191c056ea9078..e1ee8ab921841 100644
--- a/plugins/inputs/procstat/procstat_test.go
+++ b/plugins/inputs/procstat/procstat_test.go
@@ -116,6 +116,10 @@ func (p *testProc) Tags() map[string]string {
return p.tags
}
+func (p *testProc) PageFaults() (*process.PageFaultsStat, error) {
+ return &process.PageFaultsStat{}, nil
+}
+
func (p *testProc) IOCounters() (*process.IOCountersStat, error) {
return &process.IOCountersStat{}, nil
}
@@ -144,6 +148,14 @@ func (p *testProc) Percent(interval time.Duration) (float64, error) {
return 0, nil
}
+func (p *testProc) MemoryPercent() (float32, error) {
+ return 0, nil
+}
+
+func (p *testProc) CreateTime() (int64, error) {
+ return 0, nil
+}
+
func (p *testProc) Times() (*cpu.TimesStat, error) {
return &cpu.TimesStat{}, nil
}
diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md
index c1f50bb966d30..e9dd119cc12d4 100644
--- a/plugins/inputs/prometheus/README.md
+++ b/plugins/inputs/prometheus/README.md
@@ -11,6 +11,15 @@ in Prometheus format.
## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
+ ## Metric version controls the mapping from Prometheus metrics into
+ ## Telegraf metrics. When using the prometheus_client output, use the same
+ ## value in both plugins to ensure metrics are round-tripped without
+ ## modification.
+ ##
+ ## example: metric_version = 1; deprecated in 1.13
+ ## metric_version = 2; recommended version
+ # metric_version = 1
+
## An array of Kubernetes services to scrape metrics from.
# kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
@@ -27,12 +36,22 @@ in Prometheus format.
## Restricts Kubernetes monitoring to a single namespace
## ex: monitor_kubernetes_pods_namespace = "default"
# monitor_kubernetes_pods_namespace = ""
+ # label selector to target pods which have the label
+ # kubernetes_label_selector = "env=dev,app=nginx"
+ # field selector to target pods
+ # eg. To scrape pods on a specific node
+ # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
## Use bearer token for authorization. ('bearer_token' takes priority)
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
+ ## HTTP Basic Authentication username and password. ('bearer_token' and
+ ## 'bearer_token_string' take priority)
+ # username = ""
+ # password = ""
+
## Specify timeout duration for slower prometheus clients (default is 3s)
# response_timeout = "3s"
@@ -84,7 +103,7 @@ If you want to monitor Caddy, you need to use Caddy with its Prometheus plugin:
* Restart Caddy
* Configure Telegraf to fetch metrics on it:
-```
+```toml
[[inputs.prometheus]]
# ## An array of urls to scrape metrics from.
urls = ["http://localhost:9180/metrics"]
@@ -135,3 +154,18 @@ cpu_usage_user,cpu=cpu1,url=http://example.org:9273/metrics gauge=5.829145728641
cpu_usage_user,cpu=cpu2,url=http://example.org:9273/metrics gauge=2.119071644805144 1505776751000000000
cpu_usage_user,cpu=cpu3,url=http://example.org:9273/metrics gauge=1.5228426395944945 1505776751000000000
```
+
+**Output (when metric_version = 2)**
+```
+prometheus,quantile=1,url=http://example.org:9273/metrics go_gc_duration_seconds=0.005574303 1556075100000000000
+prometheus,quantile=0.75,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0001046 1556075100000000000
+prometheus,quantile=0.5,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000719 1556075100000000000
+prometheus,quantile=0.25,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000579 1556075100000000000
+prometheus,quantile=0,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000349 1556075100000000000
+prometheus,url=http://example.org:9273/metrics go_gc_duration_seconds_count=324,go_gc_duration_seconds_sum=0.091340353 1556075100000000000
+prometheus,url=http://example.org:9273/metrics go_goroutines=15 1556075100000000000
+prometheus,cpu=cpu0,url=http://example.org:9273/metrics cpu_usage_user=1.513622603430151 1505776751000000000
+prometheus,cpu=cpu1,url=http://example.org:9273/metrics cpu_usage_user=5.829145728641773 1505776751000000000
+prometheus,cpu=cpu2,url=http://example.org:9273/metrics cpu_usage_user=2.119071644805144 1505776751000000000
+prometheus,cpu=cpu3,url=http://example.org:9273/metrics cpu_usage_user=1.5228426395944945 1505776751000000000
+```
diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go
index d92d90ead72fc..16f69cbd14228 100644
--- a/plugins/inputs/prometheus/kubernetes.go
+++ b/plugins/inputs/prometheus/kubernetes.go
@@ -68,7 +68,7 @@ func (p *Prometheus) start(ctx context.Context) error {
case <-time.After(time.Second):
err := p.watch(ctx, client)
if err != nil {
- log.Printf("E! [inputs.prometheus] unable to watch resources: %v", err)
+ p.Log.Errorf("Unable to watch resources: %s", err.Error())
}
}
}
@@ -82,8 +82,11 @@ func (p *Prometheus) start(ctx context.Context) error {
// pod, causing errors in the logs. This is only true if the pod going offline is not
// directed to do so by K8s.
func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error {
+
+ selectors := podSelector(p)
+
pod := &corev1.Pod{}
- watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{})
+ watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}, selectors...)
if err != nil {
return err
}
@@ -135,6 +138,21 @@ func podReady(statuss []*corev1.ContainerStatus) bool {
return true
}
+func podSelector(p *Prometheus) []k8s.Option {
+ options := []k8s.Option{}
+
+ if len(p.KubernetesLabelSelector) > 0 {
+ options = append(options, k8s.QueryParam("labelSelector", p.KubernetesLabelSelector))
+ }
+
+ if len(p.KubernetesFieldSelector) > 0 {
+ options = append(options, k8s.QueryParam("fieldSelector", p.KubernetesFieldSelector))
+ }
+
+ return options
+
+}
+
func registerPod(pod *corev1.Pod, p *Prometheus) {
if p.kubernetesPods == nil {
p.kubernetesPods = map[string]URLAndAddress{}
@@ -144,7 +162,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) {
return
}
- log.Printf("D! [inputs.prometheus] will scrape metrics from %s", *targetURL)
+ log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL)
// add annotation as metrics tags
tags := pod.GetMetadata().GetAnnotations()
if tags == nil {
@@ -158,7 +176,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) {
}
URL, err := url.Parse(*targetURL)
if err != nil {
- log.Printf("E! [inputs.prometheus] could not parse URL %s: %v", *targetURL, err)
+ log.Printf("E! [inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error())
return
}
podURL := p.AddressToURL(URL, URL.Hostname())
@@ -211,13 +229,13 @@ func unregisterPod(pod *corev1.Pod, p *Prometheus) {
return
}
- log.Printf("D! [inputs.prometheus] registered a delete request for %s in namespace %s",
+ log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q",
pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace())
p.lock.Lock()
defer p.lock.Unlock()
if _, ok := p.kubernetesPods[*url]; ok {
delete(p.kubernetesPods, *url)
- log.Printf("D! [inputs.prometheus] will stop scraping for %s", *url)
+ log.Printf("D! [inputs.prometheus] will stop scraping for %q", *url)
}
}
diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go
index c1bbe0a1f8aaa..8568ac946437e 100644
--- a/plugins/inputs/prometheus/kubernetes_test.go
+++ b/plugins/inputs/prometheus/kubernetes_test.go
@@ -1,8 +1,10 @@
package prometheus
import (
+ "github.com/ericchiang/k8s"
"testing"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
v1 "github.com/ericchiang/k8s/apis/core/v1"
@@ -53,7 +55,7 @@ func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) {
}
func TestAddPod(t *testing.T) {
- prom := &Prometheus{}
+ prom := &Prometheus{Log: testutil.Logger{}}
p := pod()
p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
@@ -62,7 +64,7 @@ func TestAddPod(t *testing.T) {
}
func TestAddMultipleDuplicatePods(t *testing.T) {
- prom := &Prometheus{}
+ prom := &Prometheus{Log: testutil.Logger{}}
p := pod()
p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
@@ -73,7 +75,7 @@ func TestAddMultipleDuplicatePods(t *testing.T) {
}
func TestAddMultiplePods(t *testing.T) {
- prom := &Prometheus{}
+ prom := &Prometheus{Log: testutil.Logger{}}
p := pod()
p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
@@ -85,7 +87,7 @@ func TestAddMultiplePods(t *testing.T) {
}
func TestDeletePods(t *testing.T) {
- prom := &Prometheus{}
+ prom := &Prometheus{Log: testutil.Logger{}}
p := pod()
p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
@@ -94,8 +96,54 @@ func TestDeletePods(t *testing.T) {
assert.Equal(t, 0, len(prom.kubernetesPods))
}
+func TestPodSelector(t *testing.T) {
+
+ cases := []struct {
+ expected []k8s.Option
+ labelselector string
+ fieldselector string
+ }{
+ {
+ expected: []k8s.Option{
+ k8s.QueryParam("labelSelector", "key1=val1,key2=val2,key3"),
+ k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"),
+ },
+ labelselector: "key1=val1,key2=val2,key3",
+ fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com",
+ },
+ {
+ expected: []k8s.Option{
+ k8s.QueryParam("labelSelector", "key1"),
+ k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"),
+ },
+ labelselector: "key1",
+ fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com",
+ },
+ {
+ expected: []k8s.Option{
+ k8s.QueryParam("labelSelector", "key1"),
+ k8s.QueryParam("fieldSelector", "somefield"),
+ },
+ labelselector: "key1",
+ fieldselector: "somefield",
+ },
+ }
+
+ for _, c := range cases {
+ prom := &Prometheus{
+ Log: testutil.Logger{},
+ KubernetesLabelSelector: c.labelselector,
+ KubernetesFieldSelector: c.fieldselector,
+ }
+
+ output := podSelector(prom)
+
+ assert.Equal(t, len(output), len(c.expected))
+ }
+}
+
func pod() *v1.Pod {
- p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}}
+ p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}, Spec: &v1.PodSpec{}}
p.Status.PodIP = str("127.0.0.1")
p.Metadata.Name = str("myPod")
p.Metadata.Namespace = str("default")
diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go
index 6584fbc05e466..6427c3f8c6d52 100644
--- a/plugins/inputs/prometheus/parser.go
+++ b/plugins/inputs/prometheus/parser.go
@@ -15,12 +15,151 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
-
"github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
)
+// Parse returns a slice of Metrics from a text representation of a
+// metrics
+func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) {
+ var metrics []telegraf.Metric
+ var parser expfmt.TextParser
+ // parse even if the buffer begins with a newline
+ buf = bytes.TrimPrefix(buf, []byte("\n"))
+ // Read raw data
+ buffer := bytes.NewBuffer(buf)
+ reader := bufio.NewReader(buffer)
+
+ mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
+ // Prepare output
+ metricFamilies := make(map[string]*dto.MetricFamily)
+
+ if err == nil && mediatype == "application/vnd.google.protobuf" &&
+ params["encoding"] == "delimited" &&
+ params["proto"] == "io.prometheus.client.MetricFamily" {
+ for {
+ mf := &dto.MetricFamily{}
+ if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
+ if ierr == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr)
+ }
+ metricFamilies[mf.GetName()] = mf
+ }
+ } else {
+ metricFamilies, err = parser.TextToMetricFamilies(reader)
+ if err != nil {
+ return nil, fmt.Errorf("reading text format failed: %s", err)
+ }
+ }
+
+ // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds
+ now := time.Now()
+ // read metrics
+ for metricName, mf := range metricFamilies {
+ for _, m := range mf.Metric {
+ // reading tags
+ tags := makeLabels(m)
+
+ if mf.GetType() == dto.MetricType_SUMMARY {
+ // summary metric
+ telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType(), now)
+ metrics = append(metrics, telegrafMetrics...)
+ } else if mf.GetType() == dto.MetricType_HISTOGRAM {
+ // histogram metric
+ telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType(), now)
+ metrics = append(metrics, telegrafMetrics...)
+ } else {
+ // standard metric
+ // reading fields
+ fields := make(map[string]interface{})
+ fields = getNameAndValueV2(m, metricName)
+ // converting to telegraf metric
+ if len(fields) > 0 {
+ var t time.Time
+ if m.TimestampMs != nil && *m.TimestampMs > 0 {
+ t = time.Unix(0, *m.TimestampMs*1000000)
+ } else {
+ t = now
+ }
+ metric, err := metric.New("prometheus", tags, fields, t, valueType(mf.GetType()))
+ if err == nil {
+ metrics = append(metrics, metric)
+ }
+ }
+ }
+ }
+ }
+
+ return metrics, err
+}
+
+// Get Quantiles for summary metric & Buckets for histogram
+func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric {
+ var metrics []telegraf.Metric
+ fields := make(map[string]interface{})
+ var t time.Time
+ if m.TimestampMs != nil && *m.TimestampMs > 0 {
+ t = time.Unix(0, *m.TimestampMs*1000000)
+ } else {
+ t = now
+ }
+ fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount())
+ fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum())
+ met, err := metric.New("prometheus", tags, fields, t, valueType(metricType))
+ if err == nil {
+ metrics = append(metrics, met)
+ }
+
+ for _, q := range m.GetSummary().Quantile {
+ newTags := tags
+ fields = make(map[string]interface{})
+
+ newTags["quantile"] = fmt.Sprint(q.GetQuantile())
+ fields[metricName] = float64(q.GetValue())
+
+ quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType))
+ if err == nil {
+ metrics = append(metrics, quantileMetric)
+ }
+ }
+ return metrics
+}
+
+// Get Buckets from histogram metric
+func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric {
+ var metrics []telegraf.Metric
+ fields := make(map[string]interface{})
+ var t time.Time
+ if m.TimestampMs != nil && *m.TimestampMs > 0 {
+ t = time.Unix(0, *m.TimestampMs*1000000)
+ } else {
+ t = now
+ }
+ fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount())
+ fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum())
+
+ met, err := metric.New("prometheus", tags, fields, t, valueType(metricType))
+ if err == nil {
+ metrics = append(metrics, met)
+ }
+
+ for _, b := range m.GetHistogram().Bucket {
+ newTags := tags
+ fields = make(map[string]interface{})
+ newTags["le"] = fmt.Sprint(b.GetUpperBound())
+ fields[metricName+"_bucket"] = float64(b.GetCumulativeCount())
+
+ histogramMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType))
+ if err == nil {
+ metrics = append(metrics, histogramMetric)
+ }
+ }
+ return metrics
+}
+
// Parse returns a slice of Metrics from a text representation of a
// metrics
func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
@@ -56,6 +195,8 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
}
}
+ // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds
+ now := time.Now()
// read metrics
for metricName, mf := range metricFamilies {
for _, m := range mf.Metric {
@@ -84,7 +225,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
if m.TimestampMs != nil && *m.TimestampMs > 0 {
t = time.Unix(0, *m.TimestampMs*1000000)
} else {
- t = time.Now()
+ t = now
}
metric, err := metric.New(metricName, tags, fields, t, valueType(mf.GetType()))
if err == nil {
@@ -159,3 +300,22 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} {
}
return fields
}
+
+// Get name and value from metric
+func getNameAndValueV2(m *dto.Metric, metricName string) map[string]interface{} {
+ fields := make(map[string]interface{})
+ if m.Gauge != nil {
+ if !math.IsNaN(m.GetGauge().GetValue()) {
+ fields[metricName] = float64(m.GetGauge().GetValue())
+ }
+ } else if m.Counter != nil {
+ if !math.IsNaN(m.GetCounter().GetValue()) {
+ fields[metricName] = float64(m.GetCounter().GetValue())
+ }
+ } else if m.Untyped != nil {
+ if !math.IsNaN(m.GetUntyped().GetValue()) {
+ fields[metricName] = float64(m.GetUntyped().GetValue())
+ }
+ }
+ return fields
+}
diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index a4409c5b037d0..b4a8204b724ba 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io/ioutil"
- "log"
"net"
"net/http"
"net/url"
@@ -14,11 +13,11 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
-const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3`
+const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1`
type Prometheus struct {
// An array of urls to scrape metrics from.
@@ -30,14 +29,30 @@ type Prometheus struct {
// Location of kubernetes config file
KubeConfig string
+ // Label Selector/s for Kubernetes
+ KubernetesLabelSelector string `toml:"kubernetes_label_selector"`
+
+ // Field Selector/s for Kubernetes
+ KubernetesFieldSelector string `toml:"kubernetes_field_selector"`
+
// Bearer Token authorization file path
BearerToken string `toml:"bearer_token"`
BearerTokenString string `toml:"bearer_token_string"`
+ // Basic authentication credentials
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+
ResponseTimeout internal.Duration `toml:"response_timeout"`
+ MetricVersion int `toml:"metric_version"`
+
+ URLTag string `toml:"url_tag"`
+
tls.ClientConfig
+ Log telegraf.Logger
+
client *http.Client
// Should we scrape Kubernetes services for prometheus annotations
@@ -53,6 +68,18 @@ var sampleConfig = `
## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
+ ## Metric version controls the mapping from Prometheus metrics into
+ ## Telegraf metrics. When using the prometheus_client output, use the same
+ ## value in both plugins to ensure metrics are round-tripped without
+ ## modification.
+ ##
+ ## example: metric_version = 1; deprecated in 1.13
+ ## metric_version = 2; recommended version
+ # metric_version = 1
+
+ ## Url tag name (tag containing scrapped url. optional, default is "url")
+ # url_tag = "scrapeUrl"
+
## An array of Kubernetes services to scrape metrics from.
# kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
@@ -69,12 +96,22 @@ var sampleConfig = `
## Restricts Kubernetes monitoring to a single namespace
## ex: monitor_kubernetes_pods_namespace = "default"
# monitor_kubernetes_pods_namespace = ""
+ # label selector to target pods which have the label
+ # kubernetes_label_selector = "env=dev,app=nginx"
+ # field selector to target pods
+ # eg. To scrape pods on a specific node
+ # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
## Use bearer token for authorization. ('bearer_token' takes priority)
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
+ ## HTTP Basic Authentication username and password. ('bearer_token' and
+ ## 'bearer_token_string' take priority)
+ # username = ""
+ # password = ""
+
## Specify timeout duration for slower prometheus clients (default is 3s)
# response_timeout = "3s"
@@ -94,6 +131,14 @@ func (p *Prometheus) Description() string {
return "Read metrics from one or many prometheus clients"
}
+func (p *Prometheus) Init() error {
+ if p.MetricVersion != 2 {
+ p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'")
+ }
+
+ return nil
+}
+
var ErrProtocolError = errors.New("prometheus protocol error")
func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
@@ -127,7 +172,7 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
for _, u := range p.URLs {
URL, err := url.Parse(u)
if err != nil {
- log.Printf("prometheus: Could not parse %s, skipping it. Error: %s", u, err.Error())
+ p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error())
continue
}
allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL}
@@ -148,7 +193,7 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
resolvedAddresses, err := net.LookupHost(URL.Hostname())
if err != nil {
- log.Printf("prometheus: Could not resolve %s, skipping it. Error: %s", URL.Host, err.Error())
+ p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error())
continue
}
for _, resolved := range resolvedAddresses {
@@ -214,6 +259,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
var req *http.Request
var err error
var uClient *http.Client
+ var metrics []telegraf.Metric
if u.URL.Scheme == "unix" {
path := u.URL.Query().Get("path")
if path == "" {
@@ -251,6 +297,8 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
req.Header.Set("Authorization", "Bearer "+string(token))
} else if p.BearerTokenString != "" {
req.Header.Set("Authorization", "Bearer "+p.BearerTokenString)
+ } else if p.Username != "" || p.Password != "" {
+ req.SetBasicAuth(p.Username, p.Password)
}
var resp *http.Response
@@ -273,7 +321,12 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
return fmt.Errorf("error reading body: %s", err)
}
- metrics, err := Parse(body, resp.Header)
+ if p.MetricVersion == 2 {
+ metrics, err = ParseV2(body, resp.Header)
+ } else {
+ metrics, err = Parse(body, resp.Header)
+ }
+
if err != nil {
return fmt.Errorf("error reading metrics for %s: %s",
u.URL, err)
@@ -283,7 +336,9 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
tags := metric.Tags()
// strip user and password from URL
u.OriginalURL.User = nil
- tags["url"] = u.OriginalURL.String()
+ if p.URLTag != "" {
+ tags[p.URLTag] = u.OriginalURL.String()
+ }
if u.Address != "" {
tags["address"] = u.Address
}
@@ -330,6 +385,7 @@ func init() {
return &Prometheus{
ResponseTimeout: internal.Duration{Duration: time.Second * 3},
kubernetesPods: map[string]URLAndAddress{},
+ URLTag: "url",
}
})
}
diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go
index ef3902fc908cb..d33cba273c276 100644
--- a/plugins/inputs/prometheus/prometheus_test.go
+++ b/plugins/inputs/prometheus/prometheus_test.go
@@ -2,12 +2,14 @@ package prometheus
import (
"fmt"
+ "math"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -29,6 +31,21 @@ go_goroutines 15
# TYPE test_metric untyped
test_metric{label="value"} 1.0 1490802350000
`
+const sampleSummaryTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 0.00010425500000000001
+go_gc_duration_seconds{quantile="0.25"} 0.000139108
+go_gc_duration_seconds{quantile="0.5"} 0.00015749400000000002
+go_gc_duration_seconds{quantile="0.75"} 0.000331463
+go_gc_duration_seconds{quantile="1"} 0.000667154
+go_gc_duration_seconds_sum 0.0018183950000000002
+go_gc_duration_seconds_count 7
+`
+const sampleGaugeTextFormat = `
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 15 1490802350000
+`
func TestPrometheusGeneratesMetrics(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -37,7 +54,9 @@ func TestPrometheusGeneratesMetrics(t *testing.T) {
defer ts.Close()
p := &Prometheus{
- URLs: []string{ts.URL},
+ Log: testutil.Logger{},
+ URLs: []string{ts.URL},
+ URLTag: "url",
}
var acc testutil.Accumulator
@@ -60,7 +79,9 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) {
defer ts.Close()
p := &Prometheus{
+ Log: testutil.Logger{},
KubernetesServices: []string{ts.URL},
+ URLTag: "url",
}
u, _ := url.Parse(ts.URL)
tsAddress := u.Hostname()
@@ -89,6 +110,7 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) {
defer ts.Close()
p := &Prometheus{
+ Log: testutil.Logger{},
URLs: []string{ts.URL},
KubernetesServices: []string{"http://random.telegraf.local:88/metrics"},
}
@@ -103,3 +125,112 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) {
assert.True(t, acc.HasFloatField("test_metric", "value"))
assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0)))
}
+
+func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, sampleSummaryTextFormat)
+ }))
+ defer ts.Close()
+
+ p := &Prometheus{
+ URLs: []string{ts.URL},
+ URLTag: "url",
+ MetricVersion: 2,
+ }
+
+ var acc testutil.Accumulator
+
+ err := acc.GatherError(p.Gather)
+ require.NoError(t, err)
+
+ assert.True(t, acc.TagSetValue("prometheus", "quantile") == "0")
+ assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum"))
+ assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count"))
+ assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics")
+
+}
+
+func TestSummaryMayContainNaN(t *testing.T) {
+ const data = `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} NaN
+go_gc_duration_seconds{quantile="1"} NaN
+go_gc_duration_seconds_sum 42.0
+go_gc_duration_seconds_count 42
+`
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, data)
+ }))
+ defer ts.Close()
+
+ p := &Prometheus{
+ URLs: []string{ts.URL},
+ URLTag: "",
+ MetricVersion: 2,
+ }
+
+ var acc testutil.Accumulator
+
+ err := p.Gather(&acc)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{
+ "quantile": "0",
+ },
+ map[string]interface{}{
+ "go_gc_duration_seconds": math.NaN(),
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{
+ "quantile": "1",
+ },
+ map[string]interface{}{
+ "go_gc_duration_seconds": math.NaN(),
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "go_gc_duration_seconds_sum": 42.0,
+ "go_gc_duration_seconds_count": 42.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime(), testutil.SortMetrics())
+}
+
+func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, sampleGaugeTextFormat)
+ }))
+ defer ts.Close()
+
+ p := &Prometheus{
+ URLs: []string{ts.URL},
+ URLTag: "url",
+ MetricVersion: 2,
+ }
+
+ var acc testutil.Accumulator
+
+ err := acc.GatherError(p.Gather)
+ require.NoError(t, err)
+
+ assert.True(t, acc.HasFloatField("prometheus", "go_goroutines"))
+ assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics")
+ assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0)))
+}
diff --git a/plugins/inputs/proxmox/README.md b/plugins/inputs/proxmox/README.md
new file mode 100644
index 0000000000000..767756178b1ce
--- /dev/null
+++ b/plugins/inputs/proxmox/README.md
@@ -0,0 +1,62 @@
+# Proxmox Input Plugin
+
+The proxmox plugin gathers metrics about containers and VMs using the Proxmox API.
+
+### Configuration:
+
+```toml
+[[inputs.proxmox]]
+ ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /.
+ base_url = "https://localhost:8006/api2/json"
+ api_token = "USER@REALM!TOKENID=UUID"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ insecure_skip_verify = false
+
+ # HTTP response timeout (default: 5s)
+ response_timeout = "5s"
+```
+
+#### Permissions
+
+The plugin will need to have access to the Proxmox API. An API token
+must be provided with the corresponding user being assigned at least the PVEAuditor
+role on /.
+
+### Measurements & Fields:
+
+- proxmox
+ - status
+ - uptime
+ - cpuload
+ - mem_used
+ - mem_total
+ - mem_free
+ - mem_used_percentage
+ - swap_used
+ - swap_total
+ - swap_free
+ - swap_used_percentage
+ - disk_used
+ - disk_total
+ - disk_free
+ - disk_used_percentage
+
+### Tags:
+
+ - node_fqdn - FQDN of the node telegraf is running on
+ - vm_name - Name of the VM/container
+ - vm_fqdn - FQDN of the VM/container
+ - vm_type - Type of the VM/container (lxc, qemu)
+
+### Example Output:
+
+```
+$ ./telegraf --config telegraf.conf --input-filter proxmox --test
+> proxmox,host=pxnode,node_fqdn=pxnode.example.com,vm_fqdn=vm1.example.com,vm_name=vm1,vm_type=lxc cpuload=0.147998116735236,disk_free=4461129728i,disk_total=5217320960i,disk_used=756191232i,disk_used_percentage=14,mem_free=1046827008i,mem_total=1073741824i,mem_used=26914816i,mem_used_percentage=2,status="running",swap_free=536698880i,swap_total=536870912i,swap_used=172032i,swap_used_percentage=0,uptime=1643793i 1595457277000000000
+> ...
+```
diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go
new file mode 100644
index 0000000000000..41b74760aa869
--- /dev/null
+++ b/plugins/inputs/proxmox/proxmox.go
@@ -0,0 +1,250 @@
+package proxmox
+
+import (
+ "encoding/json"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+)
+
+var sampleConfig = `
+ ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /.
+ base_url = "https://localhost:8006/api2/json"
+ api_token = "USER@REALM!TOKENID=UUID"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ insecure_skip_verify = false
+
+ # HTTP response timeout (default: 5s)
+ response_timeout = "5s"
+`
+
+func (px *Proxmox) SampleConfig() string {
+ return sampleConfig
+}
+
+func (px *Proxmox) Description() string {
+ return "Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2)."
+}
+
+func (px *Proxmox) Gather(acc telegraf.Accumulator) error {
+ err := getNodeSearchDomain(px)
+ if err != nil {
+ return err
+ }
+
+ gatherLxcData(px, acc)
+ gatherQemuData(px, acc)
+
+ return nil
+}
+
+func (px *Proxmox) Init() error {
+ hostname, err := os.Hostname()
+ if err != nil {
+ return err
+ }
+ px.hostname = hostname
+
+ tlsCfg, err := px.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+ px.httpClient = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ },
+ Timeout: px.ResponseTimeout.Duration,
+ }
+
+ return nil
+}
+
+func init() {
+ px := Proxmox{
+ requestFunction: performRequest,
+ }
+
+ inputs.Add("proxmox", func() telegraf.Input { return &px })
+}
+
+func getNodeSearchDomain(px *Proxmox) error {
+ apiUrl := "/nodes/" + px.hostname + "/dns"
+ jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
+
+ var nodeDns NodeDns
+ err = json.Unmarshal(jsonData, &nodeDns)
+ if err != nil {
+ return err
+ }
+ px.nodeSearchDomain = nodeDns.Data.Searchdomain
+
+ return nil
+}
+
+func performRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) {
+ request, err := http.NewRequest(method, px.BaseURL+apiUrl, strings.NewReader(data.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ request.Header.Add("Authorization", "PVEAPIToken="+px.APIToken)
+
+ resp, err := px.httpClient.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ responseBody, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ return responseBody, nil
+}
+
+func gatherLxcData(px *Proxmox, acc telegraf.Accumulator) {
+ gatherVmData(px, acc, LXC)
+}
+
+func gatherQemuData(px *Proxmox, acc telegraf.Accumulator) {
+ gatherVmData(px, acc, QEMU)
+}
+
+func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) {
+ vmStats, err := getVmStats(px, rt)
+ if err != nil {
+ px.Log.Error("Error getting VM stats: %v", err)
+ return
+ }
+
+ // For each VM add metrics to Accumulator
+ for _, vmStat := range vmStats.Data {
+ vmConfig, err := getVmConfig(px, vmStat.ID, rt)
+ if err != nil {
+ px.Log.Error("Error getting VM config: %v", err)
+ return
+ }
+ tags := getTags(px, vmStat.Name, vmConfig, rt)
+ fields, err := getFields(vmStat)
+ if err != nil {
+ px.Log.Error("Error getting VM measurements: %v", err)
+ return
+ }
+ acc.AddFields("proxmox", fields, tags)
+ }
+}
+
+func getVmStats(px *Proxmox, rt ResourceType) (VmStats, error) {
+ apiUrl := "/nodes/" + px.hostname + "/" + string(rt)
+ jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
+ if err != nil {
+ return VmStats{}, err
+ }
+
+ var vmStats VmStats
+ err = json.Unmarshal(jsonData, &vmStats)
+ if err != nil {
+ return VmStats{}, err
+ }
+
+ return vmStats, nil
+}
+
+func getVmConfig(px *Proxmox, vmId string, rt ResourceType) (VmConfig, error) {
+ apiUrl := "/nodes/" + px.hostname + "/" + string(rt) + "/" + vmId + "/config"
+ jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
+ if err != nil {
+ return VmConfig{}, err
+ }
+
+ var vmConfig VmConfig
+ err = json.Unmarshal(jsonData, &vmConfig)
+ if err != nil {
+ return VmConfig{}, err
+ }
+
+ return vmConfig, nil
+}
+
+func getFields(vmStat VmStat) (map[string]interface{}, error) {
+ mem_total, mem_used, mem_free, mem_used_percentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem)
+ swap_total, swap_used, swap_free, swap_used_percentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap)
+ disk_total, disk_used, disk_free, disk_used_percentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk)
+
+ return map[string]interface{}{
+ "status": vmStat.Status,
+ "uptime": jsonNumberToInt64(vmStat.Uptime),
+ "cpuload": jsonNumberToFloat64(vmStat.CpuLoad),
+ "mem_used": mem_used,
+ "mem_total": mem_total,
+ "mem_free": mem_free,
+ "mem_used_percentage": mem_used_percentage,
+ "swap_used": swap_used,
+ "swap_total": swap_total,
+ "swap_free": swap_free,
+ "swap_used_percentage": swap_used_percentage,
+ "disk_used": disk_used,
+ "disk_total": disk_total,
+ "disk_free": disk_free,
+ "disk_used_percentage": disk_used_percentage,
+ }, nil
+}
+
+func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, float64) {
+ int64Total := jsonNumberToInt64(total)
+ int64Used := jsonNumberToInt64(used)
+ int64Free := int64Total - int64Used
+ usedPercentage := 0.0
+ if int64Total != 0 {
+ usedPercentage = float64(int64Used) * 100 / float64(int64Total)
+ }
+
+ return int64Total, int64Used, int64Free, usedPercentage
+}
+
+func jsonNumberToInt64(value json.Number) int64 {
+ int64Value, err := value.Int64()
+ if err != nil {
+ return 0
+ }
+
+ return int64Value
+}
+
+func jsonNumberToFloat64(value json.Number) float64 {
+ float64Value, err := value.Float64()
+ if err != nil {
+ return 0
+ }
+
+ return float64Value
+}
+
+func getTags(px *Proxmox, name string, vmConfig VmConfig, rt ResourceType) map[string]string {
+ domain := vmConfig.Data.Searchdomain
+ if len(domain) == 0 {
+ domain = px.nodeSearchDomain
+ }
+
+ hostname := vmConfig.Data.Hostname
+ if len(hostname) == 0 {
+ hostname = name
+ }
+ fqdn := hostname + "." + domain
+
+ return map[string]string{
+ "node_fqdn": px.hostname + "." + px.nodeSearchDomain,
+ "vm_name": name,
+ "vm_fqdn": fqdn,
+ "vm_type": string(rt),
+ }
+}
diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go
new file mode 100644
index 0000000000000..274ebdf69ff28
--- /dev/null
+++ b/plugins/inputs/proxmox/proxmox_test.go
@@ -0,0 +1,136 @@
+package proxmox
+
+import (
+ "github.com/bmizerany/assert"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+ "net/url"
+ "strings"
+ "testing"
+)
+
+var nodeSearchDomainTestData = `{"data":{"search":"test.example.com","dns1":"1.0.0.1"}}`
+var qemuTestData = `{"data":[{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}]}`
+var qemuConfigTestData = `{"data":{"hostname":"qemu1","searchdomain":"test.example.com"}}`
+var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}]}`
+var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.example.com"}}`
+
+func performTestRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) {
+ var bytedata = []byte("")
+
+ if strings.HasSuffix(apiUrl, "dns") {
+ bytedata = []byte(nodeSearchDomainTestData)
+ } else if strings.HasSuffix(apiUrl, "qemu") {
+ bytedata = []byte(qemuTestData)
+ } else if strings.HasSuffix(apiUrl, "113/config") {
+ bytedata = []byte(qemuConfigTestData)
+ } else if strings.HasSuffix(apiUrl, "lxc") {
+ bytedata = []byte(lxcTestData)
+ } else if strings.HasSuffix(apiUrl, "111/config") {
+ bytedata = []byte(lxcConfigTestData)
+ }
+
+ return bytedata, nil
+}
+
+func setUp(t *testing.T) *Proxmox {
+ px := &Proxmox{
+ requestFunction: performTestRequest,
+ }
+
+ require.NoError(t, px.Init())
+
+ // Override hostname and logger for test
+ px.hostname = "testnode"
+ px.Log = testutil.Logger{}
+ return px
+}
+
+func TestGetNodeSearchDomain(t *testing.T) {
+ px := setUp(t)
+
+ err := getNodeSearchDomain(px)
+
+ require.NoError(t, err)
+ assert.Equal(t, px.nodeSearchDomain, "test.example.com")
+}
+
+func TestGatherLxcData(t *testing.T) {
+ px := setUp(t)
+ px.nodeSearchDomain = "test.example.com"
+
+ acc := &testutil.Accumulator{}
+ gatherLxcData(px, acc)
+
+ assert.Equal(t, acc.NFields(), 15)
+ testFields := map[string]interface{}{
+ "status": "running",
+ "uptime": int64(2078164),
+ "cpuload": float64(0.00371567669193613),
+ "mem_used": int64(98500608),
+ "mem_total": int64(536870912),
+ "mem_free": int64(438370304),
+ "mem_used_percentage": float64(18.34716796875),
+ "swap_used": int64(9412608),
+ "swap_total": int64(536870912),
+ "swap_free": int64(527458304),
+ "swap_used_percentage": float64(1.75323486328125),
+ "disk_used": int64(744189952),
+ "disk_total": int64(5217320960),
+ "disk_free": int64(4473131008),
+ "disk_used_percentage": float64(14.26383306117322),
+ }
+ testTags := map[string]string{
+ "node_fqdn": "testnode.test.example.com",
+ "vm_name": "container1",
+ "vm_fqdn": "container1.test.example.com",
+ "vm_type": "lxc",
+ }
+ acc.AssertContainsTaggedFields(t, "proxmox", testFields, testTags)
+}
+
+func TestGatherQemuData(t *testing.T) {
+ px := setUp(t)
+ px.nodeSearchDomain = "test.example.com"
+
+ acc := &testutil.Accumulator{}
+ gatherQemuData(px, acc)
+
+ assert.Equal(t, acc.NFields(), 15)
+ testFields := map[string]interface{}{
+ "status": "running",
+ "uptime": int64(2159739),
+ "cpuload": float64(0.029336643550795),
+ "mem_used": int64(1722451796),
+ "mem_total": int64(2147483648),
+ "mem_free": int64(425031852),
+ "mem_used_percentage": float64(80.20791206508875),
+ "swap_used": int64(0),
+ "swap_total": int64(0),
+ "swap_free": int64(0),
+ "swap_used_percentage": float64(0),
+ "disk_used": int64(0),
+ "disk_total": int64(10737418240),
+ "disk_free": int64(10737418240),
+ "disk_used_percentage": float64(0),
+ }
+ testTags := map[string]string{
+ "node_fqdn": "testnode.test.example.com",
+ "vm_name": "qemu1",
+ "vm_fqdn": "qemu1.test.example.com",
+ "vm_type": "qemu",
+ }
+ acc.AssertContainsTaggedFields(t, "proxmox", testFields, testTags)
+}
+
+func TestGather(t *testing.T) {
+ px := setUp(t)
+ px.nodeSearchDomain = "test.example.com"
+
+ acc := &testutil.Accumulator{}
+ err := px.Gather(acc)
+ require.NoError(t, err)
+
+ // Results from both tests above
+ assert.Equal(t, acc.NFields(), 30)
+}
diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go
new file mode 100644
index 0000000000000..eef5dffff1f28
--- /dev/null
+++ b/plugins/inputs/proxmox/structs.go
@@ -0,0 +1,62 @@
+package proxmox
+
+import (
+ "encoding/json"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "net/http"
+ "net/url"
+)
+
+type Proxmox struct {
+ BaseURL string `toml:"base_url"`
+ APIToken string `toml:"api_token"`
+ ResponseTimeout internal.Duration `toml:"response_timeout"`
+ tls.ClientConfig
+
+ hostname string
+ httpClient *http.Client
+ nodeSearchDomain string
+
+ requestFunction func(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error)
+ Log telegraf.Logger `toml:"-"`
+}
+
+type ResourceType string
+
+var (
+ QEMU ResourceType = "qemu"
+ LXC ResourceType = "lxc"
+)
+
+type VmStats struct {
+ Data []VmStat `json:"data"`
+}
+
+type VmStat struct {
+ ID string `json:"vmid"`
+ Name string `json:"name"`
+ Status string `json:"status"`
+ UsedMem json.Number `json:"mem"`
+ TotalMem json.Number `json:"maxmem"`
+ UsedDisk json.Number `json:"disk"`
+ TotalDisk json.Number `json:"maxdisk"`
+ UsedSwap json.Number `json:"swap"`
+ TotalSwap json.Number `json:"maxswap"`
+ Uptime json.Number `json:"uptime"`
+ CpuLoad json.Number `json:"cpu"`
+}
+
+type VmConfig struct {
+ Data struct {
+ Searchdomain string `json:"searchdomain"`
+ Hostname string `json:"hostname"`
+ } `json:"data"`
+}
+
+type NodeDns struct {
+ Data struct {
+ Searchdomain string `json:"search"`
+ } `json:"data"`
+}
diff --git a/plugins/inputs/puppetagent/README.md b/plugins/inputs/puppetagent/README.md
index 480f7531ddbff..687005b98cc11 100644
--- a/plugins/inputs/puppetagent/README.md
+++ b/plugins/inputs/puppetagent/README.md
@@ -1,10 +1,10 @@
-## Telegraf Plugin: PuppetAgent
+# PuppetAgent Input Plugin
#### Description
The puppetagent plugin collects variables outputted from the 'last_run_summary.yaml' file
usually located in `/var/lib/puppet/state/`
-[PuppetAgent Runs](https://puppetlabs.com/blog/puppet-monitoring-how-to-monitor-the-success-or-failure-of-puppet-runs).
+[PuppetAgent Runs](https://puppet.com/blog/puppet-monitoring-how-to-monitor-success-or-failure-of-puppet-runs/).
```
cat /var/lib/puppet/state/last_run_summary.yaml
diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go
index c8a265bb857bc..1d0e30aa88ed5 100644
--- a/plugins/inputs/puppetagent/puppetagent.go
+++ b/plugins/inputs/puppetagent/puppetagent.go
@@ -79,7 +79,7 @@ func (pa *PuppetAgent) SampleConfig() string {
// Description returns description of PuppetAgent plugin
func (pa *PuppetAgent) Description() string {
- return `Reads last_run_summary.yaml file and converts to measurments`
+ return `Reads last_run_summary.yaml file and converts to measurements`
}
// Gather reads stats from all configured servers accumulates stats
diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md
index 0406df70057a3..1274b4ee230f8 100644
--- a/plugins/inputs/rabbitmq/README.md
+++ b/plugins/inputs/rabbitmq/README.md
@@ -1,10 +1,13 @@
# RabbitMQ Input Plugin
-Reads metrics from RabbitMQ servers via the [Management Plugin](https://www.rabbitmq.com/management.html).
+Reads metrics from RabbitMQ servers via the [Management Plugin][management].
-For additional details reference the [RabbitMQ Management HTTP Stats](https://cdn.rawgit.com/rabbitmq/rabbitmq-management/master/priv/www/doc/stats.html).
+For additional details reference the [RabbitMQ Management HTTP Stats][management-reference].
-### Configuration:
+[management]: https://www.rabbitmq.com/management.html
+[management-reference]: https://raw.githack.com/rabbitmq/rabbitmq-management/rabbitmq_v3_6_9/priv/www/api/index.html
+
+### Configuration
```toml
[[inputs.rabbitmq]]
@@ -49,131 +52,174 @@ For additional details reference the [RabbitMQ Management HTTP Stats](https://cd
## Note that an empty array for both will include all queues
# queue_name_include = []
# queue_name_exclude = []
-```
-### Measurements & Fields:
-
-- rabbitmq_overview
- - channels (int, channels)
- - connections (int, connections)
- - consumers (int, consumers)
- - exchanges (int, exchanges)
- - messages (int, messages)
- - messages_acked (int, messages)
- - messages_delivered (int, messages)
- - messages_delivered_get (int, messages)
- - messages_published (int, messages)
- - messages_ready (int, messages)
- - messages_unacked (int, messages)
- - queues (int, queues)
- - clustering_listeners (int, cluster nodes)
- - amqp_listeners (int, amqp nodes up)
- - return_unroutable (int, number of unroutable messages)
- - return_unroutable_rate (float, number of unroutable messages per second)
-
-- rabbitmq_node
- - disk_free (int, bytes)
- - disk_free_limit (int, bytes)
- - disk_free_alarm (int, disk alarm)
- - fd_total (int, file descriptors)
- - fd_used (int, file descriptors)
- - mem_limit (int, bytes)
- - mem_used (int, bytes)
- - mem_alarm (int, memory a)
- - proc_total (int, erlang processes)
- - proc_used (int, erlang processes)
- - run_queue (int, erlang processes)
- - sockets_total (int, sockets)
- - sockets_used (int, sockets)
- - running (int, node up)
- - uptime (int, milliseconds)
- - health_check_status (int, 1 or 0)
- - mnesia_disk_tx_count (int, number of disk transaction)
- - mnesia_ram_tx_count (int, number of ram transaction)
- - mnesia_disk_tx_count_rate (float, number of disk transaction per second)
- - mnesia_ram_tx_count_rate (float, number of ram transaction per second)
- - gc_num (int, number of garbage collection)
- - gc_bytes_reclaimed (int, bytes)
- - gc_num_rate (float, number of garbage collection per second)
- - gc_bytes_reclaimed_rate (float, bytes per second)
- - io_read_avg_time (float, number of read operations)
- - io_read_avg_time_rate (int, number of read operations per second)
- - io_read_bytes (int, bytes)
- - io_read_bytes_rate (float, bytes per second)
- - io_write_avg_time (int, milliseconds)
- - io_write_avg_time_rate (float, milliseconds per second)
- - io_write_bytes (int, bytes)
- - io_write_bytes_rate (float, bytes per second)
+ ## Federation upstreams to include and exclude specified as an array of glob
+ ## pattern strings. Federation links can also be limited by the queue and
+ ## exchange filters.
+ # federation_upstream_include = []
+ # federation_upstream_exclude = []
+```
-- rabbitmq_queue
- - consumer_utilisation (float, percent)
- - consumers (int, int)
- - idle_since (string, time - e.g., "2006-01-02 15:04:05")
- - memory (int, bytes)
- - message_bytes (int, bytes)
- - message_bytes_persist (int, bytes)
- - message_bytes_ram (int, bytes)
- - message_bytes_ready (int, bytes)
- - message_bytes_unacked (int, bytes)
- - messages (int, count)
- - messages_ack (int, count)
- - messages_ack_rate (float, messages per second)
- - messages_deliver (int, count)
- - messages_deliver_rate (float, messages per second)
- - messages_deliver_get (int, count)
- - messages_deliver_get_rate (float, messages per second)
- - messages_publish (int, count)
- - messages_publish_rate (float, messages per second)
- - messages_ready (int, count)
- - messages_redeliver (int, count)
- - messages_redeliver_rate (float, messages per second)
- - messages_unack (integer, count)
-
-- rabbitmq_exchange
- - messages_publish_in (int, count)
- - messages_publish_in_rate (int, messages per second)
- - messages_publish_out (int, count)
- - messages_publish_out_rate (int, messages per second)
-
-### Tags:
-
-- All measurements have the following tags:
- - url
+### Metrics
- rabbitmq_overview
- - name
-
-- rabbitmq_node
- - node
- - url
+ - tags:
+ - url
+ - name
+ - fields:
+ - channels (int, channels)
+ - connections (int, connections)
+ - consumers (int, consumers)
+ - exchanges (int, exchanges)
+ - messages (int, messages)
+ - messages_acked (int, messages)
+ - messages_delivered (int, messages)
+ - messages_delivered_get (int, messages)
+ - messages_published (int, messages)
+ - messages_ready (int, messages)
+ - messages_unacked (int, messages)
+ - queues (int, queues)
+ - clustering_listeners (int, cluster nodes)
+ - amqp_listeners (int, amqp nodes up)
+ - return_unroutable (int, number of unroutable messages)
+ - return_unroutable_rate (float, number of unroutable messages per second)
+
++ rabbitmq_node
+ - tags:
+ - url
+ - node
+ - url
+ - fields:
+ - disk_free (int, bytes)
+ - disk_free_limit (int, bytes)
+ - disk_free_alarm (int, disk alarm)
+ - fd_total (int, file descriptors)
+ - fd_used (int, file descriptors)
+ - mem_limit (int, bytes)
+ - mem_used (int, bytes)
+ - mem_alarm (int, memory a)
+ - proc_total (int, erlang processes)
+ - proc_used (int, erlang processes)
+ - run_queue (int, erlang processes)
+ - sockets_total (int, sockets)
+ - sockets_used (int, sockets)
+ - running (int, node up)
+ - uptime (int, milliseconds)
+ - mnesia_disk_tx_count (int, number of disk transaction)
+ - mnesia_ram_tx_count (int, number of ram transaction)
+ - mnesia_disk_tx_count_rate (float, number of disk transaction per second)
+ - mnesia_ram_tx_count_rate (float, number of ram transaction per second)
+ - gc_num (int, number of garbage collection)
+ - gc_bytes_reclaimed (int, bytes)
+ - gc_num_rate (float, number of garbage collection per second)
+ - gc_bytes_reclaimed_rate (float, bytes per second)
+ - io_read_avg_time (float, number of read operations)
+ - io_read_avg_time_rate (int, number of read operations per second)
+ - io_read_bytes (int, bytes)
+ - io_read_bytes_rate (float, bytes per second)
+ - io_write_avg_time (int, milliseconds)
+ - io_write_avg_time_rate (float, milliseconds per second)
+ - io_write_bytes (int, bytes)
+ - io_write_bytes_rate (float, bytes per second)
+ - mem_connection_readers (int, bytes)
+ - mem_connection_writers (int, bytes)
+ - mem_connection_channels (int, bytes)
+ - mem_connection_other (int, bytes)
+ - mem_queue_procs (int, bytes)
+ - mem_queue_slave_procs (int, bytes)
+ - mem_plugins (int, bytes)
+ - mem_other_proc (int, bytes)
+ - mem_metrics (int, bytes)
+ - mem_mgmt_db (int, bytes)
+ - mem_mnesia (int, bytes)
+ - mem_other_ets (int, bytes)
+ - mem_binary (int, bytes)
+ - mem_msg_index (int, bytes)
+ - mem_code (int, bytes)
+ - mem_atom (int, bytes)
+ - mem_other_system (int, bytes)
+ - mem_allocated_unused (int, bytes)
+ - mem_reserved_unallocated (int, bytes)
+ - mem_total (int, bytes)
- rabbitmq_queue
- - url
- - queue
- - vhost
- - node
- - durable
- - auto_delete
-
-- rabbitmq_exchange
- - url
- - exchange
- - type
- - vhost
- - internal
- - durable
- - auto_delete
-
-### Sample Queries:
+ - tags:
+ - url
+ - queue
+ - vhost
+ - node
+ - durable
+ - auto_delete
+ - fields:
+ - consumer_utilisation (float, percent)
+ - consumers (int, int)
+ - idle_since (string, time - e.g., "2006-01-02 15:04:05")
+ - memory (int, bytes)
+ - message_bytes (int, bytes)
+ - message_bytes_persist (int, bytes)
+ - message_bytes_ram (int, bytes)
+ - message_bytes_ready (int, bytes)
+ - message_bytes_unacked (int, bytes)
+ - messages (int, count)
+ - messages_ack (int, count)
+ - messages_ack_rate (float, messages per second)
+ - messages_deliver (int, count)
+ - messages_deliver_rate (float, messages per second)
+ - messages_deliver_get (int, count)
+ - messages_deliver_get_rate (float, messages per second)
+ - messages_publish (int, count)
+ - messages_publish_rate (float, messages per second)
+ - messages_ready (int, count)
+ - messages_redeliver (int, count)
+ - messages_redeliver_rate (float, messages per second)
+ - messages_unack (int, count)
+ - slave_nodes (int, count)
+ - synchronised_slave_nodes (int, count)
+
++ rabbitmq_exchange
+ - tags:
+ - url
+ - exchange
+ - type
+ - vhost
+ - internal
+ - durable
+ - auto_delete
+ - fields:
+ - messages_publish_in (int, count)
+ - messages_publish_in_rate (int, messages per second)
+ - messages_publish_out (int, count)
+ - messages_publish_out_rate (int, messages per second)
+
+- rabbitmq_federation
+ - tags:
+ - url
+ - vhost
+ - type
+ - upstream
+ - exchange
+ - upstream_exchange
+ - queue
+ - upstream_queue
+ - fields:
+ - acks_uncommitted (int, count)
+ - consumers (int, count)
+ - messages_unacknowledged (int, count)
+ - messages_uncommitted (int, count)
+ - messages_unconfirmed (int, count)
+ - messages_confirm (int, count)
+ - messages_publish (int, count)
+ - messages_return_unroutable (int, count)
+
+### Sample Queries
Message rates for the entire node can be calculated from total message counts. For instance, to get the rate of messages published per minute, use this query:
```
-SELECT NON_NEGATIVE_DERIVATIVE(LAST("messages_published"), 1m) AS messages_published_rate
-FROM rabbitmq_overview WHERE time > now() - 10m GROUP BY time(1m)
+SELECT NON_NEGATIVE_DERIVATIVE(LAST("messages_published"), 1m) AS messages_published_rate FROM rabbitmq_overview WHERE time > now() - 10m GROUP BY time(1m)
```
-### Example Output:
+### Example Output
```
rabbitmq_queue,url=http://amqp.example.org:15672,queue=telegraf,vhost=influxdb,node=rabbit@amqp.example.org,durable=true,auto_delete=false,host=amqp.example.org messages_deliver_get=0i,messages_publish=329i,messages_publish_rate=0.2,messages_redeliver_rate=0,message_bytes_ready=0i,message_bytes_unacked=0i,messages_deliver=329i,messages_unack=0i,consumers=1i,idle_since="",messages=0i,messages_deliver_rate=0.2,messages_deliver_get_rate=0.2,messages_redeliver=0i,memory=43032i,message_bytes_ram=0i,messages_ack=329i,messages_ready=0i,messages_ack_rate=0.2,consumer_utilisation=1,message_bytes=0i,message_bytes_persist=0i 1493684035000000000
diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go
index 4e7e918da17ef..4d8050c33fbca 100644
--- a/plugins/inputs/rabbitmq/rabbitmq.go
+++ b/plugins/inputs/rabbitmq/rabbitmq.go
@@ -11,19 +11,19 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
-// DefaultUsername will set a default value that corrasponds to the default
+// DefaultUsername will set a default value that corresponds to the default
// value used by Rabbitmq
const DefaultUsername = "guest"
-// DefaultPassword will set a default value that corrasponds to the default
+// DefaultPassword will set a default value that corresponds to the default
// value used by Rabbitmq
const DefaultPassword = "guest"
-// DefaultURL will set a default value that corrasponds to the default value
+// DefaultURL will set a default value that corresponds to the default value
// used by Rabbitmq
const DefaultURL = "http://localhost:15672"
@@ -34,27 +34,30 @@ const DefaultClientTimeout = 4
// RabbitMQ defines the configuration necessary for gathering metrics,
// see the sample config for further details
type RabbitMQ struct {
- URL string
- Name string
- Username string
- Password string
+ URL string `toml:"url"`
+ Name string `toml:"name"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
tls.ClientConfig
ResponseHeaderTimeout internal.Duration `toml:"header_timeout"`
ClientTimeout internal.Duration `toml:"client_timeout"`
- Nodes []string
- Queues []string
- Exchanges []string
+ Nodes []string `toml:"nodes"`
+ Queues []string `toml:"queues"`
+ Exchanges []string `toml:"exchanges"`
- QueueInclude []string `toml:"queue_name_include"`
- QueueExclude []string `toml:"queue_name_exclude"`
+ QueueInclude []string `toml:"queue_name_include"`
+ QueueExclude []string `toml:"queue_name_exclude"`
+ FederationUpstreamInclude []string `toml:"federation_upstream_include"`
+ FederationUpstreamExclude []string `toml:"federation_upstream_exclude"`
- Client *http.Client
+ Client *http.Client `toml:"-"`
filterCreated bool
excludeEveryQueue bool
queueFilter filter.Filter
+ upstreamFilter filter.Filter
}
// OverviewResponse ...
@@ -118,17 +121,19 @@ type QueueTotals struct {
// Queue ...
type Queue struct {
- QueueTotals // just to not repeat the same code
- MessageStats `json:"message_stats"`
- Memory int64
- Consumers int64
- ConsumerUtilisation float64 `json:"consumer_utilisation"`
- Name string
- Node string
- Vhost string
- Durable bool
- AutoDelete bool `json:"auto_delete"`
- IdleSince string `json:"idle_since"`
+ QueueTotals // just to not repeat the same code
+ MessageStats `json:"message_stats"`
+ Memory int64
+ Consumers int64
+ ConsumerUtilisation float64 `json:"consumer_utilisation"`
+ Name string
+ Node string
+ Vhost string
+ Durable bool
+ AutoDelete bool `json:"auto_delete"`
+ IdleSince string `json:"idle_since"`
+ SlaveNodes []string `json:"slave_nodes"`
+ SynchronisedSlaveNodes []string `json:"synchronised_slave_nodes"`
}
// Node ...
@@ -178,14 +183,75 @@ type Exchange struct {
AutoDelete bool `json:"auto_delete"`
}
+// FederationLinkChannelMessageStats ...
+type FederationLinkChannelMessageStats struct {
+ Confirm int64 `json:"confirm"`
+ ConfirmDetails Details `json:"confirm_details"`
+ Publish int64 `json:"publish"`
+ PublishDetails Details `json:"publish_details"`
+ ReturnUnroutable int64 `json:"return_unroutable"`
+ ReturnUnroutableDetails Details `json:"return_unroutable_details"`
+}
+
+// FederationLinkChannel ...
+type FederationLinkChannel struct {
+ AcksUncommitted int64 `json:"acks_uncommitted"`
+ ConsumerCount int64 `json:"consumer_count"`
+ MessagesUnacknowledged int64 `json:"messages_unacknowledged"`
+ MessagesUncommitted int64 `json:"messages_uncommitted"`
+ MessagesUnconfirmed int64 `json:"messages_unconfirmed"`
+ MessageStats FederationLinkChannelMessageStats `json:"message_stats"`
+}
+
+// FederationLink ...
+type FederationLink struct {
+ Type string `json:"type"`
+ Queue string `json:"queue"`
+ UpstreamQueue string `json:"upstream_queue"`
+ Exchange string `json:"exchange"`
+ UpstreamExchange string `json:"upstream_exchange"`
+ Vhost string `json:"vhost"`
+ Upstream string `json:"upstream"`
+ LocalChannel FederationLinkChannel `json:"local_channel"`
+}
+
type HealthCheck struct {
Status string `json:"status"`
}
+// MemoryResponse ...
+type MemoryResponse struct {
+ Memory *Memory `json:"memory"`
+}
+
+// Memory details
+type Memory struct {
+ ConnectionReaders int64 `json:"connection_readers"`
+ ConnectionWriters int64 `json:"connection_writers"`
+ ConnectionChannels int64 `json:"connection_channels"`
+ ConnectionOther int64 `json:"connection_other"`
+ QueueProcs int64 `json:"queue_procs"`
+ QueueSlaveProcs int64 `json:"queue_slave_procs"`
+ Plugins int64 `json:"plugins"`
+ OtherProc int64 `json:"other_proc"`
+ Metrics int64 `json:"metrics"`
+ MgmtDb int64 `json:"mgmt_db"`
+ Mnesia int64 `json:"mnesia"`
+ OtherEts int64 `json:"other_ets"`
+ Binary int64 `json:"binary"`
+ MsgIndex int64 `json:"msg_index"`
+ Code int64 `json:"code"`
+ Atom int64 `json:"atom"`
+ OtherSystem int64 `json:"other_system"`
+ AllocatedUnused int64 `json:"allocated_unused"`
+ ReservedUnallocated int64 `json:"reserved_unallocated"`
+ Total int64 `json:"total"`
+}
+
// gatherFunc ...
type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator)
-var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherExchanges}
+var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherExchanges, gatherFederationLinks}
var sampleConfig = `
## Management Plugin url. (default: http://localhost:15672)
@@ -229,6 +295,15 @@ var sampleConfig = `
## Note that an empty array for both will include all queues
queue_name_include = []
queue_name_exclude = []
+
+ ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement.
+ ## If neither are specified, metrics for all federation upstreams are gathered.
+ ## Federation link metrics will only be gathered for queues and exchanges
+ ## whose non-federation metrics will be collected (e.g a queue excluded
+ ## by the 'queue_name_exclude' option will also be excluded from federation).
+ ## Globs accepted.
+ # federation_upstream_include = ["dataCentre-*"]
+ # federation_upstream_exclude = []
`
func boolToInt(b bool) int64 {
@@ -265,12 +340,16 @@ func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error {
}
}
- // Create queue filter if not already created
+ // Create gather filters if not already created
if !r.filterCreated {
err := r.createQueueFilter()
if err != nil {
return err
}
+ err = r.createUpstreamFilter()
+ if err != nil {
+ return err
+ }
r.filterCreated = true
}
@@ -371,103 +450,99 @@ func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) {
}
func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) {
- allNodes := make([]Node, 0)
- // Gather information about nodes
+ allNodes := make([]*Node, 0)
+
err := r.requestJSON("/api/nodes", &allNodes)
if err != nil {
acc.AddError(err)
return
}
- nodes := make(map[string]Node)
+ nodes := allNodes[:0]
for _, node := range allNodes {
if r.shouldGatherNode(node) {
- nodes[node.Name] = node
+ nodes = append(nodes, node)
}
}
- numberNodes := len(nodes)
- if numberNodes == 0 {
- return
- }
+ var wg sync.WaitGroup
+ for _, node := range nodes {
+ wg.Add(1)
+ go func(node *Node) {
+ defer wg.Done()
- type NodeHealthCheck struct {
- NodeName string
- HealthCheck HealthCheck
- Error error
- }
+ tags := map[string]string{"url": r.URL}
+ tags["node"] = node.Name
+
+ fields := map[string]interface{}{
+ "disk_free": node.DiskFree,
+ "disk_free_limit": node.DiskFreeLimit,
+ "disk_free_alarm": boolToInt(node.DiskFreeAlarm),
+ "fd_total": node.FdTotal,
+ "fd_used": node.FdUsed,
+ "mem_limit": node.MemLimit,
+ "mem_used": node.MemUsed,
+ "mem_alarm": boolToInt(node.MemAlarm),
+ "proc_total": node.ProcTotal,
+ "proc_used": node.ProcUsed,
+ "run_queue": node.RunQueue,
+ "sockets_total": node.SocketsTotal,
+ "sockets_used": node.SocketsUsed,
+ "uptime": node.Uptime,
+ "mnesia_disk_tx_count": node.MnesiaDiskTxCount,
+ "mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate,
+ "mnesia_ram_tx_count": node.MnesiaRamTxCount,
+ "mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate,
+ "gc_num": node.GcNum,
+ "gc_num_rate": node.GcNumDetails.Rate,
+ "gc_bytes_reclaimed": node.GcBytesReclaimed,
+ "gc_bytes_reclaimed_rate": node.GcBytesReclaimedDetails.Rate,
+ "io_read_avg_time": node.IoReadAvgTime,
+ "io_read_avg_time_rate": node.IoReadAvgTimeDetails.Rate,
+ "io_read_bytes": node.IoReadBytes,
+ "io_read_bytes_rate": node.IoReadBytesDetails.Rate,
+ "io_write_avg_time": node.IoWriteAvgTime,
+ "io_write_avg_time_rate": node.IoWriteAvgTimeDetails.Rate,
+ "io_write_bytes": node.IoWriteBytes,
+ "io_write_bytes_rate": node.IoWriteBytesDetails.Rate,
+ "running": boolToInt(node.Running),
+ }
- healthChecksChannel := make(chan NodeHealthCheck, numberNodes)
+ var memory MemoryResponse
+ err = r.requestJSON("/api/nodes/"+node.Name+"/memory", &memory)
+ if err != nil {
+ acc.AddError(err)
+ return
+ }
- for _, node := range nodes {
- go func(nodeName string, healthChecksChannel chan NodeHealthCheck) {
- var healthCheck HealthCheck
-
- err := r.requestJSON("/api/healthchecks/node/"+nodeName, &healthCheck)
- nodeHealthCheck := NodeHealthCheck{
- NodeName: nodeName,
- Error: err,
- HealthCheck: healthCheck,
+ if memory.Memory != nil {
+ fields["mem_connection_readers"] = memory.Memory.ConnectionReaders
+ fields["mem_connection_writers"] = memory.Memory.ConnectionWriters
+ fields["mem_connection_channels"] = memory.Memory.ConnectionChannels
+ fields["mem_connection_other"] = memory.Memory.ConnectionOther
+ fields["mem_queue_procs"] = memory.Memory.QueueProcs
+ fields["mem_queue_slave_procs"] = memory.Memory.QueueSlaveProcs
+ fields["mem_plugins"] = memory.Memory.Plugins
+ fields["mem_other_proc"] = memory.Memory.OtherProc
+ fields["mem_metrics"] = memory.Memory.Metrics
+ fields["mem_mgmt_db"] = memory.Memory.MgmtDb
+ fields["mem_mnesia"] = memory.Memory.Mnesia
+ fields["mem_other_ets"] = memory.Memory.OtherEts
+ fields["mem_binary"] = memory.Memory.Binary
+ fields["mem_msg_index"] = memory.Memory.MsgIndex
+ fields["mem_code"] = memory.Memory.Code
+ fields["mem_atom"] = memory.Memory.Atom
+ fields["mem_other_system"] = memory.Memory.OtherSystem
+ fields["mem_allocated_unused"] = memory.Memory.AllocatedUnused
+ fields["mem_reserved_unallocated"] = memory.Memory.ReservedUnallocated
+ fields["mem_total"] = memory.Memory.Total
}
- healthChecksChannel <- nodeHealthCheck
- }(node.Name, healthChecksChannel)
+ acc.AddFields("rabbitmq_node", fields, tags)
+ }(node)
}
- now := time.Now()
-
- for i := 0; i < len(nodes); i++ {
- nodeHealthCheck := <-healthChecksChannel
-
- var healthCheckStatus int64 = 0
-
- if nodeHealthCheck.Error != nil {
- acc.AddError(nodeHealthCheck.Error)
- } else if nodeHealthCheck.HealthCheck.Status == "ok" {
- healthCheckStatus = 1
- }
-
- node := nodes[nodeHealthCheck.NodeName]
-
- tags := map[string]string{"url": r.URL}
- tags["node"] = node.Name
-
- fields := map[string]interface{}{
- "disk_free": node.DiskFree,
- "disk_free_limit": node.DiskFreeLimit,
- "disk_free_alarm": boolToInt(node.DiskFreeAlarm),
- "fd_total": node.FdTotal,
- "fd_used": node.FdUsed,
- "mem_limit": node.MemLimit,
- "mem_used": node.MemUsed,
- "mem_alarm": boolToInt(node.MemAlarm),
- "proc_total": node.ProcTotal,
- "proc_used": node.ProcUsed,
- "run_queue": node.RunQueue,
- "sockets_total": node.SocketsTotal,
- "sockets_used": node.SocketsUsed,
- "uptime": node.Uptime,
- "mnesia_disk_tx_count": node.MnesiaDiskTxCount,
- "mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate,
- "mnesia_ram_tx_count": node.MnesiaRamTxCount,
- "mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate,
- "gc_num": node.GcNum,
- "gc_num_rate": node.GcNumDetails.Rate,
- "gc_bytes_reclaimed": node.GcBytesReclaimed,
- "gc_bytes_reclaimed_rate": node.GcBytesReclaimedDetails.Rate,
- "io_read_avg_time": node.IoReadAvgTime,
- "io_read_avg_time_rate": node.IoReadAvgTimeDetails.Rate,
- "io_read_bytes": node.IoReadBytes,
- "io_read_bytes_rate": node.IoReadBytesDetails.Rate,
- "io_write_avg_time": node.IoWriteAvgTime,
- "io_write_avg_time_rate": node.IoWriteAvgTimeDetails.Rate,
- "io_write_bytes": node.IoWriteBytes,
- "io_write_bytes_rate": node.IoWriteBytesDetails.Rate,
- "running": boolToInt(node.Running),
- "health_check_status": healthCheckStatus,
- }
- acc.AddFields("rabbitmq_node", fields, tags, now)
- }
+ wg.Wait()
}
func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator) {
@@ -499,10 +574,12 @@ func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator) {
"rabbitmq_queue",
map[string]interface{}{
// common information
- "consumers": queue.Consumers,
- "consumer_utilisation": queue.ConsumerUtilisation,
- "idle_since": queue.IdleSince,
- "memory": queue.Memory,
+ "consumers": queue.Consumers,
+ "consumer_utilisation": queue.ConsumerUtilisation,
+ "idle_since": queue.IdleSince,
+ "slave_nodes": len(queue.SlaveNodes),
+ "synchronised_slave_nodes": len(queue.SynchronisedSlaveNodes),
+ "memory": queue.Memory,
// messages information
"message_bytes": queue.MessageBytes,
"message_bytes_ready": queue.MessageBytesReady,
@@ -538,7 +615,7 @@ func gatherExchanges(r *RabbitMQ, acc telegraf.Accumulator) {
}
for _, exchange := range exchanges {
- if !r.shouldGatherExchange(exchange) {
+ if !r.shouldGatherExchange(exchange.Name) {
continue
}
tags := map[string]string{
@@ -564,7 +641,53 @@ func gatherExchanges(r *RabbitMQ, acc telegraf.Accumulator) {
}
}
-func (r *RabbitMQ) shouldGatherNode(node Node) bool {
+func gatherFederationLinks(r *RabbitMQ, acc telegraf.Accumulator) {
+ // Gather information about federation links
+ federationLinks := make([]FederationLink, 0)
+ err := r.requestJSON("/api/federation-links", &federationLinks)
+ if err != nil {
+ acc.AddError(err)
+ return
+ }
+
+ for _, link := range federationLinks {
+ if !r.shouldGatherFederationLink(link) {
+ continue
+ }
+
+ tags := map[string]string{
+ "url": r.URL,
+ "type": link.Type,
+ "vhost": link.Vhost,
+ "upstream": link.Upstream,
+ }
+
+ if link.Type == "exchange" {
+ tags["exchange"] = link.Exchange
+ tags["upstream_exchange"] = link.UpstreamExchange
+ } else {
+ tags["queue"] = link.Queue
+ tags["upstream_queue"] = link.UpstreamQueue
+ }
+
+ acc.AddFields(
+ "rabbitmq_federation",
+ map[string]interface{}{
+ "acks_uncommitted": link.LocalChannel.AcksUncommitted,
+ "consumers": link.LocalChannel.ConsumerCount,
+ "messages_unacknowledged": link.LocalChannel.MessagesUnacknowledged,
+ "messages_uncommitted": link.LocalChannel.MessagesUncommitted,
+ "messages_unconfirmed": link.LocalChannel.MessagesUnconfirmed,
+ "messages_confirm": link.LocalChannel.MessageStats.Confirm,
+ "messages_publish": link.LocalChannel.MessageStats.Publish,
+ "messages_return_unroutable": link.LocalChannel.MessageStats.ReturnUnroutable,
+ },
+ tags,
+ )
+ }
+}
+
+func (r *RabbitMQ) shouldGatherNode(node *Node) bool {
if len(r.Nodes) == 0 {
return true
}
@@ -599,13 +722,23 @@ func (r *RabbitMQ) createQueueFilter() error {
return nil
}
-func (r *RabbitMQ) shouldGatherExchange(exchange Exchange) bool {
+func (r *RabbitMQ) createUpstreamFilter() error {
+ upstreamFilter, err := filter.NewIncludeExcludeFilter(r.FederationUpstreamInclude, r.FederationUpstreamExclude)
+ if err != nil {
+ return err
+ }
+ r.upstreamFilter = upstreamFilter
+
+ return nil
+}
+
+func (r *RabbitMQ) shouldGatherExchange(exchangeName string) bool {
if len(r.Exchanges) == 0 {
return true
}
for _, name := range r.Exchanges {
- if name == exchange.Name {
+ if name == exchangeName {
return true
}
}
@@ -613,6 +746,21 @@ func (r *RabbitMQ) shouldGatherExchange(exchange Exchange) bool {
return false
}
+func (r *RabbitMQ) shouldGatherFederationLink(link FederationLink) bool {
+ if !r.upstreamFilter.Match(link.Upstream) {
+ return false
+ }
+
+ switch link.Type {
+ case "exchange":
+ return r.shouldGatherExchange(link.Exchange)
+ case "queue":
+ return r.queueFilter.Match(link.Queue)
+ default:
+ return false
+ }
+}
+
func init() {
inputs.Add("rabbitmq", func() telegraf.Input {
return &RabbitMQ{
diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go
index 0f98f95ce2905..869e8036d157d 100644
--- a/plugins/inputs/rabbitmq/rabbitmq_test.go
+++ b/plugins/inputs/rabbitmq/rabbitmq_test.go
@@ -6,10 +6,11 @@ import (
"net/http/httptest"
"testing"
+ "io/ioutil"
+
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "io/ioutil"
)
func TestRabbitMQGeneratesMetrics(t *testing.T) {
@@ -25,8 +26,10 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) {
jsonFilePath = "testdata/queues.json"
case "/api/exchanges":
jsonFilePath = "testdata/exchanges.json"
- case "/api/healthchecks/node/rabbit@vagrant-ubuntu-trusty-64":
- jsonFilePath = "testdata/healthchecks.json"
+ case "/api/federation-links":
+ jsonFilePath = "testdata/federation-links.json"
+ case "/api/nodes/rabbit@vagrant-ubuntu-trusty-64/memory":
+ jsonFilePath = "testdata/memory.json"
default:
panic("Cannot handle request")
}
@@ -93,6 +96,8 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) {
"messages_redeliver": 33,
"messages_redeliver_rate": 2.5,
"idle_since": "2015-11-01 8:22:14",
+ "slave_nodes": 1,
+ "synchronised_slave_nodes": 1,
}
compareMetrics(t, queuesMetrics, acc, "rabbitmq_queue")
@@ -112,7 +117,6 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) {
"sockets_used": 45,
"uptime": 7464827,
"running": 1,
- "health_check_status": 1,
"mnesia_disk_tx_count": 16,
"mnesia_ram_tx_count": 296,
"mnesia_disk_tx_count_rate": 1.1,
@@ -129,6 +133,26 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) {
"io_write_avg_time_rate": 4.32,
"io_write_bytes": 823,
"io_write_bytes_rate": 32.8,
+ "mem_connection_readers": 1234,
+ "mem_connection_writers": 5678,
+ "mem_connection_channels": 1133,
+ "mem_connection_other": 2840,
+ "mem_queue_procs": 2840,
+ "mem_queue_slave_procs": 0,
+ "mem_plugins": 1755976,
+ "mem_other_proc": 23056584,
+ "mem_metrics": 196536,
+ "mem_mgmt_db": 491272,
+ "mem_mnesia": 115600,
+ "mem_other_ets": 2121872,
+ "mem_binary": 418848,
+ "mem_msg_index": 42848,
+ "mem_code": 25179322,
+ "mem_atom": 1041593,
+ "mem_other_system": 14741981,
+ "mem_allocated_unused": 38208528,
+ "mem_reserved_unallocated": 0,
+ "mem_total": 83025920,
}
compareMetrics(t, nodeMetrics, acc, "rabbitmq_node")
@@ -139,6 +163,18 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) {
"messages_publish_out_rate": 5.1,
}
compareMetrics(t, exchangeMetrics, acc, "rabbitmq_exchange")
+
+ federationLinkMetrics := map[string]interface{}{
+ "acks_uncommitted": 1,
+ "consumers": 2,
+ "messages_unacknowledged": 3,
+ "messages_uncommitted": 4,
+ "messages_unconfirmed": 5,
+ "messages_confirm": 67,
+ "messages_publish": 890,
+ "messages_return_unroutable": 1,
+ }
+ compareMetrics(t, federationLinkMetrics, acc, "rabbitmq_federation")
}
func compareMetrics(t *testing.T, expectedMetrics map[string]interface{},
diff --git a/plugins/inputs/rabbitmq/testdata/federation-links.json b/plugins/inputs/rabbitmq/testdata/federation-links.json
new file mode 100644
index 0000000000000..4cf5148705371
--- /dev/null
+++ b/plugins/inputs/rabbitmq/testdata/federation-links.json
@@ -0,0 +1,63 @@
+[
+ {
+ "node": "rabbit@rmqlocal",
+ "queue": "exampleLocalQueue",
+ "upstream_queue": "exampleUpstreamQueue",
+ "type": "queue",
+ "vhost": "/",
+ "upstream": "ExampleFederationUpstream",
+ "id": "8ba5218f",
+ "status": "running",
+ "local_connection": "",
+ "uri": "amqp://appsv03",
+ "timestamp": "2019-08-19 15:34:15",
+ "local_channel": {
+ "acks_uncommitted": 1,
+ "confirm": true,
+ "connection_details": {
+ "name": "",
+ "peer_host": "undefined",
+ "peer_port": "undefined"
+ },
+ "consumer_count": 2,
+ "garbage_collection": {
+ "fullsweep_after": 65535,
+ "max_heap_size": 0,
+ "min_bin_vheap_size": 46422,
+ "min_heap_size": 233,
+ "minor_gcs": 203
+ },
+ "global_prefetch_count": 0,
+ "message_stats": {
+ "confirm": 67,
+ "confirm_details": {
+ "rate": 2
+ },
+ "publish": 890,
+ "publish_details": {
+ "rate": 2
+ },
+ "return_unroutable": 1,
+ "return_unroutable_details": {
+ "rate": 0.1
+ }
+ },
+ "messages_unacknowledged": 3,
+ "messages_uncommitted": 4,
+ "messages_unconfirmed": 5,
+ "name": "",
+ "node": "rabbit@rmqlocal",
+ "number": 1,
+ "prefetch_count": 0,
+ "reductions": 1926653,
+ "reductions_details": {
+ "rate": 1068
+ },
+ "state": "running",
+ "transactional": false,
+ "user": "none",
+ "user_who_performed_action": "none",
+ "vhost": "sorandomsorandom"
+ }
+ }
+]
diff --git a/plugins/inputs/rabbitmq/testdata/healthchecks.json b/plugins/inputs/rabbitmq/testdata/healthchecks.json
deleted file mode 100644
index 1a36cf5fc27a0..0000000000000
--- a/plugins/inputs/rabbitmq/testdata/healthchecks.json
+++ /dev/null
@@ -1 +0,0 @@
-{"status":"ok"}
\ No newline at end of file
diff --git a/plugins/inputs/rabbitmq/testdata/memory.json b/plugins/inputs/rabbitmq/testdata/memory.json
new file mode 100644
index 0000000000000..da252eb61221b
--- /dev/null
+++ b/plugins/inputs/rabbitmq/testdata/memory.json
@@ -0,0 +1,24 @@
+{
+ "memory": {
+ "connection_readers": 1234,
+ "connection_writers": 5678,
+ "connection_channels": 1133,
+ "connection_other": 2840,
+ "queue_procs": 2840,
+ "queue_slave_procs": 0,
+ "plugins": 1755976,
+ "other_proc": 23056584,
+ "metrics": 196536,
+ "mgmt_db": 491272,
+ "mnesia": 115600,
+ "other_ets": 2121872,
+ "binary": 418848,
+ "msg_index": 42848,
+ "code": 25179322,
+ "atom": 1041593,
+ "other_system": 14741981,
+ "allocated_unused": 38208528,
+ "reserved_unallocated": 0,
+ "total": 83025920
+ }
+}
\ No newline at end of file
diff --git a/plugins/inputs/rabbitmq/testdata/queues.json b/plugins/inputs/rabbitmq/testdata/queues.json
index 356e1a4663daa..294f78872a91c 100644
--- a/plugins/inputs/rabbitmq/testdata/queues.json
+++ b/plugins/inputs/rabbitmq/testdata/queues.json
@@ -109,6 +109,12 @@
"exclusive_consumer_tag": null,
"effective_policy_definition": [],
"operator_policy": null,
- "policy": null
+ "policy": null,
+ "slave_nodes":[
+ "rabbit@ip-10-1-2-118"
+ ],
+ "synchronised_slave_nodes":[
+ "rabbit@ip-10-1-2-118"
+ ]
}
]
\ No newline at end of file
diff --git a/plugins/inputs/redfish/README.md b/plugins/inputs/redfish/README.md
new file mode 100644
index 0000000000000..a22b9d3141741
--- /dev/null
+++ b/plugins/inputs/redfish/README.md
@@ -0,0 +1,127 @@
+# Redfish Input Plugin
+
+The `redfish` plugin gathers metrics and status information about CPU temperature, fanspeed, Powersupply, voltage, hostname and Location details (datacenter, placement, rack and room) of hardware servers for which [DMTF's Redfish](https://redfish.dmtf.org/) is enabled.
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration
+
+```toml
+[[inputs.redfish]]
+ ## Redfish API Base URL.
+ address = "https://127.0.0.1:5000"
+
+ ## Credentials for the Redfish API.
+ username = "root"
+ password = "password123456"
+
+ ## System Id to collect data for in Redfish APIs.
+ computer_system_id="System.Embedded.1"
+
+ ## Amount of time allowed to complete the HTTP request
+ # timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+```
+
+### Metrics
+
+- redfish_thermal_temperatures
+ - tags:
+ - source
+ - address
+ - name
+ - datacenter (available only if location data is found)
+ - rack (available only if location data is found)
+ - room (available only if location data is found)
+ - row (available only if location data is found)
+ - state
+ - health
+ - fields:
+ - reading_celsius
+ - upper_threshold_critical
+ - upper_threshold_fatal
+ - lower_threshold_critical
+ - lower_threshold_fatal
+
+
++ redfish_thermal_fans
+ - tags:
+ - source
+ - address
+ - name
+ - datacenter (available only if location data is found)
+ - rack (available only if location data is found)
+ - room (available only if location data is found)
+ - row (available only if location data is found)
+ - state
+ - health
+ - fields:
+ - reading_rpm (or) reading_percent
+ - upper_threshold_critical
+ - upper_threshold_fatal
+ - lower_threshold_critical
+ - lower_threshold_fatal
+
+
+- redfish_power_powersupplies
+ - tags:
+ - source
+ - address
+ - name
+ - datacenter (available only if location data is found)
+ - rack (available only if location data is found)
+ - room (available only if location data is found)
+ - row (available only if location data is found)
+ - state
+ - health
+ - fields:
+ - last_power_output_watts
+ - line_input_voltage
+ - power_capacity_watts
+ - power_input_watts
+ - power_output_watts
+
+
+- redfish_power_voltages (available only if voltage data is found)
+ - tags:
+ - source
+ - address
+ - name
+ - datacenter (available only if location data is found)
+ - rack (available only if location data is found)
+ - room (available only if location data is found)
+ - row (available only if location data is found)
+ - state
+ - health
+ - fields:
+ - reading_volts
+ - upper_threshold_critical
+ - upper_threshold_fatal
+ - lower_threshold_critical
+ - lower_threshold_fatal
+
+
+### Example Output
+
+```
+redfish_thermal_temperatures,source=test-hostname,name=CPU1,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=41,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_thermal_temperatures,source=test-hostname,name=CPU2,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=51,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_thermal_temperatures,source=test-hostname,name=SystemBoardInlet,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=23,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_thermal_temperatures,source=test-hostname,name=SystemBoardExhaust,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=33,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1A,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17720,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1B,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17760,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_thermal_fans,source=test-hostname,name=SystemBoardFan2A,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17880,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_power_powersupplies,source=test-hostname,name=PS1Status,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=208,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000
+redfish_power_powersupplies,source=test-hostname,name=PS2Status,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=194,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000
+redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_power_voltages,source=test-hostname,name=CPU1MEM347,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+redfish_power_voltages,source=test-hostname,name=PS1voltage1,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=208,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000
+
+```
diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go
new file mode 100644
index 0000000000000..54d1d15b8c097
--- /dev/null
+++ b/plugins/inputs/redfish/redfish.go
@@ -0,0 +1,380 @@
+package redfish
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+const description = "Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs"
+const sampleConfig = `
+ ## Server url
+ address = "https://127.0.0.1:5000"
+
+ ## Username, Password for hardware server
+ username = "root"
+ password = "password123456"
+
+ ## ComputerSystemId
+ computer_system_id="2M220100SL"
+
+ ## Amount of time allowed to complete the HTTP request
+ # timeout = "5s"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+`
+
+type Redfish struct {
+ Address string `toml:"address"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ ComputerSystemId string `toml:"computer_system_id"`
+ Timeout config.Duration `toml:"timeout"`
+
+ client http.Client
+ tls.ClientConfig
+ baseURL *url.URL
+}
+
+type System struct {
+ Hostname string `json:"hostname"`
+ Links struct {
+ Chassis []struct {
+ Ref string `json:"@odata.id"`
+ }
+ }
+}
+
+type Chassis struct {
+ Location *Location
+ Power struct {
+ Ref string `json:"@odata.id"`
+ }
+ Thermal struct {
+ Ref string `json:"@odata.id"`
+ }
+}
+
+type Power struct {
+ PowerSupplies []struct {
+ Name string
+ PowerInputWatts *float64
+ PowerCapacityWatts *float64
+ PowerOutputWatts *float64
+ LastPowerOutputWatts *float64
+ Status Status
+ LineInputVoltage *float64
+ }
+ Voltages []struct {
+ Name string
+ ReadingVolts *float64
+ UpperThresholdCritical *float64
+ UpperThresholdFatal *float64
+ LowerThresholdCritical *float64
+ LowerThresholdFatal *float64
+ Status Status
+ }
+}
+
+type Thermal struct {
+ Fans []struct {
+ Name string
+ Reading *int64
+ ReadingUnits *string
+ UpperThresholdCritical *int64
+ UpperThresholdFatal *int64
+ LowerThresholdCritical *int64
+ LowerThresholdFatal *int64
+ Status Status
+ }
+ Temperatures []struct {
+ Name string
+ ReadingCelsius *float64
+ UpperThresholdCritical *float64
+ UpperThresholdFatal *float64
+ LowerThresholdCritical *float64
+ LowerThresholdFatal *float64
+ Status Status
+ }
+}
+
+type Location struct {
+ PostalAddress struct {
+ DataCenter string
+ Room string
+ }
+ Placement struct {
+ Rack string
+ Row string
+ }
+}
+
+type Status struct {
+ State string
+ Health string
+}
+
+func (r *Redfish) Description() string {
+ return description
+}
+
+func (r *Redfish) SampleConfig() string {
+ return sampleConfig
+}
+
+func (r *Redfish) Init() error {
+ if r.Address == "" {
+ return fmt.Errorf("did not provide IP")
+ }
+
+ if r.Username == "" && r.Password == "" {
+ return fmt.Errorf("did not provide username and password")
+ }
+
+ if r.ComputerSystemId == "" {
+ return fmt.Errorf("did not provide the computer system ID of the resource")
+ }
+
+ var err error
+ r.baseURL, err = url.Parse(r.Address)
+ if err != nil {
+ return err
+ }
+
+ tlsCfg, err := r.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ r.client = http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ Proxy: http.ProxyFromEnvironment,
+ },
+ Timeout: time.Duration(r.Timeout),
+ }
+
+ return nil
+}
+
+func (r *Redfish) getData(url string, payload interface{}) error {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+
+ req.SetBasicAuth(r.Username, r.Password)
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("Content-Type", "application/json")
+ resp, err := r.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("received status code %d (%s), expected 200",
+ resp.StatusCode,
+ http.StatusText(resp.StatusCode))
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ err = json.Unmarshal(body, &payload)
+ if err != nil {
+ return fmt.Errorf("error parsing input: %v", err)
+ }
+
+ return nil
+}
+
+func (r *Redfish) getComputerSystem(id string) (*System, error) {
+ loc := r.baseURL.ResolveReference(&url.URL{Path: path.Join("/redfish/v1/Systems/", id)})
+ system := &System{}
+ err := r.getData(loc.String(), system)
+ if err != nil {
+ return nil, err
+ }
+ return system, nil
+}
+
+func (r *Redfish) getChassis(ref string) (*Chassis, error) {
+ loc := r.baseURL.ResolveReference(&url.URL{Path: ref})
+ chassis := &Chassis{}
+ err := r.getData(loc.String(), chassis)
+ if err != nil {
+ return nil, err
+ }
+ return chassis, nil
+}
+
+func (r *Redfish) getPower(ref string) (*Power, error) {
+ loc := r.baseURL.ResolveReference(&url.URL{Path: ref})
+ power := &Power{}
+ err := r.getData(loc.String(), power)
+ if err != nil {
+ return nil, err
+ }
+ return power, nil
+}
+
+func (r *Redfish) getThermal(ref string) (*Thermal, error) {
+ loc := r.baseURL.ResolveReference(&url.URL{Path: ref})
+ thermal := &Thermal{}
+ err := r.getData(loc.String(), thermal)
+ if err != nil {
+ return nil, err
+ }
+ return thermal, nil
+}
+
+func (r *Redfish) Gather(acc telegraf.Accumulator) error {
+ address, _, err := net.SplitHostPort(r.baseURL.Host)
+ if err != nil {
+ address = r.baseURL.Host
+ }
+
+ system, err := r.getComputerSystem(r.ComputerSystemId)
+ if err != nil {
+ return err
+ }
+
+ for _, link := range system.Links.Chassis {
+ chassis, err := r.getChassis(link.Ref)
+ if err != nil {
+ return err
+ }
+
+ thermal, err := r.getThermal(chassis.Thermal.Ref)
+ if err != nil {
+ return err
+ }
+
+ for _, j := range thermal.Temperatures {
+ tags := map[string]string{}
+ tags["address"] = address
+ tags["name"] = j.Name
+ tags["source"] = system.Hostname
+ tags["state"] = j.Status.State
+ tags["health"] = j.Status.Health
+ if chassis.Location != nil {
+ tags["datacenter"] = chassis.Location.PostalAddress.DataCenter
+ tags["room"] = chassis.Location.PostalAddress.Room
+ tags["rack"] = chassis.Location.Placement.Rack
+ tags["row"] = chassis.Location.Placement.Row
+ }
+
+ fields := make(map[string]interface{})
+ fields["reading_celsius"] = j.ReadingCelsius
+ fields["upper_threshold_critical"] = j.UpperThresholdCritical
+ fields["upper_threshold_fatal"] = j.UpperThresholdFatal
+ fields["lower_threshold_critical"] = j.LowerThresholdCritical
+ fields["lower_threshold_fatal"] = j.LowerThresholdFatal
+ acc.AddFields("redfish_thermal_temperatures", fields, tags)
+ }
+
+ for _, j := range thermal.Fans {
+ tags := map[string]string{}
+ fields := make(map[string]interface{})
+ tags["address"] = address
+ tags["name"] = j.Name
+ tags["source"] = system.Hostname
+ tags["state"] = j.Status.State
+ tags["health"] = j.Status.Health
+ if chassis.Location != nil {
+ tags["datacenter"] = chassis.Location.PostalAddress.DataCenter
+ tags["room"] = chassis.Location.PostalAddress.Room
+ tags["rack"] = chassis.Location.Placement.Rack
+ tags["row"] = chassis.Location.Placement.Row
+ }
+
+ if j.ReadingUnits != nil && *j.ReadingUnits == "RPM" {
+ fields["upper_threshold_critical"] = j.UpperThresholdCritical
+ fields["upper_threshold_fatal"] = j.UpperThresholdFatal
+ fields["lower_threshold_critical"] = j.LowerThresholdCritical
+ fields["lower_threshold_fatal"] = j.LowerThresholdFatal
+ fields["reading_rpm"] = j.Reading
+ } else {
+ fields["reading_percent"] = j.Reading
+ }
+ acc.AddFields("redfish_thermal_fans", fields, tags)
+ }
+
+ power, err := r.getPower(chassis.Power.Ref)
+ if err != nil {
+ return err
+ }
+
+ for _, j := range power.PowerSupplies {
+ tags := map[string]string{}
+ tags["address"] = address
+ tags["name"] = j.Name
+ tags["source"] = system.Hostname
+ tags["state"] = j.Status.State
+ tags["health"] = j.Status.Health
+ if chassis.Location != nil {
+ tags["datacenter"] = chassis.Location.PostalAddress.DataCenter
+ tags["room"] = chassis.Location.PostalAddress.Room
+ tags["rack"] = chassis.Location.Placement.Rack
+ tags["row"] = chassis.Location.Placement.Row
+ }
+
+ fields := make(map[string]interface{})
+ fields["power_input_watts"] = j.PowerInputWatts
+ fields["power_output_watts"] = j.PowerOutputWatts
+ fields["line_input_voltage"] = j.LineInputVoltage
+ fields["last_power_output_watts"] = j.LastPowerOutputWatts
+ fields["power_capacity_watts"] = j.PowerCapacityWatts
+ acc.AddFields("redfish_power_powersupplies", fields, tags)
+ }
+
+ for _, j := range power.Voltages {
+ tags := map[string]string{}
+ tags["address"] = address
+ tags["name"] = j.Name
+ tags["source"] = system.Hostname
+ tags["state"] = j.Status.State
+ tags["health"] = j.Status.Health
+ if chassis.Location != nil {
+ tags["datacenter"] = chassis.Location.PostalAddress.DataCenter
+ tags["room"] = chassis.Location.PostalAddress.Room
+ tags["rack"] = chassis.Location.Placement.Rack
+ tags["row"] = chassis.Location.Placement.Row
+ }
+
+ fields := make(map[string]interface{})
+ fields["reading_volts"] = j.ReadingVolts
+ fields["upper_threshold_critical"] = j.UpperThresholdCritical
+ fields["upper_threshold_fatal"] = j.UpperThresholdFatal
+ fields["lower_threshold_critical"] = j.LowerThresholdCritical
+ fields["lower_threshold_fatal"] = j.LowerThresholdFatal
+ acc.AddFields("redfish_power_voltages", fields, tags)
+ }
+ }
+
+ return nil
+}
+
+func init() {
+ inputs.Add("redfish", func() telegraf.Input {
+ return &Redfish{}
+ })
+}
diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go
new file mode 100644
index 0000000000000..8821b3d97557f
--- /dev/null
+++ b/plugins/inputs/redfish/redfish_test.go
@@ -0,0 +1,867 @@
+package redfish
+
+import (
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDellApis(t *testing.T) {
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ if !checkAuth(r, "test", "test") {
+ http.Error(w, "Unauthorized.", 401)
+ return
+ }
+
+ switch r.URL.Path {
+ case "/redfish/v1/Chassis/System.Embedded.1/Thermal":
+ http.ServeFile(w, r, "testdata/dell_thermal.json")
+ case "/redfish/v1/Chassis/System.Embedded.1/Power":
+ http.ServeFile(w, r, "testdata/dell_power.json")
+ case "/redfish/v1/Chassis/System.Embedded.1":
+ http.ServeFile(w, r, "testdata/dell_chassis.json")
+ case "/redfish/v1/Systems/System.Embedded.1":
+ http.ServeFile(w, r, "testdata/dell_systems.json")
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ defer ts.Close()
+
+ u, err := url.Parse(ts.URL)
+ require.NoError(t, err)
+ address, _, err := net.SplitHostPort(u.Host)
+ require.NoError(t, err)
+
+ expected_metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "redfish_thermal_temperatures",
+ map[string]string{
+ "name": "CPU1 Temp",
+ "source": "tpa-hostname",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 3.0,
+ "lower_threshold_fatal": 3.0,
+ "reading_celsius": 40.0,
+ "upper_threshold_critical": 93.0,
+ "upper_threshold_fatal": 93.0,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan1A",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "reading_rpm": 17760,
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan1B",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 15360,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan2A",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 17880,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan2B",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 15120,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan3A",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 18000,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan3B",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 15600,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan4A",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 17280,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan4B",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 15360,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan5A",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 17640,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan5B",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 15600,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan6A",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 17760,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan6B",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 15600,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan7A",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 17400,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan7B",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 15720,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan8A",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 18000,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board Fan8B",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "lower_threshold_critical": 600,
+ "lower_threshold_fatal": 600,
+ "reading_rpm": 15840,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_power_powersupplies",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "PS1 Status",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "power_capacity_watts": 750.00,
+ "power_input_watts": 900.0,
+ "power_output_watts": 203.0,
+ "line_input_voltage": 206.00,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_power_voltages",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board DIMM PG",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "reading_volts": 1.0,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_power_voltages",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board NDC PG",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "reading_volts": 1.0,
+ },
+ time.Unix(0, 0),
+ ),
+
+ testutil.MustMetric(
+ "redfish_power_voltages",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "System Board PS1 PG FAIL",
+ "address": address,
+ "datacenter": "",
+ "health": "OK",
+ "rack": "",
+ "room": "",
+ "row": "",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "reading_volts": 1.0,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ plugin := &Redfish{
+ Address: ts.URL,
+ Username: "test",
+ Password: "test",
+ ComputerSystemId: "System.Embedded.1",
+ }
+ plugin.Init()
+ var acc testutil.Accumulator
+
+ err = plugin.Gather(&acc)
+ require.NoError(t, err)
+ require.True(t, acc.HasMeasurement("redfish_thermal_temperatures"))
+ testutil.RequireMetricsEqual(t, expected_metrics, acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime())
+}
+
+func TestHPApis(t *testing.T) {
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ if !checkAuth(r, "test", "test") {
+ http.Error(w, "Unauthorized.", 401)
+ return
+ }
+
+ switch r.URL.Path {
+ case "/redfish/v1/Chassis/1/Thermal":
+ http.ServeFile(w, r, "testdata/hp_thermal.json")
+ case "/redfish/v1/Chassis/1/Power":
+ http.ServeFile(w, r, "testdata/hp_power.json")
+ case "/redfish/v1/Systems/1":
+ http.ServeFile(w, r, "testdata/hp_systems.json")
+ case "/redfish/v1/Chassis/1/":
+ http.ServeFile(w, r, "testdata/hp_chassis.json")
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ defer ts.Close()
+
+ u, err := url.Parse(ts.URL)
+ require.NoError(t, err)
+ address, _, err := net.SplitHostPort(u.Host)
+ require.NoError(t, err)
+
+ expected_metrics_hp := []telegraf.Metric{
+ testutil.MustMetric(
+ "redfish_thermal_temperatures",
+ map[string]string{
+ "name": "01-Inlet Ambient",
+ "source": "tpa-hostname",
+ "address": address,
+ "health": "OK",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "reading_celsius": 19.0,
+ "upper_threshold_critical": 42.0,
+ "upper_threshold_fatal": 47.0,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_temperatures",
+ map[string]string{
+ "name": "44-P/S 2 Zone",
+ "source": "tpa-hostname",
+ "address": address,
+ "health": "OK",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "reading_celsius": 34.0,
+ "upper_threshold_critical": 75.0,
+ "upper_threshold_fatal": 80.0,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "Fan 1",
+ "address": address,
+ "health": "OK",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "reading_percent": 23,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "Fan 2",
+ "address": address,
+ "health": "OK",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "reading_percent": 23,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_thermal_fans",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "Fan 3",
+ "address": address,
+ "health": "OK",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "reading_percent": 23,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_power_powersupplies",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "HpeServerPowerSupply",
+ "address": address,
+ "health": "OK",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "power_capacity_watts": 800.0,
+ "line_input_voltage": 205.0,
+ "last_power_output_watts": 0.0,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "redfish_power_powersupplies",
+ map[string]string{
+ "source": "tpa-hostname",
+ "name": "HpeServerPowerSupply",
+ "address": address,
+ "health": "OK",
+ "state": "Enabled",
+ },
+ map[string]interface{}{
+ "power_capacity_watts": 800.0,
+ "line_input_voltage": 205.0,
+ "last_power_output_watts": 90.0,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ hp_plugin := &Redfish{
+ Address: ts.URL,
+ Username: "test",
+ Password: "test",
+ ComputerSystemId: "1",
+ }
+ hp_plugin.Init()
+ var hp_acc testutil.Accumulator
+
+ err = hp_plugin.Gather(&hp_acc)
+ require.NoError(t, err)
+ require.True(t, hp_acc.HasMeasurement("redfish_thermal_temperatures"))
+ testutil.RequireMetricsEqual(t, expected_metrics_hp, hp_acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime())
+}
+
+func checkAuth(r *http.Request, username, password string) bool {
+ user, pass, ok := r.BasicAuth()
+ if !ok {
+ return false
+ }
+ return user == username && pass == password
+}
+
+func TestConnection(t *testing.T) {
+
+ r := &Redfish{
+ Address: "http://127.0.0.1",
+ Username: "test",
+ Password: "test",
+ ComputerSystemId: "System.Embedded.1",
+ }
+
+ var acc testutil.Accumulator
+ r.Init()
+ err := r.Gather(&acc)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "connect: connection refused")
+}
+
+func TestInvalidUsernameorPassword(t *testing.T) {
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ if !checkAuth(r, "testing", "testing") {
+ http.Error(w, "Unauthorized.", 401)
+ return
+ }
+
+ switch r.URL.Path {
+ case "/redfish/v1/Chassis/System.Embedded.1/Thermal":
+ http.ServeFile(w, r, "testdata/dell_thermal.json")
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ defer ts.Close()
+
+ r := &Redfish{
+ Address: ts.URL,
+ Username: "test",
+ Password: "test",
+ ComputerSystemId: "System.Embedded.1",
+ }
+
+ var acc testutil.Accumulator
+ r.Init()
+ err := r.Gather(&acc)
+ require.Error(t, err)
+ require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200")
+}
+func TestNoUsernameorPasswordConfiguration(t *testing.T) {
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ if !checkAuth(r, "testing", "testing") {
+ http.Error(w, "Unauthorized.", 401)
+ return
+ }
+
+ switch r.URL.Path {
+ case "/redfish/v1/Chassis/System.Embedded.1/Thermal":
+ http.ServeFile(w, r, "testdata/dell_thermal.json")
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ defer ts.Close()
+
+ r := &Redfish{
+ Address: ts.URL,
+ ComputerSystemId: "System.Embedded.1",
+ }
+
+ err := r.Init()
+ require.Error(t, err)
+ require.EqualError(t, err, "did not provide username and password")
+}
+
+func TestInvalidDellJSON(t *testing.T) {
+
+ tests := []struct {
+ name string
+ thermalfilename string
+ powerfilename string
+ chassisfilename string
+ hostnamefilename string
+ }{
+ {
+ name: "check Thermal",
+ thermalfilename: "testdata/dell_thermalinvalid.json",
+ powerfilename: "testdata/dell_power.json",
+ chassisfilename: "testdata/dell_chassis.json",
+ hostnamefilename: "testdata/dell_systems.json",
+ },
+ {
+ name: "check Power",
+ thermalfilename: "testdata/dell_thermal.json",
+ powerfilename: "testdata/dell_powerinvalid.json",
+ chassisfilename: "testdata/dell_chassis.json",
+ hostnamefilename: "testdata/dell_systems.json",
+ },
+ {
+ name: "check Location",
+ thermalfilename: "testdata/dell_thermal.json",
+ powerfilename: "testdata/dell_power.json",
+ chassisfilename: "testdata/dell_chassisinvalid.json",
+ hostnamefilename: "testdata/dell_systems.json",
+ },
+ {
+ name: "check Hostname",
+ thermalfilename: "testdata/dell_thermal.json",
+ powerfilename: "testdata/dell_power.json",
+ chassisfilename: "testdata/dell_chassis.json",
+ hostnamefilename: "testdata/dell_systemsinvalid.json",
+ },
+ }
+ for _, tt := range tests {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ if !checkAuth(r, "test", "test") {
+ http.Error(w, "Unauthorized.", 401)
+ return
+ }
+
+ switch r.URL.Path {
+ case "/redfish/v1/Chassis/System.Embedded.1/Thermal":
+ http.ServeFile(w, r, tt.thermalfilename)
+ case "/redfish/v1/Chassis/System.Embedded.1/Power":
+ http.ServeFile(w, r, tt.powerfilename)
+ case "/redfish/v1/Chassis/System.Embedded.1":
+ http.ServeFile(w, r, tt.chassisfilename)
+ case "/redfish/v1/Systems/System.Embedded.1":
+ http.ServeFile(w, r, tt.hostnamefilename)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ defer ts.Close()
+
+ plugin := &Redfish{
+ Address: ts.URL,
+ Username: "test",
+ Password: "test",
+ ComputerSystemId: "System.Embedded.1",
+ }
+
+ plugin.Init()
+
+ var acc testutil.Accumulator
+ err := plugin.Gather(&acc)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "error parsing input:")
+ }
+}
+
+func TestInvalidHPJSON(t *testing.T) {
+
+ tests := []struct {
+ name string
+ thermalfilename string
+ powerfilename string
+ hostnamefilename string
+ chassisfilename string
+ }{
+ {
+ name: "check Thermal",
+ thermalfilename: "testdata/hp_thermalinvalid.json",
+ powerfilename: "testdata/hp_power.json",
+ hostnamefilename: "testdata/hp_systems.json",
+ chassisfilename: "testdata/hp_chassis.json",
+ },
+ {
+ name: "check Power",
+ thermalfilename: "testdata/hp_thermal.json",
+ powerfilename: "testdata/hp_powerinvalid.json",
+ hostnamefilename: "testdata/hp_systems.json",
+ chassisfilename: "testdata/hp_chassis.json",
+ },
+ {
+ name: "check Hostname",
+ thermalfilename: "testdata/hp_thermal.json",
+ powerfilename: "testdata/hp_power.json",
+ hostnamefilename: "testdata/hp_systemsinvalid.json",
+ chassisfilename: "testdata/hp_chassis.json",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ if !checkAuth(r, "test", "test") {
+ http.Error(w, "Unauthorized.", 401)
+ return
+ }
+
+ switch r.URL.Path {
+ case "/redfish/v1/Chassis/1/Thermal":
+ http.ServeFile(w, r, tt.thermalfilename)
+ case "/redfish/v1/Chassis/1/Power":
+ http.ServeFile(w, r, tt.powerfilename)
+ case "/redfish/v1/Chassis/1/":
+ http.ServeFile(w, r, tt.chassisfilename)
+ case "/redfish/v1/Systems/System.Embedded.2":
+ http.ServeFile(w, r, tt.hostnamefilename)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ defer ts.Close()
+
+ plugin := &Redfish{
+ Address: ts.URL,
+ Username: "test",
+ Password: "test",
+ ComputerSystemId: "System.Embedded.2",
+ }
+
+ plugin.Init()
+
+ var acc testutil.Accumulator
+ err := plugin.Gather(&acc)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "error parsing input:")
+ })
+ }
+}
diff --git a/plugins/inputs/redfish/testdata/dell_chassis.json b/plugins/inputs/redfish/testdata/dell_chassis.json
new file mode 100644
index 0000000000000..48d0db49d60ce
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/dell_chassis.json
@@ -0,0 +1,187 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Chassis.Chassis",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1",
+ "@odata.type": "#Chassis.v1_6_0.Chassis",
+ "Actions": {
+ "#Chassis.Reset": {
+ "ResetType@Redfish.AllowableValues": [
+ "On",
+ "ForceOff"
+ ],
+ "target": "/redfish/v1/Chassis/System.Embedded.1/Actions/Chassis.Reset"
+ }
+ },
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "AssetTag": null,
+ "ChassisType": "RackMount",
+ "Description": "It represents the properties for physical components for any system.It represent racks, rackmount servers, blades, standalone, modular systems,enclosures, and all other containers.The non-cpu/device centric parts of the schema are all accessed either directly or indirectly through this resource.",
+ "Id": "System.Embedded.1",
+ "IndicatorLED": "Off",
+ "Links": {
+ "ComputerSystems": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1"
+ }
+ ],
+ "ComputerSystems@odata.count": 1,
+ "Contains": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/Enclosure.Internal.0-1:RAID.Integrated.1-1"
+ }
+ ],
+ "Contains@odata.count": 1,
+ "CooledBy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8B"
+ }
+ ],
+ "CooledBy@odata.count": 16,
+ "Drives": [],
+ "Drives@odata.count": 0,
+ "ManagedBy": [
+ {
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1"
+ }
+ ],
+ "ManagedBy@odata.count": 1,
+ "ManagersInChassis": [
+ {
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1"
+ }
+ ],
+ "ManagersInChassis@odata.count": 1,
+ "PCIeDevices": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/216-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-31"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-17"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/3-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-28"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-23"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/25-0"
+ }
+ ],
+ "PCIeDevices@odata.count": 8,
+ "PoweredBy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.2"
+ }
+ ],
+ "PoweredBy@odata.count": 2,
+ "Storage": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage/AHCI.Embedded.1-1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage/AHCI.Embedded.2-1"
+ }
+ ],
+ "Storage@odata.count": 3
+ },
+ "Location": {
+ "Info": ";;;;1",
+ "InfoFormat": "DataCenter;RoomName;Aisle;RackName;RackSlot",
+ "Placement": {
+ "Rack": "",
+ "Row": ""
+ },
+ "PostalAddress": {
+ "Building": "",
+ "Room": ""
+ }
+ },
+ "Manufacturer": "Dell Inc.",
+ "Model": "PowerEdge R640",
+ "Name": "Computer System Chassis",
+ "NetworkAdapters": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/NetworkAdapters"
+ },
+ "PartNumber": "0CRT1GA05",
+ "PhysicalSecurity": {
+ "IntrusionSensor": "Normal",
+ "IntrusionSensorNumber": 115,
+ "IntrusionSensorReArm": "Manual"
+ },
+ "Power": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power"
+ },
+ "PowerState": "On",
+ "SKU": "CLFV7M2",
+ "SerialNumber": "CNIVC007CV0803",
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ },
+ "Thermal": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Thermal"
+ }
+}
diff --git a/plugins/inputs/redfish/testdata/dell_chassisinvalid.json b/plugins/inputs/redfish/testdata/dell_chassisinvalid.json
new file mode 100644
index 0000000000000..c7789a69394b8
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/dell_chassisinvalid.json
@@ -0,0 +1,188 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Chassis.Chassis",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1",
+ "@odata.type": "#Chassis.v1_6_0.Chassis",
+ "Actions": {
+ "#Chassis.Reset": {
+ "ResetType@Redfish.AllowableValues": [
+ "On",
+ "ForceOff"
+ ],
+ "target": "/redfish/v1/Chassis/System.Embedded.1/Actions/Chassis.Reset"
+ }
+ },
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "AssetTag": null,
+ "ChassisType": "RackMount",
+ "Description": "It represents the properties for physical components for any system.It represent racks, rackmount servers, blades, standalone, modular systems,enclosures, and all other containers.The non-cpu/device centric parts of the schema are all accessed either directly or indirectly through this resource.",
+ "Id": "System.Embedded.1",
+ "IndicatorLED": "Off",
+ "Links": {
+ "ComputerSystems": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1"
+ }
+ ],
+ "ComputerSystems@odata.count": 1,
+ "Contains": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/Enclosure.Internal.0-1:RAID.Integrated.1-1"
+ }
+ ],
+ "Contains@odata.count": 1,
+ "CooledBy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8B"
+ }
+ ],
+ "CooledBy@odata.count": 16,
+ "Drives": [],
+ "Drives@odata.count": 0,
+ "ManagedBy": [
+ {
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1"
+ }
+ ],
+ "ManagedBy@odata.count": 1,
+ "ManagersInChassis": [
+ {
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1"
+ }
+ ],
+ "ManagersInChassis@odata.count": 1,
+ "PCIeDevices": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/216-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-31"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-17"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/3-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-28"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-23"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/25-0"
+ }
+ ],
+ "PCIeDevices@odata.count": 8,
+ "PoweredBy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.2"
+ }
+ ],
+ "PoweredBy@odata.count": 2,
+ "Storage": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage/AHCI.Embedded.1-1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage/AHCI.Embedded.2-1"
+ }
+ ],
+ "Storage@odata.count": 3
+ },
+ "Location": {
+ "Info": ";;;;1",
+ "InfoFormat": "DataCenter;RoomName;Aisle;RackName;RackSlot",
+ "Placement": {
+ "Rack": "",
+ "Row": ""
+ },
+ "PostalAddress": {
+ "Building": "",
+ "Room": ""
+ }
+ },
+ "Manufacturer": "Dell Inc.",
+ "Model": "PowerEdge R640",
+ "Name": "Computer System Chassis",
+ "NetworkAdapters": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/NetworkAdapters"
+ },
+ "PartNumber": "0CRT1GA05",
+ "PhysicalSecurity": {
+ "IntrusionSensor": "Normal",
+ "IntrusionSensorNumber": 115,
+ "IntrusionSensorReArm": "Manual"
+ },
+ "Power": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power"
+ },
+ "PowerState": "On",
+ "SKU": "CLFV7M2",
+ "SerialNumber": "CNIVC007CV0803",
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ },
+ "Thermal": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Thermal"
+ }
+}
+{
diff --git a/plugins/inputs/redfish/testdata/dell_power.json b/plugins/inputs/redfish/testdata/dell_power.json
new file mode 100644
index 0000000000000..7f2b38baa6758
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/dell_power.json
@@ -0,0 +1,207 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power",
+ "@odata.type": "#Power.v1_5_0.Power",
+ "Description": "Power",
+ "Id": "Power",
+ "Name": "Power",
+ "PowerControl": [
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerControl",
+ "@odata.type": "#Power.v1_4_0.PowerControl",
+ "MemberId": "PowerControl",
+ "Name": "System Power Control",
+ "PowerAllocatedWatts": 1628,
+ "PowerAvailableWatts": 0,
+ "PowerCapacityWatts": 1628,
+ "PowerConsumedWatts": 429,
+ "PowerLimit": {
+ "CorrectionInMs": 0,
+ "LimitException": "HardPowerOff",
+ "LimitInWatts": 348
+ },
+ "PowerMetrics": {
+ "AverageConsumedWatts": 426,
+ "IntervalInMin": 1,
+ "MaxConsumedWatts": 436,
+ "MinConsumedWatts": 425
+ },
+ "PowerRequestedWatts": 704,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 2
+ }
+ ],
+ "PowerControl@odata.count": 1,
+ "PowerSupplies": [
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.1",
+ "@odata.type": "#Power.v1_5_0.PowerSupply",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "EfficiencyPercent": 0.9100000262260437,
+ "FirmwareVersion": "00.1B.53",
+ "HotPluggable": true,
+ "InputRanges": [
+ {
+ "InputType": "AC",
+ "MaximumFrequencyHz": 63,
+ "MaximumVoltage": 264,
+ "MinimumFrequencyHz": 47,
+ "MinimumVoltage": 90,
+ "OutputWattage": 750
+ }
+ ],
+ "InputRanges@odata.count": 1,
+ "LastPowerOutputWatts": null,
+ "LineInputVoltage": 206,
+ "LineInputVoltageType": "AC240V",
+ "Manufacturer": "Dell",
+ "MemberId": "PSU.Slot.1",
+ "Model": "PWR SPLY,750W,RDNT,ARTESYN ",
+ "Name": "PS1 Status",
+ "Oem": {
+ "Dell": {
+ "DellPowerSupply": {
+ "@odata.context": "/redfish/v1/$metadata#DellPowerSupply.DellPowerSupply",
+ "@odata.id": "/redfish/v1/Dell/Chassis/System.Embedded.1/Power/PowerSupplies/DellPowerSupply/PSU.Slot.1",
+ "@odata.type": "#DellPowerSupply.v1_0_0.DellPowerSupply",
+ "IsSwitchingSupply": true,
+ "Links": {
+ "DellPSNumericSensorCollection": [
+ {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellPSNumericSensor/iDRAC.Embedded.1%23PS1Current1"
+ }
+ ]
+ }
+ },
+ "DellPowerSupplyView": {
+ "@odata.context": "/redfish/v1/$metadata#DellPowerSupplyView.DellPowerSupplyView",
+ "@odata.id": "/redfish/v1/Dell/Chassis/System.Embedded.1/Power/PowerSupplies/DellPowerSupplyView/PSU.Slot.1",
+ "@odata.type": "#DellPowerSupplyView.v1_0_0.DellPowerSupplyView",
+ "DetailedState": "Presence Detected",
+ "Range1MaxInputPowerWatts": 900
+ }
+ }
+ },
+ "PartNumber": "0PJMDNA01",
+ "PowerCapacityWatts": 750,
+ "PowerInputWatts": 900,
+ "PowerOutputWatts": 203,
+ "PowerSupplyType": "AC",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SerialNumber": "PHARP0079G0049",
+ "SparePartNumber": "0PJMDNA01",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ],
+ "PowerSupplies@odata.count": 1,
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "Voltages": [
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Voltages/iDRAC.Embedded.1%23SystemBoardDIMMPG",
+ "@odata.type": "#Power.v1_3_0.Voltage",
+ "LowerThresholdCritical": null,
+ "LowerThresholdFatal": null,
+ "LowerThresholdNonCritical": null,
+ "MaxReadingRange": 0,
+ "MemberId": "iDRAC.Embedded.1#SystemBoardDIMMPG",
+ "MinReadingRange": 0,
+ "Name": "System Board DIMM PG",
+ "PhysicalContext": "SystemBoard",
+ "ReadingVolts": 1,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SensorNumber": 7,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Voltages/iDRAC.Embedded.1%23SystemBoardNDCPG",
+ "@odata.type": "#Power.v1_3_0.Voltage",
+ "LowerThresholdCritical": null,
+ "LowerThresholdFatal": null,
+ "LowerThresholdNonCritical": null,
+ "MaxReadingRange": 197,
+ "MemberId": "iDRAC.Embedded.1#SystemBoardNDCPG",
+ "MinReadingRange": 139,
+ "Name": "System Board NDC PG",
+ "PhysicalContext": "SystemBoard",
+ "ReadingVolts": 1,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SensorNumber": 8,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Voltages/iDRAC.Embedded.1%23SystemBoardPS1PGFAIL",
+ "@odata.type": "#Power.v1_3_0.Voltage",
+ "LowerThresholdCritical": null,
+ "LowerThresholdFatal": null,
+ "LowerThresholdNonCritical": null,
+ "MaxReadingRange": 197,
+ "MemberId": "iDRAC.Embedded.1#SystemBoardPS1PGFAIL",
+ "MinReadingRange": 139,
+ "Name": "System Board PS1 PG FAIL",
+ "PhysicalContext": "SystemBoard",
+ "ReadingVolts": 1,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SensorNumber": 9,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ }
+ ],
+ "Voltages@odata.count": 4
+}
diff --git a/plugins/inputs/redfish/testdata/dell_powerinvalid.json b/plugins/inputs/redfish/testdata/dell_powerinvalid.json
new file mode 100644
index 0000000000000..59f31503fa237
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/dell_powerinvalid.json
@@ -0,0 +1,207 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power",
+ "@odata.type": "#Power.v1_5_0.Power",
+ "Description": "Power",
+ "Id": "Power",
+ "Name": "Power",
+ "PowerControl": [
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerControl",
+ "@odata.type": "#Power.v1_4_0.PowerControl",
+ "MemberId": "PowerControl",
+ "Name": "System Power Control",
+ "PowerAllocatedWatts": 1628,
+ "PowerAvailableWatts": 0,
+ "PowerCapacityWatts": 1628,
+ "PowerConsumedWatts": 429,
+ "PowerLimit": {
+ "CorrectionInMs": 0,
+ "LimitException": "HardPowerOff",
+ "LimitInWatts": 348
+ },
+ "PowerMetrics": {
+ "AverageConsumedWatts": 426,
+ "IntervalInMin": 1,
+ "MaxConsumedWatts": 436,
+ "MinConsumedWatts": 425
+ },
+ "PowerRequestedWatts": 704,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 2
+ }
+ ],
+ "PowerControl@odata.count": 1,
+ "PowerSupplies": [
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.1",
+ "@odata.type": "#Power.v1_5_0.PowerSupply",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "EfficiencyPercent": 0.9100000262260437,
+ "FirmwareVersion": "00.1B.53",
+ "HotPluggable": true,
+ "InputRanges": [
+ {
+ "InputType": "AC",
+ "MaximumFrequencyHz": 63,
+ "MaximumVoltage": 264,
+ "MinimumFrequencyHz": 47,
+ "MinimumVoltage": 90,
+ "OutputWattage": 750
+ }
+ ],
+ "InputRanges@odata.count": 1,
+ "LastPowerOutputWatts": null,
+ "LineInputVoltage": 206,
+ "LineInputVoltageType": "AC240V",
+ "Manufacturer": "Dell",
+ "MemberId": "PSU.Slot.1",
+ "Model": "PWR SPLY,750W,RDNT,ARTESYN ",
+ "Name": PS1 Status,
+ "Oem": {
+ "Dell": {
+ "DellPowerSupply": {
+ "@odata.context": "/redfish/v1/$metadata#DellPowerSupply.DellPowerSupply",
+ "@odata.id": "/redfish/v1/Dell/Chassis/System.Embedded.1/Power/PowerSupplies/DellPowerSupply/PSU.Slot.1",
+ "@odata.type": "#DellPowerSupply.v1_0_0.DellPowerSupply",
+ "IsSwitchingSupply": true,
+ "Links": {
+ "DellPSNumericSensorCollection": [
+ {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellPSNumericSensor/iDRAC.Embedded.1%23PS1Current1"
+ }
+ ]
+ }
+ },
+ "DellPowerSupplyView": {
+ "@odata.context": "/redfish/v1/$metadata#DellPowerSupplyView.DellPowerSupplyView",
+ "@odata.id": "/redfish/v1/Dell/Chassis/System.Embedded.1/Power/PowerSupplies/DellPowerSupplyView/PSU.Slot.1",
+ "@odata.type": "#DellPowerSupplyView.v1_0_0.DellPowerSupplyView",
+ "DetailedState": "Presence Detected",
+ "Range1MaxInputPowerWatts": 900
+ }
+ }
+ },
+ "PartNumber": "0PJMDNA01",
+ "PowerCapacityWatts": 750,
+ "PowerInputWatts": 900,
+ "PowerOutputWatts": 203,
+ "PowerSupplyType": "AC",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SerialNumber": "PHARP0079G0049",
+ "SparePartNumber": "0PJMDNA01",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ],
+ "PowerSupplies@odata.count": 1,
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "Voltages": [
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Voltages/iDRAC.Embedded.1%23SystemBoardDIMMPG",
+ "@odata.type": "#Power.v1_3_0.Voltage",
+ "LowerThresholdCritical": null,
+ "LowerThresholdFatal": null,
+ "LowerThresholdNonCritical": null,
+ "MaxReadingRange": 0,
+ "MemberId": "iDRAC.Embedded.1#SystemBoardDIMMPG",
+ "MinReadingRange": 0,
+ "Name": "System Board DIMM PG",
+ "PhysicalContext": "SystemBoard",
+ "ReadingVolts": 1,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SensorNumber": 7,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Voltages/iDRAC.Embedded.1%23SystemBoardNDCPG",
+ "@odata.type": "#Power.v1_3_0.Voltage",
+ "LowerThresholdCritical": null,
+ "LowerThresholdFatal": null,
+ "LowerThresholdNonCritical": null,
+ "MaxReadingRange": 197,
+ "MemberId": "iDRAC.Embedded.1#SystemBoardNDCPG",
+ "MinReadingRange": 139,
+ "Name": "System Board NDC PG",
+ "PhysicalContext": "SystemBoard",
+ "ReadingVolts": 1,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SensorNumber": 8,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Voltages/iDRAC.Embedded.1%23SystemBoardPS1PGFAIL",
+ "@odata.type": "#Power.v1_3_0.Voltage",
+ "LowerThresholdCritical": null,
+ "LowerThresholdFatal": null,
+ "LowerThresholdNonCritical": null,
+ "MaxReadingRange": 197,
+ "MemberId": "iDRAC.Embedded.1#SystemBoardPS1PGFAIL",
+ "MinReadingRange": 139,
+ "Name": "System Board PS1 PG FAIL",
+ "PhysicalContext": "SystemBoard",
+ "ReadingVolts": 1,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SensorNumber": 9,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ }
+ ],
+ "Voltages@odata.count": 4
+}
diff --git a/plugins/inputs/redfish/testdata/dell_systems.json b/plugins/inputs/redfish/testdata/dell_systems.json
new file mode 100644
index 0000000000000..40de02595acc2
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/dell_systems.json
@@ -0,0 +1,329 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#ComputerSystem.ComputerSystem",
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1",
+ "@odata.type": "#ComputerSystem.v1_5_0.ComputerSystem",
+ "Actions": {
+ "#ComputerSystem.Reset": {
+ "ResetType@Redfish.AllowableValues": [
+ "On",
+ "ForceOff",
+ "ForceRestart",
+ "GracefulShutdown",
+ "PushPowerButton",
+ "Nmi"
+ ],
+ "target": "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
+ }
+ },
+ "AssetTag": "",
+ "Bios": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Bios"
+ },
+ "BiosVersion": "2.3.10",
+ "Boot": {
+ "BootOptions": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/BootOptions"
+ },
+ "BootOrder": [
+ "Boot0002",
+ "Boot0003",
+ "Boot0005",
+ "Boot0004"
+ ],
+ "BootOrder@odata.count": 4,
+ "BootSourceOverrideEnabled": "Once",
+ "BootSourceOverrideMode": "UEFI",
+ "BootSourceOverrideTarget": "None",
+ "BootSourceOverrideTarget@Redfish.AllowableValues": [
+ "None",
+ "Pxe",
+ "Floppy",
+ "Cd",
+ "Hdd",
+ "BiosSetup",
+ "Utilities",
+ "UefiTarget",
+ "SDCard",
+ "UefiHttp"
+ ],
+ "UefiTargetBootSourceOverride": ""
+ },
+ "Description": "Computer System which represents a machine (physical or virtual) and the local resources such as memory, cpu and other devices that can be accessed from that machine.",
+ "EthernetInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/EthernetInterfaces"
+ },
+ "HostName": "tpa-hostname",
+ "HostWatchdogTimer": {
+ "FunctionEnabled": true,
+ "Status": {
+ "State": "Enabled"
+ },
+ "TimeoutAction": "None"
+ },
+ "HostingRoles": [],
+ "HostingRoles@odata.count": 0,
+ "Id": "System.Embedded.1",
+ "IndicatorLED": "Off",
+ "Links": {
+ "Chassis": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "Chassis@odata.count": 1,
+ "CooledBy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8B"
+ }
+ ],
+ "CooledBy@odata.count": 16,
+ "ManagedBy": [
+ {
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1"
+ }
+ ],
+ "ManagedBy@odata.count": 1,
+ "Oem": {
+ "Dell": {
+ "BootOrder": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/BootSources"
+ },
+ "DellNumericSensorCollection": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellNumericSensorCollection"
+ },
+ "DellOSDeploymentService": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellOSDeploymentService"
+ },
+ "DellPresenceAndStatusSensorCollection": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellPresenceAndStatusSensorCollection"
+ },
+ "DellRaidService": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellRaidService"
+ },
+ "DellSensorCollection": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellSensorCollection"
+ },
+ "DellSoftwareInstallationService": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService"
+ }
+ }
+ },
+ "PoweredBy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.2"
+ }
+ ],
+ "PoweredBy@odata.count": 2
+ },
+ "Manufacturer": "Dell Inc.",
+ "Memory": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Memory"
+ },
+ "MemorySummary": {
+ "MemoryMirroring": "System",
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ },
+ "TotalSystemMemoryGiB": 476.837376
+ },
+ "Model": "PowerEdge R640",
+ "Name": "System",
+ "NetworkInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/NetworkInterfaces"
+ },
+ "Oem": {
+ "Dell": {
+ "DellSystem": {
+ "@odata.context": "/redfish/v1/$metadata#DellSystem.DellSystem",
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellSystem/System.Embedded.1",
+ "@odata.type": "#DellSystem.v1_0_0.DellSystem",
+ "BIOSReleaseDate": "08/15/2019",
+ "BaseBoardChassisSlot": "NA",
+ "BatteryRollupStatus": "OK",
+ "BladeGeometry": "NotApplicable",
+ "CMCIP": null,
+ "CPURollupStatus": "OK",
+ "ChassisServiceTag": "CLFV7M2",
+ "ExpressServiceCode": "27417828170",
+ "FanRollupStatus": "OK",
+ "IntrusionRollupStatus": "OK",
+ "LicensingRollupStatus": "OK",
+ "MaxDIMMSlots": 24,
+ "MaxPCIeSlots": 3,
+ "NodeID": "CLFV7M2",
+ "PSRollupStatus": "OK",
+ "PowerCapEnabledState": "Disabled",
+ "StorageRollupStatus": "OK",
+ "SysMemPrimaryStatus": "OK",
+ "SystemGeneration": "14G Monolithic",
+ "SystemID": 1814,
+ "TempRollupStatus": "OK",
+ "UUID": "4c4c4544-004c-4610-8056-c3c04f374d32",
+ "VoltRollupStatus": "OK"
+ }
+ }
+ },
+ "PCIeDevices": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/216-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-31"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-17"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/3-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-28"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-23"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/25-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/24-0"
+ }
+ ],
+ "PCIeDevices@odata.count": 9,
+ "PCIeFunctions": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/216-0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/216-0-1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-31-4"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-17-5"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-31-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/3-0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-28-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-23-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-28-4"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/25-0-1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/25-0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/24-0-0"
+ }
+ ],
+ "PCIeFunctions@odata.count": 13,
+ "PartNumber": "0CRT1GA05",
+ "PowerState": "On",
+ "ProcessorSummary": {
+ "Count": 2,
+ "LogicalProcessorCount": 80,
+ "Model": "Intel(R) Xeon(R) Gold 6138 CPU @ 2.00GHz",
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ }
+ },
+ "Processors": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Processors"
+ },
+ "SKU": "CLFV7M2",
+ "SecureBoot": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/SecureBoot"
+ },
+ "SerialNumber": "CNIVC007CV0803",
+ "SimpleStorage": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/SimpleStorage/Controllers"
+ },
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ },
+ "Storage": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage"
+ },
+ "SystemType": "Physical",
+ "TrustedModules": [
+ {
+ "FirmwareVersion": "Unknown",
+ "Status": {
+ "State": "Disabled"
+ }
+ }
+ ],
+ "UUID": "4c4c4544-004c-4610-8056-c3c04f374d32"
+}
diff --git a/plugins/inputs/redfish/testdata/dell_systemsinvalid.json b/plugins/inputs/redfish/testdata/dell_systemsinvalid.json
new file mode 100644
index 0000000000000..f3fdc0db8f7d8
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/dell_systemsinvalid.json
@@ -0,0 +1,329 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#ComputerSystem.ComputerSystem",
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1",
+ "@odata.type": "#ComputerSystem.v1_5_0.ComputerSystem",
+ "Actions": {
+ "#ComputerSystem.Reset": {
+ "ResetType@Redfish.AllowableValues": [
+ "On",
+ "ForceOff",
+ "ForceRestart",
+ "GracefulShutdown",
+ "PushPowerButton",
+ "Nmi"
+ ],
+ "target": "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
+ }
+ },
+ "AssetTag": "",
+ "Bios": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Bios"
+ },
+ "BiosVersion": "2.3.10",
+ "Boot": {
+ "BootOptions": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/BootOptions"
+ },
+ "BootOrder": [
+ "Boot0002",
+ "Boot0003",
+ "Boot0005",
+ "Boot0004"
+ ],
+ "BootOrder@odata.count": 4,
+ "BootSourceOverrideEnabled": "Once",
+ "BootSourceOverrideMode": "UEFI",
+ "BootSourceOverrideTarget": "None",
+ "BootSourceOverrideTarget@Redfish.AllowableValues": [
+ "None",
+ "Pxe",
+ "Floppy",
+ "Cd",
+ "Hdd",
+ "BiosSetup",
+ "Utilities",
+ "UefiTarget",
+ "SDCard",
+ "UefiHttp"
+ ],
+ "UefiTargetBootSourceOverride": ""
+ },
+ "Description": "Computer System which represents a machine (physical or virtual) and the local resources such as memory, cpu and other devices that can be accessed from that machine.",
+ "EthernetInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/EthernetInterfaces"
+ }
+ "HostName": "tpa-hostname",
+ "HostWatchdogTimer": {
+ "FunctionEnabled": true,
+ "Status": {
+ "State": "Enabled"
+ },
+ "TimeoutAction": "None"
+ },
+ "HostingRoles": [],
+ "HostingRoles@odata.count": 0,
+ "Id": "System.Embedded.1",
+ "IndicatorLED": "Off",
+ "Links": {
+ "Chassis": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "Chassis@odata.count": 1,
+ "CooledBy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7B"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8B"
+ }
+ ],
+ "CooledBy@odata.count": 16,
+ "ManagedBy": [
+ {
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1"
+ }
+ ],
+ "ManagedBy@odata.count": 1,
+ "Oem": {
+ "Dell": {
+ "BootOrder": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/BootSources"
+ },
+ "DellNumericSensorCollection": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellNumericSensorCollection"
+ },
+ "DellOSDeploymentService": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellOSDeploymentService"
+ },
+ "DellPresenceAndStatusSensorCollection": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellPresenceAndStatusSensorCollection"
+ },
+ "DellRaidService": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellRaidService"
+ },
+ "DellSensorCollection": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellSensorCollection"
+ },
+ "DellSoftwareInstallationService": {
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService"
+ }
+ }
+ },
+ "PoweredBy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.2"
+ }
+ ],
+ "PoweredBy@odata.count": 2
+ },
+ "Manufacturer": "Dell Inc.",
+ "Memory": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Memory"
+ },
+ "MemorySummary": {
+ "MemoryMirroring": "System",
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ },
+ "TotalSystemMemoryGiB": 476.837376
+ },
+ "Model": "PowerEdge R640",
+ "Name": "System",
+ "NetworkInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/NetworkInterfaces"
+ },
+ "Oem": {
+ "Dell": {
+ "DellSystem": {
+ "@odata.context": "/redfish/v1/$metadata#DellSystem.DellSystem",
+ "@odata.id": "/redfish/v1/Dell/Systems/System.Embedded.1/DellSystem/System.Embedded.1",
+ "@odata.type": "#DellSystem.v1_0_0.DellSystem",
+ "BIOSReleaseDate": "08/15/2019",
+ "BaseBoardChassisSlot": "NA",
+ "BatteryRollupStatus": "OK",
+ "BladeGeometry": "NotApplicable",
+ "CMCIP": null,
+ "CPURollupStatus": "OK",
+ "ChassisServiceTag": "CLFV7M2",
+ "ExpressServiceCode": "27417828170",
+ "FanRollupStatus": "OK",
+ "IntrusionRollupStatus": "OK",
+ "LicensingRollupStatus": "OK",
+ "MaxDIMMSlots": 24,
+ "MaxPCIeSlots": 3,
+ "NodeID": "CLFV7M2",
+ "PSRollupStatus": "OK",
+ "PowerCapEnabledState": "Disabled",
+ "StorageRollupStatus": "OK",
+ "SysMemPrimaryStatus": "OK",
+ "SystemGeneration": "14G Monolithic",
+ "SystemID": 1814,
+ "TempRollupStatus": "OK",
+ "UUID": "4c4c4544-004c-4610-8056-c3c04f374d32",
+ "VoltRollupStatus": "OK"
+ }
+ }
+ },
+ "PCIeDevices": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/216-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-31"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-17"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/3-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-28"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/0-23"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/25-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeDevice/24-0"
+ }
+ ],
+ "PCIeDevices@odata.count": 9,
+ "PCIeFunctions": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/216-0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/216-0-1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-31-4"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-17-5"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-31-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/3-0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-28-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-23-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/0-28-4"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/25-0-1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/25-0-0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/PCIeFunction/24-0-0"
+ }
+ ],
+ "PCIeFunctions@odata.count": 13,
+ "PartNumber": "0CRT1GA05",
+ "PowerState": "On",
+ "ProcessorSummary": {
+ "Count": 2,
+ "LogicalProcessorCount": 80,
+ "Model": "Intel(R) Xeon(R) Gold 6138 CPU @ 2.00GHz",
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ }
+ },
+ "Processors": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Processors"
+ },
+ "SKU": "CLFV7M2",
+ "SecureBoot": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/SecureBoot"
+ },
+ "SerialNumber": "CNIVC007CV0803",
+ "SimpleStorage": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/SimpleStorage/Controllers"
+ },
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ },
+ "Storage": {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage"
+ },
+ "SystemType": "Physical",
+ "TrustedModules": [
+ {
+ "FirmwareVersion": "Unknown",
+ "Status": {
+ "State": "Disabled"
+ }
+ }
+ ],
+ "UUID": "4c4c4544-004c-4610-8056-c3c04f374d32"
+}
diff --git a/plugins/inputs/redfish/testdata/dell_thermal.json b/plugins/inputs/redfish/testdata/dell_thermal.json
new file mode 100644
index 0000000000000..07e7fe4c29dbd
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/dell_thermal.json
@@ -0,0 +1,589 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Thermal.Thermal",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Thermal",
+ "@odata.type": "#Thermal.v1_4_0.Thermal",
+ "Description": "Represents the properties for Temperature and Cooling",
+ "Fans": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan1A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.1A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan1A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17760,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan1B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.1B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan1B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15360,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan2A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.2A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan2A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17880,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan2B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.2B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan2B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15120,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan3A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.3A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan3A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 18000,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan3B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.3B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan3B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15600,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan4A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.4A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan4A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17280,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan4B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.4B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan4B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15360,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan5A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.5A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan5A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17640,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan5B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.5B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan5B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15600,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan6A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.6A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan6A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17760,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan6B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.6B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan6B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15600,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan7A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.7A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan7A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17400,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan7B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.7B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan7B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15720,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan8A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.8A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan8A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 18000,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan8B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.8B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan8B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15840,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ }
+ ],
+ "Fans@odata.count": 16,
+ "Id": "Thermal",
+ "Name": "Thermal",
+ "Redundancy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Thermal/Redundancy/iDRAC.Embedded.1%23SystemBoardFanRedundancy",
+ "@odata.type": "#Redundancy.v1_3_0.Redundancy",
+ "MaxNumSupported": 0,
+ "MemberId": "iDRAC.Embedded.1#SystemBoardFanRedundancy",
+ "MinNumNeeded": 0,
+ "Mode": "N+m",
+ "Name": "System Board Fan Redundancy",
+ "RedundancyEnabled": true,
+ "RedundancySet": [],
+ "RedundancySet@odata.count": 0,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ],
+ "Redundancy@odata.count": 1,
+ "Temperatures": [
+ {
+ "@odata.context": "/redfish/v1/$metadata#Thermal.Thermal",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Temperatures/iDRAC.Embedded.1%23CPU1Temp",
+ "@odata.type": "#Thermal.v1_4_0.Temperature",
+ "LowerThresholdCritical": 3,
+ "LowerThresholdFatal": 3,
+ "LowerThresholdNonCritical": null,
+ "MaxReadingRangeTemp": 93,
+ "MemberId": "iDRAC.Embedded.1#CPU1Temp",
+ "MinReadingRangeTemp": 3,
+ "Name": "CPU1 Temp",
+ "PhysicalContext": "CPU",
+ "ReadingCelsius": 40,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Processors/CPU.Socket.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SensorNumber": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": 93,
+ "UpperThresholdFatal": 93,
+ "UpperThresholdNonCritical": null
+ }
+ ],
+ "Temperatures@odata.count": 4
+}
diff --git a/plugins/inputs/redfish/testdata/dell_thermalinvalid.json b/plugins/inputs/redfish/testdata/dell_thermalinvalid.json
new file mode 100644
index 0000000000000..139322ad1378e
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/dell_thermalinvalid.json
@@ -0,0 +1,589 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Thermal.Thermal",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Thermal",
+ "@odata.type": "#Thermal.v1_4_0.Thermal",
+ "Description": "Represents the properties for Temperature and Cooling",
+ "Fans": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan1A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.1A",
+ "MinReadingRange": 600,
+ "Name": System Board Fan1A,
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17760,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.1B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan1B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.1B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan1B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15360,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan2A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.2A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan2A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17880,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.2B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan2B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.2B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan2B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15120,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan3A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.3A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan3A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 18000,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.3B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan3B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.3B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan3B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15600,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan4A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.4A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan4A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17280,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.4B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan4B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.4B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan4B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15360,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan5A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.5A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan5A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17640,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.5B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan5B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.5B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan5B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15600,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan6A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.6A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan6A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17760,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.6B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan6B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.6B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan6B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15600,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan7A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.7A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan7A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 17400,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.7B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan7B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.7B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan7B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15720,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8A",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan8A",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.8A",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan8A",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 18000,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Fans/0x17%7C%7CFan.Embedded.8B",
+ "@odata.type": "#Thermal.v1_4_0.Fan",
+ "Assembly": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Assembly"
+ },
+ "FanName": "System Board Fan8B",
+ "LowerThresholdCritical": 600,
+ "LowerThresholdFatal": 600,
+ "LowerThresholdNonCritical": 960,
+ "MaxReadingRange": null,
+ "MemberId": "0x17||Fan.Embedded.8B",
+ "MinReadingRange": 600,
+ "Name": "System Board Fan8B",
+ "PhysicalContext": "SystemBoard",
+ "Reading": 15840,
+ "ReadingUnits": "RPM",
+ "Redundancy": [],
+ "Redundancy@odata.count": 0,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": null,
+ "UpperThresholdFatal": null,
+ "UpperThresholdNonCritical": null
+ }
+ ],
+ "Fans@odata.count": 16,
+ "Id": "Thermal",
+ "Name": "Thermal",
+ "Redundancy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Thermal/Redundancy/iDRAC.Embedded.1%23SystemBoardFanRedundancy",
+ "@odata.type": "#Redundancy.v1_3_0.Redundancy",
+ "MaxNumSupported": 0,
+ "MemberId": "iDRAC.Embedded.1#SystemBoardFanRedundancy",
+ "MinNumNeeded": 0,
+ "Mode": "N+m",
+ "Name": "System Board Fan Redundancy",
+ "RedundancyEnabled": true,
+ "RedundancySet": [],
+ "RedundancySet@odata.count": 0,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ],
+ "Redundancy@odata.count": 1,
+ "Temperatures": [
+ {
+ "@odata.context": "/redfish/v1/$metadata#Thermal.Thermal",
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/Sensors/Temperatures/iDRAC.Embedded.1%23CPU1Temp",
+ "@odata.type": "#Thermal.v1_4_0.Temperature",
+ "LowerThresholdCritical": 3,
+ "LowerThresholdFatal": 3,
+ "LowerThresholdNonCritical": null,
+ "MaxReadingRangeTemp": 93,
+ "MemberId": "iDRAC.Embedded.1#CPU1Temp",
+ "MinReadingRangeTemp": 3,
+ "Name": "CPU1 Temp",
+ "PhysicalContext": "CPU",
+ "ReadingCelsius": 40,
+ "RelatedItem": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Processors/CPU.Socket.1"
+ }
+ ],
+ "RelatedItem@odata.count": 1,
+ "SensorNumber": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": 93,
+ "UpperThresholdFatal": 93,
+ "UpperThresholdNonCritical": null
+ }
+ ],
+ "Temperatures@odata.count": 4
+}
diff --git a/plugins/inputs/redfish/testdata/hp_chassis.json b/plugins/inputs/redfish/testdata/hp_chassis.json
new file mode 100644
index 0000000000000..a7da7face3f37
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/hp_chassis.json
@@ -0,0 +1,8 @@
+{
+ "Power": {
+ "@odata.id": "/redfish/v1/Chassis/1/Power"
+ },
+ "Thermal": {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal"
+ }
+}
diff --git a/plugins/inputs/redfish/testdata/hp_power.json b/plugins/inputs/redfish/testdata/hp_power.json
new file mode 100644
index 0000000000000..cc00b063fe6da
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/hp_power.json
@@ -0,0 +1,144 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.etag": "W/\"2E43EED0\"",
+ "@odata.id": "/redfish/v1/Chassis/1/Power",
+ "@odata.type": "#Power.v1_3_0.Power",
+ "Id": "Power",
+ "Name": "PowerMetrics",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpePowerMetricsExt.HpePowerMetricsExt",
+ "@odata.type": "#HpePowerMetricsExt.v2_2_0.HpePowerMetricsExt",
+ "BrownoutRecoveryEnabled": true,
+ "HasCpuPowerMetering": true,
+ "HasDimmPowerMetering": true,
+ "HasGpuPowerMetering": false,
+ "HasPowerMetering": true,
+ "HighEfficiencyMode": "Balanced",
+ "Links": {
+ "PowerMeter": {
+ "@odata.id": "/redfish/v1/Chassis/1/Power/PowerMeter"
+ },
+ "FastPowerMeter": {
+ "@odata.id": "/redfish/v1/Chassis/1/Power/FastPowerMeter"
+ },
+ "FederatedGroupCapping": {
+ "@odata.id": "/redfish/v1/Chassis/1/Power/FederatedGroupCapping"
+ }
+ },
+ "MinimumSafelyAchievableCap": null,
+ "MinimumSafelyAchievableCapValid": false,
+ "SNMPPowerThresholdAlert": {
+ "DurationInMin": 0,
+ "ThresholdWatts": 0,
+ "Trigger": "Disabled"
+ }
+ }
+ },
+ "PowerControl": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerControl/0",
+ "MemberId": "0",
+ "PowerCapacityWatts": 1600,
+ "PowerConsumedWatts": 221,
+ "PowerMetrics": {
+ "AverageConsumedWatts": 221,
+ "IntervalInMin": 20,
+ "MaxConsumedWatts": 252,
+ "MinConsumedWatts": 220
+ }
+ }
+ ],
+ "PowerSupplies": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerSupplies/0",
+ "FirmwareVersion": "1.02",
+ "LastPowerOutputWatts": 0,
+ "LineInputVoltage": 205,
+ "LineInputVoltageType": "ACHighLine",
+ "Manufacturer": "CHCNY",
+ "MemberId": "0",
+ "Model": "865414-B21",
+ "Name": "HpeServerPowerSupply",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerPowerSupply.HpeServerPowerSupply",
+ "@odata.type": "#HpeServerPowerSupply.v2_0_0.HpeServerPowerSupply",
+ "AveragePowerOutputWatts": 0,
+ "BayNumber": 1,
+ "HotplugCapable": true,
+ "MaxPowerOutputWatts": 143,
+ "Mismatched": false,
+ "PowerSupplyStatus": {
+ "State": "Ok"
+ },
+ "iPDUCapable": false
+ }
+ },
+ "PowerCapacityWatts": 800,
+ "PowerSupplyType": "AC",
+ "SerialNumber": "5WEBP0B8JAQ2K9",
+ "SparePartNumber": "866730-001",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerSupplies/1",
+ "FirmwareVersion": "1.02",
+ "LastPowerOutputWatts": 90,
+ "LineInputVoltage": 205,
+ "LineInputVoltageType": "ACHighLine",
+ "Manufacturer": "CHCNY",
+ "MemberId": "1",
+ "Model": "865414-B21",
+ "Name": "HpeServerPowerSupply",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerPowerSupply.HpeServerPowerSupply",
+ "@odata.type": "#HpeServerPowerSupply.v2_0_0.HpeServerPowerSupply",
+ "AveragePowerOutputWatts": 90,
+ "BayNumber": 2,
+ "HotplugCapable": true,
+ "MaxPowerOutputWatts": 99,
+ "Mismatched": false,
+ "PowerSupplyStatus": {
+ "State": "Ok"
+ },
+ "iPDUCapable": false
+ }
+ },
+ "PowerCapacityWatts": 800,
+ "PowerSupplyType": "AC",
+ "SerialNumber": "5WEBP0B8JAQ2KL",
+ "SparePartNumber": "866730-001",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ],
+ "Redundancy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#Redundancy/0",
+ "MaxNumSupported": 2,
+ "MemberId": "0",
+ "MinNumNeeded": 2,
+ "Mode": "Failover",
+ "Name": "PowerSupply Redundancy Group 1",
+ "RedundancySet": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerSupplies/0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerSupplies/1"
+ }
+ ],
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ]
+}
diff --git a/plugins/inputs/redfish/testdata/hp_powerinvalid.json b/plugins/inputs/redfish/testdata/hp_powerinvalid.json
new file mode 100644
index 0000000000000..b3e8a7f7a1a87
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/hp_powerinvalid.json
@@ -0,0 +1,145 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Power.Power",
+ "@odata.etag": "W/\"2E43EED0\"",
+ "@odata.id": "/redfish/v1/Chassis/1/Power",
+ "@odata.type": "#Power.v1_3_0.Power",
+ "Id": "Power",
+ "Name": "PowerMetrics",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpePowerMetricsExt.HpePowerMetricsExt",
+ "@odata.type": "#HpePowerMetricsExt.v2_2_0.HpePowerMetricsExt",
+ "BrownoutRecoveryEnabled": true,
+ "HasCpuPowerMetering": true,
+ "HasDimmPowerMetering": true,
+ "HasGpuPowerMetering": false,
+ "HasPowerMetering": true,
+ "HighEfficiencyMode": "Balanced",
+ "Links": {
+ "PowerMeter": {
+ "@odata.id": "/redfish/v1/Chassis/1/Power/PowerMeter"
+ },
+ "FastPowerMeter": {
+ "@odata.id": "/redfish/v1/Chassis/1/Power/FastPowerMeter"
+ },
+ "FederatedGroupCapping": {
+ "@odata.id": "/redfish/v1/Chassis/1/Power/FederatedGroupCapping"
+ }
+ },
+ "MinimumSafelyAchievableCap": null,
+ "MinimumSafelyAchievableCapValid": false,
+ "SNMPPowerThresholdAlert": {
+ "DurationInMin": 0,
+ "ThresholdWatts": 0,
+ "Trigger": "Disabled"
+ }
+ }
+ },
+ "PowerControl": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerControl/0",
+ "MemberId": "0",
+ "PowerCapacityWatts": 1600,
+ "PowerConsumedWatts": 221,
+ "PowerMetrics": {
+ "AverageConsumedWatts": 221,
+ "IntervalInMin": 20,
+ "MaxConsumedWatts": 252,
+ "MinConsumedWatts": 220
+ }
+ }
+ ],
+ "PowerSupplies": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerSupplies/0",
+ "FirmwareVersion": "1.02",
+ "LastPowerOutputWatts": 0,
+ "LineInputVoltage": 205,
+ "LineInputVoltageType": "ACHighLine",
+ "Manufacturer": "CHCNY",
+ "MemberId": "0",
+ "Model": "865414-B21",
+ "Name": "HpeServerPowerSupply",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerPowerSupply.HpeServerPowerSupply",
+ "@odata.type": "#HpeServerPowerSupply.v2_0_0.HpeServerPowerSupply",
+ "AveragePowerOutputWatts": 0,
+ "BayNumber": 1,
+ "HotplugCapable": true,
+ "MaxPowerOutputWatts": 143,
+ "Mismatched": false,
+ "PowerSupplyStatus": {
+ "State": "Ok"
+ },
+ "iPDUCapable": false
+ }
+ },
+ "PowerCapacityWatts": 800,
+ "PowerSupplyType": "AC",
+ "SerialNumber": "5WEBP0B8JAQ2K9",
+ "SparePartNumber": "866730-001",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerSupplies/1",
+ "FirmwareVersion": "1.02",
+ "LastPowerOutputWatts": 90,
+ "LineInputVoltage": 205,
+ "LineInputVoltageType": "ACHighLine",
+ "Manufacturer": "CHCNY",
+ "MemberId": "1",
+ "Model": "865414-B21",
+ "Name": "HpeServerPowerSupply",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerPowerSupply.HpeServerPowerSupply",
+ "@odata.type": "#HpeServerPowerSupply.v2_0_0.HpeServerPowerSupply",
+ "AveragePowerOutputWatts": 90,
+ "BayNumber": 2,
+ "HotplugCapable": true,
+ "MaxPowerOutputWatts": 99,
+ "Mismatched": false,
+ "PowerSupplyStatus": {
+ "State": "Ok"
+ },
+ "iPDUCapable": false
+ }
+ },
+ "PowerCapacityWatts": 800,
+ "PowerSupplyType": "AC",
+ "SerialNumber": "5WEBP0B8JAQ2KL",
+ "SparePartNumber": "866730-001",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ],
+ "Redundancy": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#Redundancy/0",
+ "MaxNumSupported": 2,
+ "MemberId": "0",
+ "MinNumNeeded": 2,
+ "Mode": "Failover",
+ "Name": "PowerSupply Redundancy Group 1",
+ "RedundancySet": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerSupplies/0"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Power#PowerSupplies/1"
+ }
+ ],
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ]
+}
+{
diff --git a/plugins/inputs/redfish/testdata/hp_systems.json b/plugins/inputs/redfish/testdata/hp_systems.json
new file mode 100644
index 0000000000000..2280f1bfa0343
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/hp_systems.json
@@ -0,0 +1,319 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#ComputerSystem.ComputerSystem",
+ "@odata.etag": "W/\"43E302D1\"",
+ "@odata.id": "/redfish/v1/Systems/1/",
+ "@odata.type": "#ComputerSystem.v1_4_0.ComputerSystem",
+ "Id": "1",
+ "Actions": {
+ "#ComputerSystem.Reset": {
+ "ResetType@Redfish.AllowableValues": [
+ "On",
+ "ForceOff",
+ "ForceRestart",
+ "Nmi",
+ "PushPowerButton"
+ ],
+ "target": "/redfish/v1/Systems/1/Actions/ComputerSystem.Reset/"
+ }
+ },
+ "AssetTag": "",
+ "Bios": {
+ "@odata.id": "/redfish/v1/systems/1/bios/"
+ },
+ "BiosVersion": "U32 v2.10 (05/21/2019)",
+ "Boot": {
+ "BootSourceOverrideEnabled": "Disabled",
+ "BootSourceOverrideMode": "UEFI",
+ "BootSourceOverrideTarget": "None",
+ "BootSourceOverrideTarget@Redfish.AllowableValues": [
+ "None",
+ "Cd",
+ "Hdd",
+ "Usb",
+ "SDCard",
+ "Utilities",
+ "Diags",
+ "BiosSetup",
+ "Pxe",
+ "UefiShell",
+ "UefiHttp",
+ "UefiTarget"
+ ],
+ "UefiTargetBootSourceOverride": "None",
+ "UefiTargetBootSourceOverride@Redfish.AllowableValues": [
+ "HD(1,GPT,0E3A0969-7AFC-4B55-8B24-AEFA09F33D2D,0x800,0x12C000)/\\EFI\\redhat\\shimx64.efi",
+ "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)",
+ "PciRoot(0x0)/Pci(0x1C,0x0)/Pci(0x0,0x0)/MAC(8030E0421B1C,0x1)/IPv4(0.0.0.0)/Uri()",
+ "PciRoot(0x0)/Pci(0x1C,0x0)/Pci(0x0,0x0)/MAC(8030E0421B1C,0x1)/IPv4(0.0.0.0)",
+ "PciRoot(0x3)/Pci(0x2,0x0)/Pci(0x0,0x0)/MAC(48DF37959430,0x1)/IPv4(0.0.0.0)/Uri()",
+ "PciRoot(0x3)/Pci(0x2,0x0)/Pci(0x0,0x0)/MAC(48DF37959430,0x1)/IPv4(0.0.0.0)",
+ "PciRoot(0x0)/Pci(0x1C,0x0)/Pci(0x0,0x0)/MAC(8030E0421B1C,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)/Uri()",
+ "PciRoot(0x3)/Pci(0x2,0x0)/Pci(0x0,0x0)/MAC(48DF37959430,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)/Uri()",
+ "PciRoot(0x3)/Pci(0x2,0x0)/Pci(0x0,0x0)/MAC(48DF37959430,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)",
+ "PciRoot(0x0)/Pci(0x1C,0x0)/Pci(0x0,0x0)/MAC(8030E0421B1C,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)",
+ "PciRoot(0x9)/Pci(0x0,0x0)/Pci(0x0,0x0)/MAC(B88303866AE8,0x1)/IPv4(0.0.0.0)/Uri()",
+ "PciRoot(0x9)/Pci(0x0,0x0)/Pci(0x0,0x0)/MAC(B88303866AE8,0x1)/IPv4(0.0.0.0)",
+ "PciRoot(0x9)/Pci(0x0,0x0)/Pci(0x0,0x0)/MAC(B88303866AE8,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)/Uri()",
+ "PciRoot(0x9)/Pci(0x0,0x0)/Pci(0x0,0x0)/MAC(B88303866AE8,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)",
+ "PciRoot(0x0)/Pci(0x14,0x0)/USB(0x13,0x0)",
+ "PciRoot(0x3)/Pci(0x0,0x0)/Pci(0x0,0x0)/Scsi(0x0,0x0)"
+ ]
+ },
+ "EthernetInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/1/EthernetInterfaces/"
+ },
+ "HostName": "tpa-hostname",
+ "IndicatorLED": "Off",
+ "Links": {
+ "ManagedBy": [
+ {
+ "@odata.id": "/redfish/v1/Managers/1/"
+ }
+ ],
+ "Chassis": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/"
+ }
+ ]
+ },
+ "LogServices": {
+ "@odata.id": "/redfish/v1/Systems/1/LogServices/"
+ },
+ "Manufacturer": "HPE",
+ "Memory": {
+ "@odata.id": "/redfish/v1/Systems/1/Memory/"
+ },
+ "MemoryDomains": {
+ "@odata.id": "/redfish/v1/Systems/1/MemoryDomains/"
+ },
+ "MemorySummary": {
+ "Status": {
+ "HealthRollup": "OK"
+ },
+ "TotalSystemMemoryGiB": 384,
+ "TotalSystemPersistentMemoryGiB": 0
+ },
+ "Model": "ProLiant DL360 Gen10",
+ "Name": "Computer System",
+ "NetworkInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/1/NetworkInterfaces/"
+ },
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeComputerSystemExt.HpeComputerSystemExt",
+ "@odata.type": "#HpeComputerSystemExt.v2_6_1.HpeComputerSystemExt",
+ "Actions": {
+ "#HpeComputerSystemExt.PowerButton": {
+ "PushType@Redfish.AllowableValues": [
+ "Press",
+ "PressAndHold"
+ ],
+ "target": "/redfish/v1/Systems/1/Actions/Oem/Hpe/HpeComputerSystemExt.PowerButton/"
+ },
+ "#HpeComputerSystemExt.SecureSystemErase": {
+ "target": "/redfish/v1/Systems/1/Actions/Oem/Hpe/HpeComputerSystemExt.SecureSystemErase/"
+ },
+ "#HpeComputerSystemExt.SystemReset": {
+ "ResetType@Redfish.AllowableValues": [
+ "ColdBoot",
+ "AuxCycle"
+ ],
+ "target": "/redfish/v1/Systems/1/Actions/Oem/Hpe/HpeComputerSystemExt.SystemReset/"
+ }
+ },
+ "AggregateHealthStatus": {
+ "AgentlessManagementService": "Unavailable",
+ "BiosOrHardwareHealth": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "FanRedundancy": "Redundant",
+ "Fans": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "Memory": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "Network": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "PowerSupplies": {
+ "PowerSuppliesMismatch": false,
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "PowerSupplyRedundancy": "Redundant",
+ "Processors": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "SmartStorageBattery": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "Storage": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "Temperatures": {
+ "Status": {
+ "Health": "OK"
+ }
+ }
+ },
+ "Bios": {
+ "Backup": {
+ "Date": "05/21/2019",
+ "Family": "U32",
+ "VersionString": "U32 v2.10 (05/21/2019)"
+ },
+ "Current": {
+ "Date": "05/21/2019",
+ "Family": "U32",
+ "VersionString": "U32 v2.10 (05/21/2019)"
+ },
+ "UefiClass": 2
+ },
+ "CurrentPowerOnTimeSeconds": 29290,
+ "DeviceDiscoveryComplete": {
+ "AMSDeviceDiscovery": "NoAMS",
+ "DeviceDiscovery": "vMainDeviceDiscoveryComplete",
+ "SmartArrayDiscovery": "Complete"
+ },
+ "ElapsedEraseTimeInMinutes": 0,
+ "EndOfPostDelaySeconds": null,
+ "EstimatedEraseTimeInMinutes": 0,
+ "IntelligentProvisioningAlwaysOn": true,
+ "IntelligentProvisioningIndex": 9,
+ "IntelligentProvisioningLocation": "System Board",
+ "IntelligentProvisioningVersion": "3.30.213",
+ "IsColdBooting": false,
+ "Links": {
+ "PCIDevices": {
+ "@odata.id": "/redfish/v1/Systems/1/PCIDevices/"
+ },
+ "PCISlots": {
+ "@odata.id": "/redfish/v1/Systems/1/PCISlots/"
+ },
+ "NetworkAdapters": {
+ "@odata.id": "/redfish/v1/Systems/1/BaseNetworkAdapters/"
+ },
+ "SmartStorage": {
+ "@odata.id": "/redfish/v1/Systems/1/SmartStorage/"
+ },
+ "USBPorts": {
+ "@odata.id": "/redfish/v1/Systems/1/USBPorts/"
+ },
+ "USBDevices": {
+ "@odata.id": "/redfish/v1/Systems/1/USBDevices/"
+ },
+ "EthernetInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/1/EthernetInterfaces/"
+ },
+ "WorkloadPerformanceAdvisor": {
+ "@odata.id": "/redfish/v1/Systems/1/WorkloadPerformanceAdvisor/"
+ }
+ },
+ "PCAPartNumber": "847479-002",
+ "PCASerialNumber": "PWUFL0ARHCF3KR",
+ "PostDiscoveryCompleteTimeStamp": "2020-02-24T02:43:43Z",
+ "PostDiscoveryMode": null,
+ "PostMode": null,
+ "PostState": "FinishedPost",
+ "PowerAllocationLimit": 1600,
+ "PowerAutoOn": "Restore",
+ "PowerOnDelay": "Minimum",
+ "PowerOnMinutes": 95715,
+ "PowerRegulatorMode": "Max",
+ "PowerRegulatorModesSupported": [
+ "OSControl",
+ "Dynamic",
+ "Max",
+ "Min"
+ ],
+ "ProcessorJitterControl": {
+ "ConfiguredFrequencyLimitMHz": 0,
+ "Mode": "Disabled"
+ },
+ "SMBIOS": {
+ "extref": "/smbios"
+ },
+ "ServerFQDN": "TPAVCPAR088S4.vici.verizon.com",
+ "SmartStorageConfig": [
+ {
+ "@odata.id": "/redfish/v1/systems/1/smartstorageconfig/"
+ }
+ ],
+ "SystemROMAndiLOEraseComponentStatus": {
+ "BIOSSettingsEraseStatus": "Idle",
+ "iLOSettingsEraseStatus": "Idle"
+ },
+ "SystemROMAndiLOEraseStatus": "Idle",
+ "SystemUsage": {
+ "AvgCPU0Freq": 0,
+ "AvgCPU1Freq": 6,
+ "CPU0Power": 50,
+ "CPU1Power": 51,
+ "CPUICUtil": 0,
+ "CPUUtil": 0,
+ "IOBusUtil": 0,
+ "JitterCount": 35,
+ "MemoryBusUtil": 0
+ },
+ "UserDataEraseComponentStatus": {},
+ "UserDataEraseStatus": "Idle",
+ "VirtualProfile": "Inactive"
+ }
+ },
+ "PowerState": "On",
+ "ProcessorSummary": {
+ "Count": 2,
+ "Model": "Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz",
+ "Status": {
+ "HealthRollup": "OK"
+ }
+ },
+ "Processors": {
+ "@odata.id": "/redfish/v1/Systems/1/Processors/"
+ },
+ "SKU": "867959-B21",
+ "SecureBoot": {
+ "@odata.id": "/redfish/v1/Systems/1/SecureBoot/"
+ },
+ "SerialNumber": "MXQ93003RB",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "Storage": {
+ "@odata.id": "/redfish/v1/Systems/1/Storage/"
+ },
+ "SystemType": "Physical",
+ "TrustedModules": [
+ {
+ "FirmwareVersion": "73.0",
+ "InterfaceType": "TPM1_2",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeTrustedModuleExt.HpeTrustedModuleExt",
+ "@odata.type": "#HpeTrustedModuleExt.v2_0_0.HpeTrustedModuleExt",
+ "VendorName": "STMicro"
+ }
+ },
+ "Status": {
+ "State": "Disabled"
+ }
+ }
+ ]
+}
diff --git a/plugins/inputs/redfish/testdata/hp_systemsinvalid.json b/plugins/inputs/redfish/testdata/hp_systemsinvalid.json
new file mode 100644
index 0000000000000..d07c1e5a05e9b
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/hp_systemsinvalid.json
@@ -0,0 +1,320 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#ComputerSystem.ComputerSystem",
+ "@odata.etag": "W/\"43E302D1\"",
+ "@odata.id": "/redfish/v1/Systems/1/",
+ "@odata.type": "#ComputerSystem.v1_4_0.ComputerSystem",
+ "Id": "1",
+ "Actions": {
+ "#ComputerSystem.Reset": {
+ "ResetType@Redfish.AllowableValues": [
+ "On",
+ "ForceOff",
+ "ForceRestart",
+ "Nmi",
+ "PushPowerButton"
+ ],
+ "target": "/redfish/v1/Systems/1/Actions/ComputerSystem.Reset/"
+ }
+ },
+ "AssetTag": "",
+ "Bios": {
+ "@odata.id": "/redfish/v1/systems/1/bios/"
+ },
+ "BiosVersion": "U32 v2.10 (05/21/2019)",
+ "Boot": {
+ "BootSourceOverrideEnabled": "Disabled",
+ "BootSourceOverrideMode": "UEFI",
+ "BootSourceOverrideTarget": "None",
+ "BootSourceOverrideTarget@Redfish.AllowableValues": [
+ "None",
+ "Cd",
+ "Hdd",
+ "Usb",
+ "SDCard",
+ "Utilities",
+ "Diags",
+ "BiosSetup",
+ "Pxe",
+ "UefiShell",
+ "UefiHttp",
+ "UefiTarget"
+ ],
+ "UefiTargetBootSourceOverride": "None",
+ "UefiTargetBootSourceOverride@Redfish.AllowableValues": [
+ "HD(1,GPT,0E3A0969-7AFC-4B55-8B24-AEFA09F33D2D,0x800,0x12C000)/\\EFI\\redhat\\shimx64.efi",
+ "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)",
+ "PciRoot(0x0)/Pci(0x1C,0x0)/Pci(0x0,0x0)/MAC(8030E0421B1C,0x1)/IPv4(0.0.0.0)/Uri()",
+ "PciRoot(0x0)/Pci(0x1C,0x0)/Pci(0x0,0x0)/MAC(8030E0421B1C,0x1)/IPv4(0.0.0.0)",
+ "PciRoot(0x3)/Pci(0x2,0x0)/Pci(0x0,0x0)/MAC(48DF37959430,0x1)/IPv4(0.0.0.0)/Uri()",
+ "PciRoot(0x3)/Pci(0x2,0x0)/Pci(0x0,0x0)/MAC(48DF37959430,0x1)/IPv4(0.0.0.0)",
+ "PciRoot(0x0)/Pci(0x1C,0x0)/Pci(0x0,0x0)/MAC(8030E0421B1C,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)/Uri()",
+ "PciRoot(0x3)/Pci(0x2,0x0)/Pci(0x0,0x0)/MAC(48DF37959430,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)/Uri()",
+ "PciRoot(0x3)/Pci(0x2,0x0)/Pci(0x0,0x0)/MAC(48DF37959430,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)",
+ "PciRoot(0x0)/Pci(0x1C,0x0)/Pci(0x0,0x0)/MAC(8030E0421B1C,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)",
+ "PciRoot(0x9)/Pci(0x0,0x0)/Pci(0x0,0x0)/MAC(B88303866AE8,0x1)/IPv4(0.0.0.0)/Uri()",
+ "PciRoot(0x9)/Pci(0x0,0x0)/Pci(0x0,0x0)/MAC(B88303866AE8,0x1)/IPv4(0.0.0.0)",
+ "PciRoot(0x9)/Pci(0x0,0x0)/Pci(0x0,0x0)/MAC(B88303866AE8,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)/Uri()",
+ "PciRoot(0x9)/Pci(0x0,0x0)/Pci(0x0,0x0)/MAC(B88303866AE8,0x1)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)",
+ "PciRoot(0x0)/Pci(0x14,0x0)/USB(0x13,0x0)",
+ "PciRoot(0x3)/Pci(0x0,0x0)/Pci(0x0,0x0)/Scsi(0x0,0x0)"
+ ]
+ },
+ "EthernetInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/1/EthernetInterfaces/"
+ },
+ "HostName": "tpa-hostname",
+ "IndicatorLED": "Off",
+ "Links": {
+ "ManagedBy": [
+ {
+ "@odata.id": "/redfish/v1/Managers/1/"
+ }
+ ],
+ "Chassis": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/"
+ }
+ ]
+ },
+ "LogServices": {
+ "@odata.id": "/redfish/v1/Systems/1/LogServices/"
+ },
+ "Manufacturer": "HPE",
+ "Memory": {
+ "@odata.id": "/redfish/v1/Systems/1/Memory/"
+ },
+ "MemoryDomains": {
+ "@odata.id": "/redfish/v1/Systems/1/MemoryDomains/"
+ },
+ "MemorySummary": {
+ "Status": {
+ "HealthRollup": "OK"
+ },
+ "TotalSystemMemoryGiB": 384,
+ "TotalSystemPersistentMemoryGiB": 0
+ },
+ "Model": "ProLiant DL360 Gen10",
+ "Name": "Computer System",
+ "NetworkInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/1/NetworkInterfaces/"
+ },
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeComputerSystemExt.HpeComputerSystemExt",
+ "@odata.type": "#HpeComputerSystemExt.v2_6_1.HpeComputerSystemExt",
+ "Actions": {
+ "#HpeComputerSystemExt.PowerButton": {
+ "PushType@Redfish.AllowableValues": [
+ "Press",
+ "PressAndHold"
+ ],
+ "target": "/redfish/v1/Systems/1/Actions/Oem/Hpe/HpeComputerSystemExt.PowerButton/"
+ },
+ "#HpeComputerSystemExt.SecureSystemErase": {
+ "target": "/redfish/v1/Systems/1/Actions/Oem/Hpe/HpeComputerSystemExt.SecureSystemErase/"
+ },
+ "#HpeComputerSystemExt.SystemReset": {
+ "ResetType@Redfish.AllowableValues": [
+ "ColdBoot",
+ "AuxCycle"
+ ],
+ "target": "/redfish/v1/Systems/1/Actions/Oem/Hpe/HpeComputerSystemExt.SystemReset/"
+ }
+ },
+ "AggregateHealthStatus": {
+ "AgentlessManagementService": "Unavailable",
+ "BiosOrHardwareHealth": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "FanRedundancy": "Redundant",
+ "Fans": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "Memory": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "Network": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "PowerSupplies": {
+ "PowerSuppliesMismatch": false,
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "PowerSupplyRedundancy": "Redundant",
+ "Processors": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "SmartStorageBattery": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "Storage": {
+ "Status": {
+ "Health": "OK"
+ }
+ },
+ "Temperatures": {
+ "Status": {
+ "Health": "OK"
+ }
+ }
+ },
+ "Bios": {
+ "Backup": {
+ "Date": "05/21/2019",
+ "Family": "U32",
+ "VersionString": "U32 v2.10 (05/21/2019)"
+ },
+ "Current": {
+ "Date": "05/21/2019",
+ "Family": "U32",
+ "VersionString": "U32 v2.10 (05/21/2019)"
+ },
+ "UefiClass": 2
+ },
+ "CurrentPowerOnTimeSeconds": 29290,
+ "DeviceDiscoveryComplete": {
+ "AMSDeviceDiscovery": "NoAMS",
+ "DeviceDiscovery": "vMainDeviceDiscoveryComplete",
+ "SmartArrayDiscovery": "Complete"
+ },
+ "ElapsedEraseTimeInMinutes": 0,
+ "EndOfPostDelaySeconds": null,
+ "EstimatedEraseTimeInMinutes": 0,
+ "IntelligentProvisioningAlwaysOn": true,
+ "IntelligentProvisioningIndex": 9,
+ "IntelligentProvisioningLocation": "System Board",
+ "IntelligentProvisioningVersion": "3.30.213",
+ "IsColdBooting": false,
+ "Links": {
+ "PCIDevices": {
+ "@odata.id": "/redfish/v1/Systems/1/PCIDevices/"
+ },
+ "PCISlots": {
+ "@odata.id": "/redfish/v1/Systems/1/PCISlots/"
+ },
+ "NetworkAdapters": {
+ "@odata.id": "/redfish/v1/Systems/1/BaseNetworkAdapters/"
+ },
+ "SmartStorage": {
+ "@odata.id": "/redfish/v1/Systems/1/SmartStorage/"
+ },
+ "USBPorts": {
+ "@odata.id": "/redfish/v1/Systems/1/USBPorts/"
+ },
+ "USBDevices": {
+ "@odata.id": "/redfish/v1/Systems/1/USBDevices/"
+ },
+ "EthernetInterfaces": {
+ "@odata.id": "/redfish/v1/Systems/1/EthernetInterfaces/"
+ },
+ "WorkloadPerformanceAdvisor": {
+ "@odata.id": "/redfish/v1/Systems/1/WorkloadPerformanceAdvisor/"
+ }
+ },
+ "PCAPartNumber": "847479-002",
+ "PCASerialNumber": "PWUFL0ARHCF3KR",
+ "PostDiscoveryCompleteTimeStamp": "2020-02-24T02:43:43Z",
+ "PostDiscoveryMode": null,
+ "PostMode": null,
+ "PostState": "FinishedPost",
+ "PowerAllocationLimit": 1600,
+ "PowerAutoOn": "Restore",
+ "PowerOnDelay": "Minimum",
+ "PowerOnMinutes": 95715,
+ "PowerRegulatorMode": "Max",
+ "PowerRegulatorModesSupported": [
+ "OSControl",
+ "Dynamic",
+ "Max",
+ "Min"
+ ],
+ "ProcessorJitterControl": {
+ "ConfiguredFrequencyLimitMHz": 0,
+ "Mode": "Disabled"
+ },
+ "SMBIOS": {
+ "extref": "/smbios"
+ },
+ "ServerFQDN": "TPAVCPAR088S4.vici.verizon.com",
+ "SmartStorageConfig": [
+ {
+ "@odata.id": "/redfish/v1/systems/1/smartstorageconfig/"
+ }
+ ],
+ "SystemROMAndiLOEraseComponentStatus": {
+ "BIOSSettingsEraseStatus": "Idle",
+ "iLOSettingsEraseStatus": "Idle"
+ },
+ "SystemROMAndiLOEraseStatus": "Idle",
+ "SystemUsage": {
+ "AvgCPU0Freq": 0,
+ "AvgCPU1Freq": 6,
+ "CPU0Power": 50,
+ "CPU1Power": 51,
+ "CPUICUtil": 0,
+ "CPUUtil": 0,
+ "IOBusUtil": 0,
+ "JitterCount": 35,
+ "MemoryBusUtil": 0
+ },
+ "UserDataEraseComponentStatus": {},
+ "UserDataEraseStatus": "Idle",
+ "VirtualProfile": "Inactive"
+ }
+ },
+ "PowerState": "On",
+ "ProcessorSummary": {
+ "Count": 2,
+ "Model": "Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz",
+ "Status": {
+ "HealthRollup": "OK"
+ }
+ },
+ "Processors": {
+ "@odata.id": "/redfish/v1/Systems/1/Processors/"
+ },
+ "SKU": "867959-B21",
+ "SecureBoot": {
+ "@odata.id": "/redfish/v1/Systems/1/SecureBoot/"
+ },
+ "SerialNumber": "MXQ93003RB",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "Storage": {
+ "@odata.id": "/redfish/v1/Systems/1/Storage/"
+ },
+ "SystemType": "Physical",
+ "TrustedModules": [
+ {
+ "FirmwareVersion": "73.0",
+ "InterfaceType": "TPM1_2",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeTrustedModuleExt.HpeTrustedModuleExt",
+ "@odata.type": "#HpeTrustedModuleExt.v2_0_0.HpeTrustedModuleExt",
+ "VendorName": "STMicro"
+ }
+ },
+ "Status": {
+ "State": "Disabled"
+ }
+ }
+ ]
+}
+{
diff --git a/plugins/inputs/redfish/testdata/hp_thermal.json b/plugins/inputs/redfish/testdata/hp_thermal.json
new file mode 100644
index 0000000000000..47c2ed1393f8a
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/hp_thermal.json
@@ -0,0 +1,116 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Thermal.Thermal",
+ "@odata.etag": "W/\"14E8662D\"",
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal",
+ "@odata.type": "#Thermal.v1_1_0.Thermal",
+ "Id": "Thermal",
+ "Fans": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Fans/0",
+ "MemberId": "0",
+ "Name": "Fan 1",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerFan.HpeServerFan",
+ "@odata.type": "#HpeServerFan.v2_0_0.HpeServerFan",
+ "HotPluggable": true,
+ "Location": "System",
+ "Redundant": true
+ }
+ },
+ "Reading": 23,
+ "ReadingUnits": "Percent",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Fans/1",
+ "MemberId": "1",
+ "Name": "Fan 2",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerFan.HpeServerFan",
+ "@odata.type": "#HpeServerFan.v2_0_0.HpeServerFan",
+ "HotPluggable": true,
+ "Location": "System",
+ "Redundant": true
+ }
+ },
+ "Reading": 23,
+ "ReadingUnits": "Percent",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Fans/2",
+ "MemberId": "2",
+ "Name": "Fan 3",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerFan.HpeServerFan",
+ "@odata.type": "#HpeServerFan.v2_0_0.HpeServerFan",
+ "HotPluggable": true,
+ "Location": "System",
+ "Redundant": true
+ }
+ },
+ "Reading": 23,
+ "ReadingUnits": "Percent",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ],
+ "Name": "Thermal",
+ "Temperatures": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Temperatures/0",
+ "MemberId": "0",
+ "Name": "01-Inlet Ambient",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeSeaOfSensors.HpeSeaOfSensors",
+ "@odata.type": "#HpeSeaOfSensors.v2_0_0.HpeSeaOfSensors",
+ "LocationXmm": 15,
+ "LocationYmm": 0
+ }
+ },
+ "PhysicalContext": "Intake",
+ "ReadingCelsius": 19,
+ "SensorNumber": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": 42,
+ "UpperThresholdFatal": 47
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Temperatures/42",
+ "MemberId": "42",
+ "Name": "44-P/S 2 Zone",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeSeaOfSensors.HpeSeaOfSensors",
+ "@odata.type": "#HpeSeaOfSensors.v2_0_0.HpeSeaOfSensors",
+ "LocationXmm": 4,
+ "LocationYmm": 7
+ }
+ },
+ "PhysicalContext": "PowerSupply",
+ "ReadingCelsius": 34,
+ "SensorNumber": 43,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": 75,
+ "UpperThresholdFatal": 80
+ }
+ ]
+}
diff --git a/plugins/inputs/redfish/testdata/hp_thermalinvalid.json b/plugins/inputs/redfish/testdata/hp_thermalinvalid.json
new file mode 100644
index 0000000000000..e7f246310ec6f
--- /dev/null
+++ b/plugins/inputs/redfish/testdata/hp_thermalinvalid.json
@@ -0,0 +1,117 @@
+{
+ "@odata.context": "/redfish/v1/$metadata#Thermal.Thermal",
+ "@odata.etag": "W/\"14E8662D\"",
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal",
+ "@odata.type": "#Thermal.v1_1_0.Thermal",
+ "Id": "Thermal",
+ "Fans": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Fans/0",
+ "MemberId": "0",
+ "Name": "Fan 1",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerFan.HpeServerFan",
+ "@odata.type": "#HpeServerFan.v2_0_0.HpeServerFan",
+ "HotPluggable": true,
+ "Location": "System",
+ "Redundant": true
+ }
+ },
+ "Reading": 23,
+ "ReadingUnits": "Percent",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Fans/1",
+ "MemberId": "1",
+ "Name": "Fan 2",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerFan.HpeServerFan",
+ "@odata.type": "#HpeServerFan.v2_0_0.HpeServerFan",
+ "HotPluggable": true,
+ "Location": "System",
+ "Redundant": true
+ }
+ },
+ "Reading": 23,
+ "ReadingUnits": "Percent",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Fans/2",
+ "MemberId": "2",
+ "Name": "Fan 3",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeServerFan.HpeServerFan",
+ "@odata.type": "#HpeServerFan.v2_0_0.HpeServerFan",
+ "HotPluggable": true,
+ "Location": "System",
+ "Redundant": true
+ }
+ },
+ "Reading": 23,
+ "ReadingUnits": "Percent",
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ }
+ }
+ ],
+ "Name": "Thermal",
+ "Temperatures": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Temperatures/0",
+ "MemberId": "0",
+ "Name": "01-Inlet Ambient",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeSeaOfSensors.HpeSeaOfSensors",
+ "@odata.type": "#HpeSeaOfSensors.v2_0_0.HpeSeaOfSensors",
+ "LocationXmm": 15,
+ "LocationYmm": 0
+ }
+ },
+ "PhysicalContext": "Intake",
+ "ReadingCelsius": 19,
+ "SensorNumber": 1,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": 42,
+ "UpperThresholdFatal": 47
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/1/Thermal#Temperatures/42",
+ "MemberId": "42",
+ "Name": "44-P/S 2 Zone",
+ "Oem": {
+ "Hpe": {
+ "@odata.context": "/redfish/v1/$metadata#HpeSeaOfSensors.HpeSeaOfSensors",
+ "@odata.type": "#HpeSeaOfSensors.v2_0_0.HpeSeaOfSensors",
+ "LocationXmm": 4,
+ "LocationYmm": 7
+ }
+ },
+ "PhysicalContext": "PowerSupply",
+ "ReadingCelsius": 34,
+ "SensorNumber": 43,
+ "Status": {
+ "Health": "OK",
+ "State": "Enabled"
+ },
+ "UpperThresholdCritical": 75,
+ "UpperThresholdFatal": 80
+ }
+ ]
+}
+{
diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md
index 79122f22851f7..f62b9db6e3f61 100644
--- a/plugins/inputs/redis/README.md
+++ b/plugins/inputs/redis/README.md
@@ -1,8 +1,8 @@
-# Telegraf Plugin: Redis
+# Redis Input Plugin
### Configuration:
-```
+```toml
# Read Redis's basic status information
[[inputs.redis]]
## specify servers via a url matching:
@@ -80,8 +80,8 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a
- instantaneous_ops_per_sec(int, number)
- total_net_input_bytes(int, bytes)
- total_net_output_bytes(int, bytes)
- - instantaneous_input_kbps(float, bytes)
- - instantaneous_output_kbps(float, bytes)
+ - instantaneous_input_kbps(float, KB/sec)
+ - instantaneous_output_kbps(float, KB/sec)
- rejected_connections(int, number)
- sync_full(int, number)
- sync_partial_ok(int, number)
@@ -120,6 +120,23 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a
- expires(int, number)
- avg_ttl(int, number)
+- redis_cmdstat
+ Every Redis used command will have 3 new fields:
+ - calls(int, number)
+ - usec(int, mircoseconds)
+ - usec_per_call(float, microseconds)
+
+- redis_replication
+ - tags:
+ - replication_role
+ - replica_ip
+ - replica_port
+ - state (either "online", "wait_bgsave", or "send_bulk")
+
+ - fields:
+ - lag(int, number)
+ - offset(int, number)
+
### Tags:
- All measurements have the following tags:
@@ -130,10 +147,13 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a
- The redis_keyspace measurement has an additional database tag:
- database
+- The redis_cmdstat measurement has an additional tag:
+ - command
+
### Example Output:
Using this configuration:
-```
+```toml
[[inputs.redis]]
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
@@ -161,3 +181,8 @@ redis_keyspace:
```
> redis_keyspace,database=db1,host=host,server=localhost,port=6379,replication_role=master keys=1i,expires=0i,avg_ttl=0i 1493101350000000000
```
+
+redis_command:
+```
+> redis_cmdstat,command=publish,host=host,port=6379,replication_role=master,server=localhost calls=68113i,usec=325146i,usec_per_call=4.77 1559227136000000000
+```
diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go
index cd438397c2318..5e32afef5c65f 100644
--- a/plugins/inputs/redis/redis.go
+++ b/plugins/inputs/redis/redis.go
@@ -4,8 +4,8 @@ import (
"bufio"
"fmt"
"io"
- "log"
"net/url"
+ "regexp"
"strconv"
"strings"
"sync"
@@ -13,7 +13,7 @@ import (
"github.com/go-redis/redis"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -22,6 +22,8 @@ type Redis struct {
Password string
tls.ClientConfig
+ Log telegraf.Logger
+
clients []Client
initialized bool
}
@@ -37,7 +39,7 @@ type RedisClient struct {
}
func (r *RedisClient) Info() *redis.StringCmd {
- return r.client.Info()
+ return r.client.Info("ALL")
}
func (r *RedisClient) BaseTags() map[string]string {
@@ -48,6 +50,8 @@ func (r *RedisClient) BaseTags() map[string]string {
return tags
}
+var replicationSlaveMetricPrefix = regexp.MustCompile(`^slave\d+`)
+
var sampleConfig = `
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
@@ -98,13 +102,13 @@ func (r *Redis) init(acc telegraf.Accumulator) error {
for i, serv := range r.Servers {
if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") {
- log.Printf("W! [inputs.redis]: server URL found without scheme; please update your configuration file")
+ r.Log.Warn("Server URL found without scheme; please update your configuration file")
serv = "tcp://" + serv
}
u, err := url.Parse(serv)
if err != nil {
- return fmt.Errorf("Unable to parse to address %q: %v", serv, err)
+ return fmt.Errorf("unable to parse to address %q: %s", serv, err.Error())
}
password := ""
@@ -248,11 +252,25 @@ func gatherInfoOutput(
gatherKeyspaceLine(name, kline, acc, tags)
continue
}
+ if section == "Commandstats" {
+ kline := strings.TrimSpace(parts[1])
+ gatherCommandstateLine(name, kline, acc, tags)
+ continue
+ }
+ if section == "Replication" && replicationSlaveMetricPrefix.MatchString(name) {
+ kline := strings.TrimSpace(parts[1])
+ gatherReplicationLine(name, kline, acc, tags)
+ continue
+ }
+
metric = name
}
val := strings.TrimSpace(parts[1])
+ // Some percentage values have a "%" suffix that we need to get rid of before int/float conversion
+ val = strings.TrimSuffix(val, "%")
+
// Try parsing as int
if ival, err := strconv.ParseInt(val, 10, 64); err == nil {
switch name {
@@ -321,6 +339,95 @@ func gatherKeyspaceLine(
}
}
+// Parse the special cmdstat lines.
+// Example:
+// cmdstat_publish:calls=33791,usec=208789,usec_per_call=6.18
+// Tag: cmdstat=publish; Fields: calls=33791i,usec=208789i,usec_per_call=6.18
+func gatherCommandstateLine(
+ name string,
+ line string,
+ acc telegraf.Accumulator,
+ global_tags map[string]string,
+) {
+ if !strings.HasPrefix(name, "cmdstat") {
+ return
+ }
+
+ fields := make(map[string]interface{})
+ tags := make(map[string]string)
+ for k, v := range global_tags {
+ tags[k] = v
+ }
+ tags["command"] = strings.TrimPrefix(name, "cmdstat_")
+ parts := strings.Split(line, ",")
+ for _, part := range parts {
+ kv := strings.Split(part, "=")
+ if len(kv) != 2 {
+ continue
+ }
+
+ switch kv[0] {
+ case "calls":
+ fallthrough
+ case "usec":
+ ival, err := strconv.ParseInt(kv[1], 10, 64)
+ if err == nil {
+ fields[kv[0]] = ival
+ }
+ case "usec_per_call":
+ fval, err := strconv.ParseFloat(kv[1], 64)
+ if err == nil {
+ fields[kv[0]] = fval
+ }
+ }
+ }
+ acc.AddFields("redis_cmdstat", fields, tags)
+}
+
+// Parse the special Replication line
+// Example:
+// slave0:ip=127.0.0.1,port=7379,state=online,offset=4556468,lag=0
+// This line will only be visible when a node has a replica attached.
+func gatherReplicationLine(
+ name string,
+ line string,
+ acc telegraf.Accumulator,
+ global_tags map[string]string,
+) {
+ fields := make(map[string]interface{})
+ tags := make(map[string]string)
+ for k, v := range global_tags {
+ tags[k] = v
+ }
+
+ tags["replica_id"] = strings.TrimLeft(name, "slave")
+ tags["replication_role"] = "slave"
+
+ parts := strings.Split(line, ",")
+ for _, part := range parts {
+ kv := strings.Split(part, "=")
+ if len(kv) != 2 {
+ continue
+ }
+
+ switch kv[0] {
+ case "ip":
+ tags["replica_ip"] = kv[1]
+ case "port":
+ tags["replica_port"] = kv[1]
+ case "state":
+ tags[kv[0]] = kv[1]
+ default:
+ ival, err := strconv.ParseInt(kv[1], 10, 64)
+ if err == nil {
+ fields[kv[0]] = ival
+ }
+ }
+ }
+
+ acc.AddFields("redis_replication", fields, tags)
+}
+
func init() {
inputs.Add("redis", func() telegraf.Input {
return &Redis{}
diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go
index fd16bbdd91eb4..637b464f95e99 100644
--- a/plugins/inputs/redis/redis_test.go
+++ b/plugins/inputs/redis/redis_test.go
@@ -20,6 +20,7 @@ func TestRedisConnect(t *testing.T) {
addr := fmt.Sprintf(testutil.GetLocalHost() + ":6379")
r := &Redis{
+ Log: testutil.Logger{},
Servers: []string{addr},
}
@@ -49,6 +50,8 @@ func TestRedis_ParseMetrics(t *testing.T) {
"used_memory_rss": int64(811008),
"used_memory_peak": int64(1003936),
"used_memory_lua": int64(33792),
+ "used_memory_peak_perc": float64(93.58),
+ "used_memory_dataset_perc": float64(20.27),
"mem_fragmentation_ratio": float64(0.81),
"loading": int64(0),
"rdb_changes_since_last_save": int64(0),
@@ -80,7 +83,7 @@ func TestRedis_ParseMetrics(t *testing.T) {
"pubsub_channels": int64(0),
"pubsub_patterns": int64(0),
"latest_fork_usec": int64(0),
- "connected_slaves": int64(0),
+ "connected_slaves": int64(2),
"master_repl_offset": int64(0),
"repl_backlog_active": int64(0),
"repl_backlog_size": int64(1048576),
@@ -116,6 +119,52 @@ func TestRedis_ParseMetrics(t *testing.T) {
}
acc.AssertContainsTaggedFields(t, "redis", fields, tags)
acc.AssertContainsTaggedFields(t, "redis_keyspace", keyspaceFields, keyspaceTags)
+
+ cmdstatSetTags := map[string]string{"host": "redis.net", "replication_role": "master", "command": "set"}
+ cmdstatSetFields := map[string]interface{}{
+ "calls": int64(261265),
+ "usec": int64(1634157),
+ "usec_per_call": float64(6.25),
+ }
+ acc.AssertContainsTaggedFields(t, "redis_cmdstat", cmdstatSetFields, cmdstatSetTags)
+
+ cmdstatCommandTags := map[string]string{"host": "redis.net", "replication_role": "master", "command": "command"}
+ cmdstatCommandFields := map[string]interface{}{
+ "calls": int64(1),
+ "usec": int64(990),
+ "usec_per_call": float64(990.0),
+ }
+ acc.AssertContainsTaggedFields(t, "redis_cmdstat", cmdstatCommandFields, cmdstatCommandTags)
+
+ replicationTags := map[string]string{
+ "host": "redis.net",
+ "replication_role": "slave",
+ "replica_id": "0",
+ "replica_ip": "127.0.0.1",
+ "replica_port": "7379",
+ "state": "online",
+ }
+ replicationFields := map[string]interface{}{
+ "lag": int64(0),
+ "offset": int64(4556468),
+ }
+
+ acc.AssertContainsTaggedFields(t, "redis_replication", replicationFields, replicationTags)
+
+ replicationTags = map[string]string{
+ "host": "redis.net",
+ "replication_role": "slave",
+ "replica_id": "1",
+ "replica_ip": "127.0.0.1",
+ "replica_port": "8379",
+ "state": "send_bulk",
+ }
+ replicationFields = map[string]interface{}{
+ "lag": int64(1),
+ "offset": int64(0),
+ }
+
+ acc.AssertContainsTaggedFields(t, "redis_replication", replicationFields, replicationTags)
}
const testOutput = `# Server
@@ -152,6 +201,8 @@ used_memory_peak_human:980.41K
used_memory_lua:33792
mem_fragmentation_ratio:0.81
mem_allocator:libc
+used_memory_peak_perc:93.58%
+used_memory_dataset_perc:20.27%
# Persistence
loading:0
@@ -189,7 +240,9 @@ latest_fork_usec:0
# Replication
role:master
-connected_slaves:0
+connected_slaves:2
+slave0:ip=127.0.0.1,port=7379,state=online,offset=4556468,lag=0
+slave1:ip=127.0.0.1,port=8379,state=send_bulk,offset=0,lag=1
master_replid:8c4d7b768b26826825ceb20ff4a2c7c54616350b
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:0
@@ -205,6 +258,10 @@ used_cpu_user:0.05
used_cpu_sys_children:0.00
used_cpu_user_children:0.00
+# Commandstats
+cmdstat_set:calls=261265,usec=1634157,usec_per_call=6.25
+cmdstat_command:calls=1,usec=990,usec_per_call=990.00
+
# Keyspace
db0:keys=2,expires=0,avg_ttl=0
diff --git a/plugins/inputs/rethinkdb/README.md b/plugins/inputs/rethinkdb/README.md
new file mode 100644
index 0000000000000..b1946644ea13a
--- /dev/null
+++ b/plugins/inputs/rethinkdb/README.md
@@ -0,0 +1,61 @@
+# RethinkDB Input Plugin
+
+Collect metrics from [RethinkDB](https://www.rethinkdb.com/).
+
+### Configuration
+
+This section contains the default TOML to configure the plugin. You can
+generate it using `telegraf --usage rethinkdb`.
+
+```toml
+[[inputs.rethinkdb]]
+ ## An array of URI to gather stats about. Specify an ip or hostname
+ ## with optional port add password. ie,
+ ## rethinkdb://user:auth_key@10.10.3.30:28105,
+ ## rethinkdb://10.10.3.33:18832,
+ ## 10.0.0.1:10000, etc.
+ servers = ["127.0.0.1:28015"]
+
+ ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
+ ## protocol have to be named "rethinkdb2" - it will use 1_0 H.
+ # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
+
+ ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
+ ## have to be named "rethinkdb".
+ # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
+```
+
+### Metrics
+
+- rethinkdb
+ - tags:
+ - type
+ - ns
+ - rethinkdb_host
+ - rethinkdb_hostname
+ - fields:
+ - cache_bytes_in_use (integer, bytes)
+ - disk_read_bytes_per_sec (integer, reads)
+ - disk_read_bytes_total (integer, bytes)
+ - disk_written_bytes_per_sec (integer, bytes)
+ - disk_written_bytes_total (integer, bytes)
+ - disk_usage_data_bytes (integer, bytes)
+ - disk_usage_garbage_bytes (integer, bytes)
+ - disk_usage_metadata_bytes (integer, bytes)
+ - disk_usage_preallocated_bytes (integer, bytes)
+
++ rethinkdb_engine
+ - tags:
+ - type
+ - ns
+ - rethinkdb_host
+ - rethinkdb_hostname
+ - fields:
+ - active_clients (integer, clients)
+ - clients (integer, clients)
+ - queries_per_sec (integer, queries)
+ - total_queries (integer, queries)
+ - read_docs_per_sec (integer, reads)
+ - total_reads (integer, reads)
+ - written_docs_per_sec (integer, writes)
+ - total_writes (integer, writes)
diff --git a/plugins/inputs/riak/README.md b/plugins/inputs/riak/README.md
index c36624d273dd7..a435eea4d7f63 100644
--- a/plugins/inputs/riak/README.md
+++ b/plugins/inputs/riak/README.md
@@ -1,4 +1,4 @@
-# Riak Plugin
+# Riak Input Plugin
The Riak plugin gathers metrics from one or more riak instances.
diff --git a/plugins/inputs/riak/riak.go b/plugins/inputs/riak/riak.go
index 9ddbbfa651351..19f6222890360 100644
--- a/plugins/inputs/riak/riak.go
+++ b/plugins/inputs/riak/riak.go
@@ -127,7 +127,7 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error {
// Successful responses will always return status code 200
if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("riak responded with unexepcted status code %d", resp.StatusCode)
+ return fmt.Errorf("riak responded with unexpected status code %d", resp.StatusCode)
}
// Decode the response JSON into a new stats struct
diff --git a/plugins/inputs/salesforce/README.md b/plugins/inputs/salesforce/README.md
index 5ee0f6a3db4bd..6883f3a90b85f 100644
--- a/plugins/inputs/salesforce/README.md
+++ b/plugins/inputs/salesforce/README.md
@@ -10,7 +10,7 @@ It fetches its data from the [limits endpoint](https://developer.salesforce.com/
[[inputs.salesforce]]
username = "your_username"
password = "your_password"
- ## (Optional) security tokjen
+ ## (Optional) security token
security_token = "your_security_token"
## (Optional) environment type (sandbox or production)
## default is: production
@@ -21,7 +21,7 @@ It fetches its data from the [limits endpoint](https://developer.salesforce.com/
### Measurements & Fields:
-Salesforce provide one measurment named "salesforce".
+Salesforce provide one measurement named "salesforce".
Each entry is converted to snake\_case and 2 fields are created.
- \_max represents the limit threshold
diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go
index 096550db5982f..b66266d3f17d2 100644
--- a/plugins/inputs/salesforce/salesforce.go
+++ b/plugins/inputs/salesforce/salesforce.go
@@ -5,6 +5,7 @@ import (
"encoding/xml"
"errors"
"fmt"
+ "io"
"io/ioutil"
"net/http"
"net/url"
@@ -165,7 +166,7 @@ func (s *Salesforce) getLoginEndpoint() (string, error) {
}
}
-// Authenticate with Salesfroce
+// Authenticate with Salesforce
func (s *Salesforce) login() error {
if s.Username == "" || s.Password == "" {
return errors.New("missing username or password")
@@ -200,6 +201,11 @@ func (s *Salesforce) login() error {
return err
}
defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+ body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200))
+ return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body)
+ }
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
diff --git a/plugins/inputs/sensors/README.md b/plugins/inputs/sensors/README.md
index 9075bda727846..d9bcfe2e4544d 100644
--- a/plugins/inputs/sensors/README.md
+++ b/plugins/inputs/sensors/README.md
@@ -1,4 +1,4 @@
-# sensors Input Plugin
+# LM Sensors Input Plugin
Collect [lm-sensors](https://en.wikipedia.org/wiki/Lm_sensors) metrics - requires the lm-sensors
package installed.
@@ -6,7 +6,7 @@ package installed.
This plugin collects sensor metrics with the `sensors` executable from the lm-sensor package.
### Configuration:
-```
+```toml
# Monitor sensors, requires lm-sensors package
[[inputs.sensors]]
## Remove numbers from field names.
@@ -18,7 +18,7 @@ This plugin collects sensor metrics with the `sensors` executable from the lm-se
```
### Measurements & Fields:
-Fields are created dynamicaly depending on the sensors. All fields are float.
+Fields are created dynamically depending on the sensors. All fields are float.
### Tags:
diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go
index 864da812123fe..1df88466be2e9 100644
--- a/plugins/inputs/sensors/sensors.go
+++ b/plugins/inputs/sensors/sensors.go
@@ -60,7 +60,7 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error {
fields := map[string]interface{}{}
chip := ""
cmd := execCommand(s.path, "-A", "-u")
- out, err := internal.CombinedOutputTimeout(cmd, s.Timeout.Duration)
+ out, err := internal.StdOutputTimeout(cmd, s.Timeout.Duration)
if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
}
diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md
new file mode 100644
index 0000000000000..66d556e17c694
--- /dev/null
+++ b/plugins/inputs/sflow/README.md
@@ -0,0 +1,121 @@
+# SFlow Input Plugin
+
+The SFlow Input Plugin provides support for acting as an SFlow V5 collector in
+accordance with the specification from [sflow.org](https://sflow.org/).
+
+Currently only Flow Samples of Ethernet / IPv4 & IPv4 TCP & UDP headers are
+turned into metrics. Counters and other header samples are ignored.
+
+#### Series Cardinality Warning
+
+This plugin may produce a high number of series which, when not controlled
+for, will cause high load on your database. Use the following techniques to
+avoid cardinality issues:
+
+- Use [metric filtering][] options to exclude unneeded measurements and tags.
+- Write to a database with an appropriate [retention policy][].
+- Limit series cardinality in your database using the
+ [max-series-per-database][] and [max-values-per-tag][] settings.
+- Consider using the [Time Series Index][tsi].
+- Monitor your databases [series cardinality][].
+- Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques.
+
+### Configuration
+
+```toml
+[[inputs.sflow]]
+ ## Address to listen for sFlow packets.
+ ## example: service_address = "udp://:6343"
+ ## service_address = "udp4://:6343"
+ ## service_address = "udp6://:6343"
+ service_address = "udp://:6343"
+
+ ## Set the size of the operating system's receive buffer.
+ ## example: read_buffer_size = "64KiB"
+ # read_buffer_size = ""
+```
+
+### Metrics
+
+- sflow
+ - tags:
+ - agent_address (IP address of the agent that obtained the sflow sample and sent it to this collector)
+ - source_id_type(source_id_type field of flow_sample or flow_sample_expanded structures)
+ - source_id_index(source_id_index field of flow_sample or flow_sample_expanded structures)
+ - input_ifindex (value (input) field of flow_sample or flow_sample_expanded structures)
+ - output_ifindex (value (output) field of flow_sample or flow_sample_expanded structures)
+ - sample_direction (source_id_index, netif_index_in and netif_index_out)
+ - header_protocol (header_protocol field of sampled_header structures)
+ - ether_type (eth_type field of an ETHERNET-ISO88023 header)
+ - src_ip (source_ipaddr field of IPv4 or IPv6 structures)
+ - src_port (src_port field of TCP or UDP structures)
+ - src_port_name (src_port)
+ - src_mac (source_mac_addr field of an ETHERNET-ISO88023 header)
+ - src_vlan (src_vlan field of extended_switch structure)
+ - src_priority (src_priority field of extended_switch structure)
+ - src_mask_len (src_mask_len field of extended_router structure)
+ - dst_ip (destination_ipaddr field of IPv4 or IPv6 structures)
+ - dst_port (dst_port field of TCP or UDP structures)
+ - dst_port_name (dst_port)
+ - dst_mac (destination_mac_addr field of an ETHERNET-ISO88023 header)
+ - dst_vlan (dst_vlan field of extended_switch structure)
+ - dst_priority (dst_priority field of extended_switch structure)
+ - dst_mask_len (dst_mask_len field of extended_router structure)
+ - next_hop (next_hop field of extended_router structure)
+ - ip_version (ip_ver field of IPv4 or IPv6 structures)
+ - ip_protocol (ip_protocol field of IPv4 or IPv6 structures)
+ - ip_dscp (ip_dscp field of IPv4 or IPv6 structures)
+ - ip_ecn (ecn field of IPv4 or IPv6 structures)
+ - tcp_urgent_pointer (urgent_pointer field of TCP structure)
+ - fields:
+ - bytes (integer, the product of frame_length and packets)
+ - drops (integer, drops field of flow_sample or flow_sample_expanded structures)
+ - packets (integer, sampling_rate field of flow_sample or flow_sample_expanded structures)
+ - frame_length (integer, frame_length field of sampled_header structures)
+ - header_size (integer, header_size field of sampled_header structures)
+ - ip_fragment_offset (integer, ip_ver field of IPv4 structures)
+ - ip_header_length (integer, ip_ver field of IPv4 structures)
+ - ip_total_length (integer, ip_total_len field of IPv4 structures)
+ - ip_ttl (integer, ip_ttl field of IPv4 structures or ip_hop_limit field IPv6 structures)
+ - tcp_header_length (integer, size field of TCP structure. This value is specified in 32-bit words. It must be multiplied by 4 to produce a value in bytes.)
+ - tcp_window_size (integer, window_size field of TCP structure)
+ - udp_length (integer, length field of UDP structures)
+ - ip_flags (integer, ip_ver field of IPv4 structures)
+ - tcp_flags (integer, TCP flags of TCP IP header (IPv4 or IPv6))
+
+### Troubleshooting
+
+The [sflowtool][] utility can be used to print sFlow packets, and compared
+against the metrics produced by Telegraf.
+```
+sflowtool -p 6343
+```
+
+If opening an issue, in addition to the output of sflowtool it will also be
+helpful to collect a packet capture. Adjust the interface, host and port as
+needed:
+```
+$ sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343
+```
+
+[sflowtool]: https://github.com/sflow/sflowtool
+
+### Example Output
+```
+sflow,agent_address=0.0.0.0,dst_ip=10.0.0.2,dst_mac=ff:ff:ff:ff:ff:ff,dst_port=40042,ether_type=IPv4,header_protocol=ETHERNET-ISO88023,input_ifindex=6,ip_dscp=27,ip_ecn=0,output_ifindex=1073741823,source_id_index=3,source_id_type=0,src_ip=10.0.0.1,src_mac=ff:ff:ff:ff:ff:ff,src_port=443 bytes=1570i,drops=0i,frame_length=157i,header_length=128i,ip_flags=2i,ip_fragment_offset=0i,ip_total_length=139i,ip_ttl=42i,sampling_rate=10i,tcp_header_length=0i,tcp_urgent_pointer=0i,tcp_window_size=14i 1584473704793580447
+```
+
+### Reference Documentation
+
+This sflow implementation was built from the reference document
+[sflow.org/sflow_version_5.txt](sflow_version_5)
+
+
+[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
+[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
+[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000
+[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000
+[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/
+[series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
+[influx-docs]: https://docs.influxdata.com/influxdb/latest/
+[sflow_version_5]: https://sflow.org/sflow_version_5.txt
diff --git a/plugins/inputs/sflow/binaryio/minreader.go b/plugins/inputs/sflow/binaryio/minreader.go
new file mode 100644
index 0000000000000..35ccdbcf243ab
--- /dev/null
+++ b/plugins/inputs/sflow/binaryio/minreader.go
@@ -0,0 +1,37 @@
+package binaryio
+
+import "io"
+
+// MinimumReader is the implementation for MinReader.
+type MinimumReader struct {
+ R io.Reader
+ MinNumberOfBytesToRead int64 // Min number of bytes we need to read from the reader
+}
+
+// MinReader reads from R but ensures there is at least N bytes read from the reader.
+// The reader should call Close() when they are done reading.
+// Closing the MinReader will read and discard any unread bytes up to MinNumberOfBytesToRead.
+// CLosing the MinReader does NOT close the underlying reader.
+// The underlying implementation is a MinimumReader, which implements ReaderCloser.
+func MinReader(r io.Reader, minNumberOfBytesToRead int64) *MinimumReader {
+ return &MinimumReader{
+ R: r,
+ MinNumberOfBytesToRead: minNumberOfBytesToRead,
+ }
+}
+
+func (r *MinimumReader) Read(p []byte) (n int, err error) {
+ n, err = r.R.Read(p)
+ r.MinNumberOfBytesToRead -= int64(n)
+ return n, err
+}
+
+// Close does not close the underlying reader, only the MinimumReader
+func (r *MinimumReader) Close() error {
+ if r.MinNumberOfBytesToRead > 0 {
+ b := make([]byte, r.MinNumberOfBytesToRead)
+ _, err := r.R.Read(b)
+ return err
+ }
+ return nil
+}
diff --git a/plugins/inputs/sflow/binaryio/minreader_test.go b/plugins/inputs/sflow/binaryio/minreader_test.go
new file mode 100644
index 0000000000000..081564b3edf03
--- /dev/null
+++ b/plugins/inputs/sflow/binaryio/minreader_test.go
@@ -0,0 +1,39 @@
+package binaryio
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestMinReader(t *testing.T) {
+ b := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+ r := bytes.NewBuffer(b)
+
+ mr := MinReader(r, 10)
+
+ toRead := make([]byte, 5)
+ n, err := mr.Read(toRead)
+ if err != nil {
+ t.Error(err)
+ }
+ if n != 5 {
+ t.Error("Expected n to be 5, but was ", n)
+ }
+ if string(toRead) != string([]byte{1, 2, 3, 4, 5}) {
+ t.Error("expected 5 specific bytes to be read")
+ }
+ err = mr.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ n, err = r.Read(toRead) // read from the outer stream
+ if err != nil {
+ t.Error(err)
+ }
+ if n != 5 {
+ t.Error("Expected n to be 5, but was ", n)
+ }
+ if string(toRead) != string([]byte{11, 12, 13, 14, 15}) {
+ t.Error("expected the last 5 bytes to be read")
+ }
+}
diff --git a/plugins/inputs/sflow/decoder_test.go b/plugins/inputs/sflow/decoder_test.go
new file mode 100644
index 0000000000000..c6e3916b88ffc
--- /dev/null
+++ b/plugins/inputs/sflow/decoder_test.go
@@ -0,0 +1,758 @@
+package sflow
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIPv4SW(t *testing.T) {
+ str := `00000005` + // version
+ `00000001` + //address type
+ `c0a80102` + // ip address
+ `00000010` + // sub agent id
+ `0000f3d4` + // sequence number
+ `0bfa047f` + // uptime
+ `00000002` + // sample count
+ `00000001` + // sample type
+ `000000d0` + // sample data length
+ `0001210a` + // sequence number
+ `000001fe` + // source id 00 = source id type, 0001fe = source id index
+ `00000400` + // sampling rate.. apparently this should be input if index????
+ `04842400` + // sample pool
+ `00000000` + // drops
+ `000001fe` + // input if index
+ `00000200` + // output if index
+ `00000002` + // flow records count
+ `00000001` + // FlowFormat
+ `00000090` + // flow length
+ `00000001` + // header protocol
+ `0000010b` + // Frame length
+ `00000004` + // stripped octets
+ `00000080` + // header length
+ `000c2936d3d6` + // dest mac
+ `94c691aa9760` + // source mac
+ `0800` + // etype code: ipv4
+ `4500` + // dscp + ecn
+ `00f9` + // total length
+ `f190` + // identification
+ `4000` + // fragment offset + flags
+ `40` + // ttl
+ `11` + // protocol
+ `b4f5` + // header checksum
+ `c0a80913` + // source ip
+ `c0a8090a` + // dest ip
+ `00a1` + // source port
+ `ba05` + // dest port
+ `00e5` + // udp length
+ // rest of header/flowSample we ignore
+ `641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101` +
+ // next flow record - ignored
+ `000003e90000001000000009000000000000000900000000` +
+ // next sample
+ `00000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000`
+ packet, err := hex.DecodeString(str)
+ require.NoError(t, err)
+
+ actual := []telegraf.Metric{}
+ dc := NewDecoder()
+ dc.OnPacket(func(p *V5Format) {
+ metrics, err := makeMetrics(p)
+ require.NoError(t, err)
+ actual = append(actual, metrics...)
+ })
+ buf := bytes.NewReader(packet)
+ err = dc.Decode(buf)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "192.168.1.2",
+ "dst_ip": "192.168.9.10",
+ "dst_mac": "00:0c:29:36:d3:d6",
+ "dst_port": "47621",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "510",
+ "output_ifindex": "512",
+ "sample_direction": "ingress",
+ "source_id_index": "510",
+ "source_id_type": "0",
+ "src_ip": "192.168.9.19",
+ "src_mac": "94:c6:91:aa:97:60",
+ "src_port": "161",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x042c00),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x010b),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0xf9),
+ "ip_ttl": uint64(0x40),
+ "sampling_rate": uint64(0x0400),
+ "udp_length": uint64(0xe5),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "192.168.1.2",
+ "dst_ip": "192.168.9.10",
+ "dst_mac": "00:0c:29:36:d3:d6",
+ "dst_port": "514",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "528",
+ "output_ifindex": "512",
+ "sample_direction": "ingress",
+ "source_id_index": "528",
+ "source_id_type": "0",
+ "src_ip": "192.168.8.21",
+ "src_mac": "fc:ec:da:44:00:8f",
+ "src_port": "39529",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x25c000),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x97),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x81),
+ "ip_ttl": uint64(0x3f),
+ "sampling_rate": uint64(0x4000),
+ "udp_length": uint64(0x6d),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+}
+
+func BenchmarkDecodeIPv4SW(b *testing.B) {
+ packet, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000")
+ require.NoError(b, err)
+
+ dc := NewDecoder()
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ _, err = dc.DecodeOnePacket(bytes.NewBuffer(packet))
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+func TestExpandFlow(t *testing.T) {
+ packet, err := hex.DecodeString("00000005000000010a00015000000000000f58998ae119780000000300000003000000c4000b62a90000000000100c840000040024fb7e1e0000000000000000001017840000000000100c8400000001000000010000009000000001000005bc0000000400000080001b17000130001201f58d44810023710800450205a6305440007e06ee92ac100016d94d52f505997e701fa1e17aff62574a50100200355f000000ffff00000b004175746f72697a7a6174610400008040ffff000400008040050031303030320500313030302004000000000868a200000000000000000860a200000000000000000003000000c40003cecf000000000010170400004000a168ac1c000000000000000000101784000000000010170400000001000000010000009000000001000005f200000004000000800024e8324338d4ae52aa0b54810020060800450005dc5420400080061397c0a8060cc0a806080050efcfbb25bad9a21c839a501000fff54000008a55f70975a0ff88b05735597ae274bd81fcba17e6e9206b8ea0fb07d05fc27dad06cfe3fdba5d2fc4d057b0add711e596cbe5e9b4bbe8be59cd77537b7a89f7414a628b736d00000003000000c0000c547a0000000000100c04000004005bc3c3b50000000000000000001017840000000000100c0400000001000000010000008c000000010000007e000000040000007a001b17000130001201f58d448100237108004500006824ea4000ff32c326d94d5105501018f02e88d003000001dd39b1d025d1c68689583b2ab21522d5b5a959642243804f6d51e63323091cc04544285433eb3f6b29e1046a6a2fa7806319d62041d8fa4bd25b7cd85b8db54202054a077ac11de84acbe37a550004")
+ require.NoError(t, err)
+
+ dc := NewDecoder()
+ p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+ require.NoError(t, err)
+ actual, err := makeMetrics(p)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "10.0.1.80",
+ "dst_ip": "217.77.82.245",
+ "dst_mac": "00:1b:17:00:01:30",
+ "dst_port": "32368",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "1054596",
+ "output_ifindex": "1051780",
+ "sample_direction": "egress",
+ "source_id_index": "1051780",
+ "source_id_type": "0",
+ "src_ip": "172.16.0.22",
+ "src_mac": "00:12:01:f5:8d:44",
+ "src_port": "1433",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x16f000),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x05bc),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x05a6),
+ "ip_ttl": uint64(0x7e),
+ "sampling_rate": uint64(0x0400),
+ "tcp_header_length": uint64(0x14),
+ "tcp_urgent_pointer": uint64(0x00),
+ "tcp_window_size": uint64(0x0200),
+ "ip_dscp": "0",
+ "ip_ecn": "2",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "10.0.1.80",
+ "dst_ip": "192.168.6.8",
+ "dst_mac": "00:24:e8:32:43:38",
+ "dst_port": "61391",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "1054596",
+ "output_ifindex": "1054468",
+ "sample_direction": "egress",
+ "source_id_index": "1054468",
+ "source_id_type": "0",
+ "src_ip": "192.168.6.12",
+ "src_mac": "d4:ae:52:aa:0b:54",
+ "src_port": "80",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x017c8000),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x05f2),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x05dc),
+ "ip_ttl": uint64(0x80),
+ "sampling_rate": uint64(0x4000),
+ "tcp_header_length": uint64(0x14),
+ "tcp_urgent_pointer": uint64(0x00),
+ "tcp_window_size": uint64(0xff),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "10.0.1.80",
+ "dst_ip": "80.16.24.240",
+ "dst_mac": "00:1b:17:00:01:30",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "1054596",
+ "output_ifindex": "1051652",
+ "sample_direction": "egress",
+ "source_id_index": "1051652",
+ "source_id_type": "0",
+ "src_ip": "217.77.81.5",
+ "src_mac": "00:12:01:f5:8d:44",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x01f800),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x7e),
+ "header_length": uint64(0x7a),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x68),
+ "ip_ttl": uint64(0xff),
+ "sampling_rate": uint64(0x0400),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+}
+
+func TestIPv4SWRT(t *testing.T) {
+ packet, err := hex.DecodeString("000000050000000189dd4f010000000000003d4f21151ad40000000600000001000000bc354b97090000020c000013b175792bea000000000000028f0000020c0000000300000001000000640000000100000058000000040000005408b2587a57624c16fc0b61a5080045000046c3e440003a1118a0052aada7569e5ab367a6e35b0032d7bbf1f2fb2eb2490a97f87abc31e135834be367000002590000ffffffffffffffff02add830d51e0aec14cf000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e32a000000160000000b00000001000000a88b8ffb57000002a2000013b12e344fd800000000000002a20000028f0000000300000001000000500000000100000042000000040000003e4c16fc0b6202c03e0fdecafe080045000030108000007d11fe45575185a718693996f0570e8c001c20614ad602003fd6d4afa6a6d18207324000271169b00000000003e90000001000000000000000000000000000000000000003ea000000100000000189dd4f210000000f0000001800000001000000e8354b970a0000020c000013b175793f9b000000000000028f0000020c00000003000000010000009000000001000001a500000004000000800231466d0b2c4c16fc0b61a5080045000193198f40003a114b75052aae1f5f94c778678ef24d017f50ea7622287c30799e1f7d45932d01ca92c46d930000927c0000ffffffffffffffff02ad0eea6498953d1c7ebb6dbdf0525c80e1a9a62bacfea92f69b7336c2f2f60eba0593509e14eef167eb37449f05ad70b8241c1a46d000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000160000001000000001000000e8354b970b0000020c000013b17579534c000000000000028f0000020c00000003000000010000009000000001000000b500000004000000800231466d0b2c4c16fc0b61a50800450000a327c240003606fd67b93c706a021ff365045fe8a0976d624df8207083501800edb31b0000485454502f312e3120323030204f4b0d0a5365727665723a2050726f746f636f6c20485454500d0a436f6e74656e742d4c656e6774683a20313430340d0a436f6e6e656374696f6e3a20000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000170000001000000001000000e8354b970c0000020c000013b1757966fd000000000000028f0000020c000000030000000100000090000000010000018e00000004000000800231466d0b2c4c16fc0b61a508004500017c7d2c40003a116963052abd8d021c940e67e7e0d501682342dbe7936bd47ef487dee5591ec1b24d83622e000072250000ffffffffffffffff02ad0039d8ba86a90017071d76b177de4d8c4e23bcaaaf4d795f77b032f959e0fb70234d4c28922d4e08dd3330c66e34bff51cc8ade5000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000160000001000000001000000e80d6146ac000002a1000013b17880b49d00000000000002a10000028f00000003000000010000009000000001000005ee00000004000000804c16fc0b6201d8b122766a2c0800450005dc04574000770623a11fcd80a218691d4cf2fe01bbd4f47482065fd63a5010fabd7987000052a20002c8c43ea91ca1eaa115663f5218a37fbb409dfbbedff54731ef41199b35535905ac2366a05a803146ced544abf45597f3714327d59f99e30c899c39fc5a4b67d12087bf8db2bc000003e90000001000000000000000000000000000000000000003ea000000100000000189dd4f210000001000000018")
+ require.NoError(t, err)
+
+ dc := NewDecoder()
+ p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+ require.NoError(t, err)
+ actual, err := makeMetrics(p)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "137.221.79.1",
+ "dst_ip": "86.158.90.179",
+ "dst_mac": "08:b2:58:7a:57:62",
+ "dst_port": "58203",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "655",
+ "output_ifindex": "524",
+ "sample_direction": "egress",
+ "source_id_index": "524",
+ "source_id_type": "0",
+ "src_ip": "5.42.173.167",
+ "src_mac": "4c:16:fc:0b:61:a5",
+ "src_port": "26534",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x06c4d8),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x58),
+ "header_length": uint64(0x54),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x46),
+ "ip_ttl": uint64(0x3a),
+ "sampling_rate": uint64(0x13b1),
+ "udp_length": uint64(0x32),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "137.221.79.1",
+ "dst_ip": "24.105.57.150",
+ "dst_mac": "4c:16:fc:0b:62:02",
+ "dst_port": "3724",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "674",
+ "output_ifindex": "655",
+ "sample_direction": "ingress",
+ "source_id_index": "674",
+ "source_id_type": "0",
+ "src_ip": "87.81.133.167",
+ "src_mac": "c0:3e:0f:de:ca:fe",
+ "src_port": "61527",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x0513a2),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x42),
+ "header_length": uint64(0x3e),
+ "ip_flags": uint64(0x00),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x30),
+ "ip_ttl": uint64(0x7d),
+ "sampling_rate": uint64(0x13b1),
+ "udp_length": uint64(0x1c),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "137.221.79.1",
+ "dst_ip": "95.148.199.120",
+ "dst_mac": "02:31:46:6d:0b:2c",
+ "dst_port": "62029",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "655",
+ "output_ifindex": "524",
+ "sample_direction": "egress",
+ "source_id_index": "524",
+ "source_id_type": "0",
+ "src_ip": "5.42.174.31",
+ "src_mac": "4c:16:fc:0b:61:a5",
+ "src_port": "26510",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x206215),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x01a5),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x0193),
+ "ip_ttl": uint64(0x3a),
+ "sampling_rate": uint64(0x13b1),
+ "udp_length": uint64(0x017f),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "137.221.79.1",
+ "dst_ip": "2.31.243.101",
+ "dst_mac": "02:31:46:6d:0b:2c",
+ "dst_port": "59552",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "655",
+ "output_ifindex": "524",
+ "sample_direction": "egress",
+ "source_id_index": "524",
+ "source_id_type": "0",
+ "src_ip": "185.60.112.106",
+ "src_mac": "4c:16:fc:0b:61:a5",
+ "src_port": "1119",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x0dec25),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0xb5),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0xa3),
+ "ip_ttl": uint64(0x36),
+ "sampling_rate": uint64(0x13b1),
+ "tcp_header_length": uint64(0x14),
+ "tcp_urgent_pointer": uint64(0x00),
+ "tcp_window_size": uint64(0xed),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "137.221.79.1",
+ "dst_ip": "2.28.148.14",
+ "dst_mac": "02:31:46:6d:0b:2c",
+ "dst_port": "57557",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "655",
+ "output_ifindex": "524",
+ "sample_direction": "egress",
+ "source_id_index": "524",
+ "source_id_type": "0",
+ "src_ip": "5.42.189.141",
+ "src_mac": "4c:16:fc:0b:61:a5",
+ "src_port": "26599",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x1e9d2e),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x018e),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x017c),
+ "ip_ttl": uint64(0x3a),
+ "sampling_rate": uint64(0x13b1),
+ "udp_length": uint64(0x0168),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "137.221.79.1",
+ "dst_ip": "24.105.29.76",
+ "dst_mac": "4c:16:fc:0b:62:01",
+ "dst_port": "443",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "673",
+ "output_ifindex": "655",
+ "sample_direction": "ingress",
+ "source_id_index": "673",
+ "source_id_type": "0",
+ "src_ip": "31.205.128.162",
+ "src_mac": "d8:b1:22:76:6a:2c",
+ "src_port": "62206",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x74c38e),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x05ee),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x05dc),
+ "ip_ttl": uint64(0x77),
+ "sampling_rate": uint64(0x13b1),
+ "tcp_header_length": uint64(0x14),
+ "tcp_urgent_pointer": uint64(0x00),
+ "tcp_window_size": uint64(0xfabd),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+}
+
+func TestIPv6SW(t *testing.T) {
+ packet, err := hex.DecodeString("00000005000000010ae0648100000002000093d824ac82340000000100000001000000d000019f94000001010000100019f94000000000000000010100000000000000020000000100000090000000010000058c00000008000000800008e3fffc10d4f4be04612486dd60000000054e113a2607f8b0400200140000000000000008262000edc000e804a25e30c581af36fa01bbfa6f054e249810b584bcbf12926c2e29a779c26c72db483e8191524fe2288bfdaceaf9d2e724d04305706efcfdef70db86873bbacf29698affe4e7d6faa21d302f9b4b023291a05a000003e90000001000000001000000000000000100000000")
+ require.NoError(t, err)
+
+ dc := NewDecoder()
+ p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+ require.NoError(t, err)
+ actual, err := makeMetrics(p)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "10.224.100.129",
+ "dst_ip": "2620:ed:c000:e804:a25e:30c5:81af:36fa",
+ "dst_mac": "00:08:e3:ff:fc:10",
+ "dst_port": "64111",
+ "ether_type": "IPv6",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "257",
+ "output_ifindex": "0",
+ "sample_direction": "ingress",
+ "source_id_index": "257",
+ "source_id_type": "0",
+ "src_ip": "2607:f8b0:4002:14::8",
+ "src_mac": "d4:f4:be:04:61:24",
+ "src_port": "443",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x58c000),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x058c),
+ "header_length": uint64(0x80),
+ "sampling_rate": uint64(0x1000),
+ "payload_length": uint64(0x054e),
+ "udp_length": uint64(0x054e),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+}
+
+func TestExpandFlowCounter(t *testing.T) {
+ packet, err := hex.DecodeString("00000005000000010a00015000000000000f58898ae0fa380000000700000004000000ec00006ece0000000000101784000000030000000200000034000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000058001017840000000600000002540be400000000010000000300007b8ebd37b97e61ff94860803e8e908ffb2b500000000000000000000000000018e7c31ee7ba4195f041874579ff021ba936300000000000000000000000100000007000000380011223344550003f8b15645e7e7d6960000002fe2fc02fc01edbf580000000000000000000000000000000001dcb9cf000000000000000000000004000000ec00006ece0000000000100184000000030000000200000034000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000058001001840000000600000002540be400000000010000000300000841131d1fd9f850bfb103617cb401e6598900000000000000000000000000000bec1902e5da9212e3e96d7996e922513250000000000000000000000001000000070000003800112233445500005c260acbddb3000100000003e2fc02fc01ee414f0000000000000000000000000000000001dccdd30000000000000000000000030000008400004606000000000010030400004000ad9dc19b0000000000000000001017840000000000100304000000010000000100000050000000010000004400000004000000400012815116c4001517cf426d8100200608004500002895da40008006d74bc0a8060ac0a8064f04ef04aab1797122cf7eaf4f5010ffff7727000000000000000000000003000000b0001bd698000000000010148400000400700b180f000000000000000000101504000000000010148400000001000000010000007c000000010000006f000000040000006b001b17000131f0f755b9afc081000439080045000059045340005206920c1f0d4703d94d52e201bbf14977d1e9f15498af36801800417f1100000101080afdf3c70400e043871503010020ff268cfe2e2fd5fffe1d3d704a91d57b895f174c4b4428c66679d80a307294303f00000003000000c40003ceca000000000010170400004000a166aa7a000000000000000000101784000000000010170400000001000000010000009000000001000005f200000004000000800024e8369e2bd4ae52aa0b54810020060800450005dc4c71400080061b45c0a8060cc0a806090050f855692a7a94a1154ae1801001046b6a00000101080a6869a48d151016d046a84a7aa1c6743fa05179f7ecbd4e567150cb6f2077ff89480ae730637d26d2237c08548806f672c7476eb1b5a447b42cb9ce405994d152fa3e000000030000008c001bd699000000000010148400000400700b180f0000000000000000001015040000000000101484000000010000000100000058000000010000004a0000000400000046001b17000131f0f755b9afc0810004390800450000340ce040003a06bea5c1ce8793d94d528f00504c3b08b18f275b83d5df8010054586ad00000101050a5b83d5de5b83d5df11d800000003000000c400004e07000000000010028400004000c7ec97f2000000000000000000100784000000000010028400000001000000010000009000000001000005f2000000040000008000005e0001ff005056800dd18100000a0800450005dc5a42400040066ef70a000ac8c0a8967201bbe17c81597908caf8a05f5010010328610000f172263da0ba5d6223c079b8238bc841256bf17c4ffb08ad11c4fbff6f87ae1624a6b057b8baa9342114e5f5b46179083020cb560c4e9eadcec6dfd83e102ddbc27024803eb5")
+ require.NoError(t, err)
+
+ dc := NewDecoder()
+ p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+ require.NoError(t, err)
+ actual, err := makeMetrics(p)
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "10.0.1.80",
+ "dst_ip": "192.168.6.79",
+ "dst_mac": "00:12:81:51:16:c4",
+ "dst_port": "1194",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "1054596",
+ "output_ifindex": "1049348",
+ "sample_direction": "egress",
+ "source_id_index": "1049348",
+ "source_id_type": "0",
+ "src_ip": "192.168.6.10",
+ "src_mac": "00:15:17:cf:42:6d",
+ "src_port": "1263",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x110000),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x44),
+ "header_length": uint64(0x40),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x28),
+ "ip_ttl": uint64(0x80),
+ "sampling_rate": uint64(0x4000),
+ "tcp_header_length": uint64(0x14),
+ "tcp_urgent_pointer": uint64(0x00),
+ "tcp_window_size": uint64(0xffff),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "10.0.1.80",
+ "dst_ip": "217.77.82.226",
+ "dst_mac": "00:1b:17:00:01:31",
+ "dst_port": "61769",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "1053956",
+ "output_ifindex": "1053828",
+ "sample_direction": "egress",
+ "source_id_index": "1053828",
+ "source_id_type": "0",
+ "src_ip": "31.13.71.3",
+ "src_mac": "f0:f7:55:b9:af:c0",
+ "src_port": "443",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x01bc00),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x6f),
+ "header_length": uint64(0x6b),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x59),
+ "ip_ttl": uint64(0x52),
+ "sampling_rate": uint64(0x0400),
+ "tcp_header_length": uint64(0x20),
+ "tcp_urgent_pointer": uint64(0x00),
+ "tcp_window_size": uint64(0x41),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "10.0.1.80",
+ "dst_ip": "192.168.6.9",
+ "dst_mac": "00:24:e8:36:9e:2b",
+ "dst_port": "63573",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "1054596",
+ "output_ifindex": "1054468",
+ "sample_direction": "egress",
+ "source_id_index": "1054468",
+ "source_id_type": "0",
+ "src_ip": "192.168.6.12",
+ "src_mac": "d4:ae:52:aa:0b:54",
+ "src_port": "80",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x017c8000),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x05f2),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x05dc),
+ "ip_ttl": uint64(0x80),
+ "sampling_rate": uint64(0x4000),
+ "tcp_header_length": uint64(0x20),
+ "tcp_urgent_pointer": uint64(0x00),
+ "tcp_window_size": uint64(0x0104),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "10.0.1.80",
+ "dst_ip": "217.77.82.143",
+ "dst_mac": "00:1b:17:00:01:31",
+ "dst_port": "19515",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "1053956",
+ "output_ifindex": "1053828",
+ "sample_direction": "egress",
+ "source_id_index": "1053828",
+ "source_id_type": "0",
+ "src_ip": "193.206.135.147",
+ "src_mac": "f0:f7:55:b9:af:c0",
+ "src_port": "80",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x012800),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x4a),
+ "header_length": uint64(0x46),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x34),
+ "ip_ttl": uint64(0x3a),
+ "sampling_rate": uint64(0x0400),
+ "tcp_header_length": uint64(0x20),
+ "tcp_urgent_pointer": uint64(0x00),
+ "tcp_window_size": uint64(0x0545),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "10.0.1.80",
+ "dst_ip": "192.168.150.114",
+ "dst_mac": "00:00:5e:00:01:ff",
+ "dst_port": "57724",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "1050500",
+ "output_ifindex": "1049220",
+ "sample_direction": "egress",
+ "source_id_index": "1049220",
+ "source_id_type": "0",
+ "src_ip": "10.0.10.200",
+ "src_mac": "00:50:56:80:0d:d1",
+ "src_port": "443",
+ },
+ map[string]interface{}{
+ "bytes": uint64(0x017c8000),
+ "drops": uint64(0x00),
+ "frame_length": uint64(0x05f2),
+ "header_length": uint64(0x80),
+ "ip_flags": uint64(0x02),
+ "ip_fragment_offset": uint64(0x00),
+ "ip_total_length": uint64(0x05dc),
+ "ip_ttl": uint64(0x40),
+ "sampling_rate": uint64(0x4000),
+ "tcp_header_length": uint64(0x14),
+ "tcp_urgent_pointer": uint64(0x00),
+ "tcp_window_size": uint64(0x0103),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+}
+
+func TestFlowExpandCounter(t *testing.T) {
+ packet, err := hex.DecodeString("00000005000000010a000150000000000006d14d8ae0fe200000000200000004000000ac00006d15000000004b00ca000000000200000002000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000584b00ca0000000001000000000000000000000001000000010000308ae33bb950eb92a8a3004d0bb406899571000000000000000000000000000012f7ed9c9db8c24ed90604eaf0bd04636edb00000000000000000000000100000004000000ac00006d15000000004b0054000000000200000002000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000584b00540000000001000000003b9aca000000000100000003000067ba8e64fd23fa65f26d0215ec4a0021086600000000000000000000000000002002c3b21045c2378ad3001fb2f300061872000000000000000000000001")
+ require.NoError(t, err)
+
+ dc := NewDecoder()
+ p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+ require.NoError(t, err)
+ actual, err := makeMetrics(p)
+ require.NoError(t, err)
+
+ // we don't do anything with samples yet
+ expected := []telegraf.Metric{}
+ testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
+}
diff --git a/plugins/inputs/sflow/metricencoder.go b/plugins/inputs/sflow/metricencoder.go
new file mode 100644
index 0000000000000..ffc9d8e023849
--- /dev/null
+++ b/plugins/inputs/sflow/metricencoder.go
@@ -0,0 +1,46 @@
+package sflow
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+)
+
+func makeMetrics(p *V5Format) ([]telegraf.Metric, error) {
+ now := time.Now()
+ metrics := []telegraf.Metric{}
+ tags := map[string]string{
+ "agent_address": p.AgentAddress.String(),
+ }
+ fields := map[string]interface{}{}
+ for _, sample := range p.Samples {
+ tags["input_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.InputIfIndex), 10)
+ tags["output_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.OutputIfIndex), 10)
+ tags["sample_direction"] = sample.SampleData.SampleDirection
+ tags["source_id_index"] = strconv.FormatUint(uint64(sample.SampleData.SourceIDIndex), 10)
+ tags["source_id_type"] = strconv.FormatUint(uint64(sample.SampleData.SourceIDType), 10)
+ fields["drops"] = sample.SampleData.Drops
+ fields["sampling_rate"] = sample.SampleData.SamplingRate
+
+ for _, flowRecord := range sample.SampleData.FlowRecords {
+ if flowRecord.FlowData != nil {
+ tags2 := flowRecord.FlowData.GetTags()
+ fields2 := flowRecord.FlowData.GetFields()
+ for k, v := range tags {
+ tags2[k] = v
+ }
+ for k, v := range fields {
+ fields2[k] = v
+ }
+ m, err := metric.New("sflow", tags2, fields2, now)
+ if err != nil {
+ return nil, err
+ }
+ metrics = append(metrics, m)
+ }
+ }
+ }
+ return metrics, nil
+}
diff --git a/plugins/inputs/sflow/packetdecoder.go b/plugins/inputs/sflow/packetdecoder.go
new file mode 100644
index 0000000000000..9e6b2a4fec40e
--- /dev/null
+++ b/plugins/inputs/sflow/packetdecoder.go
@@ -0,0 +1,483 @@
+package sflow
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs/sflow/binaryio"
+ "github.com/pkg/errors"
+)
+
+type PacketDecoder struct {
+ onPacket func(p *V5Format)
+ Log telegraf.Logger
+}
+
+func NewDecoder() *PacketDecoder {
+ return &PacketDecoder{}
+}
+
+func (d *PacketDecoder) debug(args ...interface{}) {
+ if d.Log != nil {
+ d.Log.Debug(args...)
+ }
+}
+
+func (d *PacketDecoder) OnPacket(f func(p *V5Format)) {
+ d.onPacket = f
+}
+
+func (d *PacketDecoder) Decode(r io.Reader) error {
+ var err error
+ var packet *V5Format
+ for err == nil {
+ packet, err = d.DecodeOnePacket(r)
+ if err != nil {
+ break
+ }
+ d.onPacket(packet)
+ }
+ if err != nil && errors.Cause(err) == io.EOF {
+ return nil
+ }
+ return err
+}
+
+type AddressType uint32 // must be uint32
+
+const (
+ AddressTypeUnknown AddressType = 0
+ AddressTypeIPV4 AddressType = 1
+ AddressTypeIPV6 AddressType = 2
+)
+
+func (d *PacketDecoder) DecodeOnePacket(r io.Reader) (*V5Format, error) {
+ p := &V5Format{}
+ err := read(r, &p.Version, "version")
+ if err != nil {
+ return nil, err
+ }
+ if p.Version != 5 {
+ return nil, fmt.Errorf("Version %d not supported, only version 5", p.Version)
+ }
+ var addressIPType AddressType
+ if err = read(r, &addressIPType, "address ip type"); err != nil {
+ return nil, err
+ }
+ switch addressIPType {
+ case AddressTypeUnknown:
+ p.AgentAddress.IP = make([]byte, 0)
+ case AddressTypeIPV4:
+ p.AgentAddress.IP = make([]byte, 4)
+ case AddressTypeIPV6:
+ p.AgentAddress.IP = make([]byte, 16)
+ default:
+ return nil, fmt.Errorf("Unknown address IP type %d", addressIPType)
+ }
+ if err = read(r, &p.AgentAddress.IP, "Agent Address IP"); err != nil {
+ return nil, err
+ }
+ if err = read(r, &p.SubAgentID, "SubAgentID"); err != nil {
+ return nil, err
+ }
+ if err = read(r, &p.SequenceNumber, "SequenceNumber"); err != nil {
+ return nil, err
+ }
+ if err = read(r, &p.Uptime, "Uptime"); err != nil {
+ return nil, err
+ }
+
+ p.Samples, err = d.decodeSamples(r)
+ return p, err
+}
+
+func (d *PacketDecoder) decodeSamples(r io.Reader) ([]Sample, error) {
+ result := []Sample{}
+ // # of samples
+ var numOfSamples uint32
+ if err := read(r, &numOfSamples, "sample count"); err != nil {
+ return nil, err
+ }
+
+ for i := 0; i < int(numOfSamples); i++ {
+ sam, err := d.decodeSample(r)
+ if err != nil {
+ return result, err
+ }
+ result = append(result, sam)
+ }
+
+ return result, nil
+}
+
+func (d *PacketDecoder) decodeSample(r io.Reader) (Sample, error) {
+ var err error
+ sam := Sample{}
+ if err := read(r, &sam.SampleType, "SampleType"); err != nil {
+ return sam, err
+ }
+ sampleDataLen := uint32(0)
+ if err := read(r, &sampleDataLen, "Sample data length"); err != nil {
+ return sam, err
+ }
+ mr := binaryio.MinReader(r, int64(sampleDataLen))
+ defer mr.Close()
+
+ switch sam.SampleType {
+ case SampleTypeFlowSample:
+ sam.SampleData, err = d.decodeFlowSample(mr)
+ case SampleTypeFlowSampleExpanded:
+ sam.SampleData, err = d.decodeFlowSampleExpanded(mr)
+ default:
+ d.debug("Unknown sample type: ", sam.SampleType)
+ }
+ return sam, err
+}
+
+type InterfaceFormatType uint8 // sflow_version_5.txt line 1497
+const (
+ InterfaceFormatTypeSingleInterface InterfaceFormatType = 0
+ InterfaceFormatTypePacketDiscarded InterfaceFormatType = 1
+)
+
+func (d *PacketDecoder) decodeFlowSample(r io.Reader) (t SampleDataFlowSampleExpanded, err error) {
+ if err := read(r, &t.SequenceNumber, "SequenceNumber"); err != nil {
+ return t, err
+ }
+ var sourceID uint32
+ if err := read(r, &sourceID, "SourceID"); err != nil { // source_id sflow_version_5.txt line: 1622
+ return t, err
+ }
+ // split source id to source id type and source id index
+ t.SourceIDIndex = sourceID & 0x00ffffff // sflow_version_5.txt line: 1468
+ t.SourceIDType = sourceID >> 24 // source_id_type sflow_version_5.txt Line 1465
+ if err := read(r, &t.SamplingRate, "SamplingRate"); err != nil {
+ return t, err
+ }
+ if err := read(r, &t.SamplePool, "SamplePool"); err != nil {
+ return t, err
+ }
+ if err := read(r, &t.Drops, "Drops"); err != nil { // sflow_version_5.txt line 1636
+ return t, err
+ }
+ if err := read(r, &t.InputIfIndex, "InputIfIndex"); err != nil {
+ return t, err
+ }
+ t.InputIfFormat = t.InputIfIndex >> 30
+ t.InputIfIndex = t.InputIfIndex & 0x3FFFFFFF
+
+ if err := read(r, &t.OutputIfIndex, "OutputIfIndex"); err != nil {
+ return t, err
+ }
+ t.OutputIfFormat = t.OutputIfIndex >> 30
+ t.OutputIfIndex = t.OutputIfIndex & 0x3FFFFFFF
+
+ switch t.SourceIDIndex {
+ case t.OutputIfIndex:
+ t.SampleDirection = "egress"
+ case t.InputIfIndex:
+ t.SampleDirection = "ingress"
+ }
+
+ t.FlowRecords, err = d.decodeFlowRecords(r, t.SamplingRate)
+ return t, err
+}
+
+func (d *PacketDecoder) decodeFlowSampleExpanded(r io.Reader) (t SampleDataFlowSampleExpanded, err error) {
+ if err := read(r, &t.SequenceNumber, "SequenceNumber"); err != nil { // sflow_version_5.txt line 1701
+ return t, err
+ }
+ if err := read(r, &t.SourceIDType, "SourceIDType"); err != nil { // sflow_version_5.txt line: 1706 + 16878
+ return t, err
+ }
+ if err := read(r, &t.SourceIDIndex, "SourceIDIndex"); err != nil { // sflow_version_5.txt line: 1689
+ return t, err
+ }
+ if err := read(r, &t.SamplingRate, "SamplingRate"); err != nil { // sflow_version_5.txt line: 1707
+ return t, err
+ }
+ if err := read(r, &t.SamplePool, "SamplePool"); err != nil { // sflow_version_5.txt line: 1708
+ return t, err
+ }
+ if err := read(r, &t.Drops, "Drops"); err != nil { // sflow_version_5.txt line: 1712
+ return t, err
+ }
+ if err := read(r, &t.InputIfFormat, "InputIfFormat"); err != nil { // sflow_version_5.txt line: 1727
+ return t, err
+ }
+ if err := read(r, &t.InputIfIndex, "InputIfIndex"); err != nil {
+ return t, err
+ }
+ if err := read(r, &t.OutputIfFormat, "OutputIfFormat"); err != nil { // sflow_version_5.txt line: 1728
+ return t, err
+ }
+ if err := read(r, &t.OutputIfIndex, "OutputIfIndex"); err != nil {
+ return t, err
+ }
+
+ switch t.SourceIDIndex {
+ case t.OutputIfIndex:
+ t.SampleDirection = "egress"
+ case t.InputIfIndex:
+ t.SampleDirection = "ingress"
+ }
+
+ t.FlowRecords, err = d.decodeFlowRecords(r, t.SamplingRate)
+ return t, err
+}
+
+func (d *PacketDecoder) decodeFlowRecords(r io.Reader, samplingRate uint32) (recs []FlowRecord, err error) {
+ var flowDataLen uint32
+ var count uint32
+ if err := read(r, &count, "FlowRecord count"); err != nil {
+ return recs, err
+ }
+ for i := uint32(0); i < count; i++ {
+ fr := FlowRecord{}
+ if err := read(r, &fr.FlowFormat, "FlowFormat"); err != nil { // sflow_version_5.txt line 1597
+ return recs, err
+ }
+ if err := read(r, &flowDataLen, "Flow data length"); err != nil {
+ return recs, err
+ }
+
+ mr := binaryio.MinReader(r, int64(flowDataLen))
+
+ switch fr.FlowFormat {
+ case FlowFormatTypeRawPacketHeader: // sflow_version_5.txt line 1938
+ fr.FlowData, err = d.decodeRawPacketHeaderFlowData(mr, samplingRate)
+ default:
+ d.debug("Unknown flow format: ", fr.FlowFormat)
+ }
+ if err != nil {
+ mr.Close()
+ return recs, err
+ }
+
+ recs = append(recs, fr)
+ mr.Close()
+ }
+
+ return recs, err
+}
+
+func (d *PacketDecoder) decodeRawPacketHeaderFlowData(r io.Reader, samplingRate uint32) (h RawPacketHeaderFlowData, err error) {
+ if err := read(r, &h.HeaderProtocol, "HeaderProtocol"); err != nil { // sflow_version_5.txt line 1940
+ return h, err
+ }
+ if err := read(r, &h.FrameLength, "FrameLength"); err != nil { // sflow_version_5.txt line 1942
+ return h, err
+ }
+ h.Bytes = h.FrameLength * samplingRate
+
+ if err := read(r, &h.StrippedOctets, "StrippedOctets"); err != nil { // sflow_version_5.txt line 1967
+ return h, err
+ }
+ if err := read(r, &h.HeaderLength, "HeaderLength"); err != nil {
+ return h, err
+ }
+
+ mr := binaryio.MinReader(r, int64(h.HeaderLength))
+ defer mr.Close()
+
+ switch h.HeaderProtocol {
+ case HeaderProtocolTypeEthernetISO88023:
+ h.Header, err = d.decodeEthHeader(mr)
+ default:
+ d.debug("Unknown header protocol type: ", h.HeaderProtocol)
+ }
+
+ return h, err
+}
+
+// ethHeader answers a decode Directive that will decode an ethernet frame header
+// according to https://en.wikipedia.org/wiki/Ethernet_frame
+func (d *PacketDecoder) decodeEthHeader(r io.Reader) (h EthHeader, err error) {
+ // we may have to read out StrippedOctets bytes and throw them away first?
+ if err := read(r, &h.DestinationMAC, "DestinationMAC"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.SourceMAC, "SourceMAC"); err != nil {
+ return h, err
+ }
+ var tagOrEType uint16
+ if err := read(r, &tagOrEType, "tagOrEtype"); err != nil {
+ return h, err
+ }
+ switch tagOrEType {
+ case 0x8100: // could be?
+ var discard uint16
+ if err := read(r, &discard, "unknown"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.EtherTypeCode, "EtherTypeCode"); err != nil {
+ return h, err
+ }
+ default:
+ h.EtherTypeCode = tagOrEType
+ }
+ h.EtherType = ETypeMap[h.EtherTypeCode]
+ switch h.EtherType {
+ case "IPv4":
+ h.IPHeader, err = d.decodeIPv4Header(r)
+ case "IPv6":
+ h.IPHeader, err = d.decodeIPv6Header(r)
+ default:
+ }
+ if err != nil {
+ return h, err
+ }
+ return h, err
+}
+
+// https://en.wikipedia.org/wiki/IPv4#Header
+func (d *PacketDecoder) decodeIPv4Header(r io.Reader) (h IPV4Header, err error) {
+ if err := read(r, &h.Version, "Version"); err != nil {
+ return h, err
+ }
+ h.InternetHeaderLength = h.Version & 0x0F
+ h.Version = h.Version & 0xF0
+ if err := read(r, &h.DSCP, "DSCP"); err != nil {
+ return h, err
+ }
+ h.ECN = h.DSCP & 0x03
+ h.DSCP = h.DSCP >> 2
+ if err := read(r, &h.TotalLength, "TotalLength"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.Identification, "Identification"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.FragmentOffset, "FragmentOffset"); err != nil {
+ return h, err
+ }
+ h.Flags = uint8(h.FragmentOffset >> 13)
+ h.FragmentOffset = h.FragmentOffset & 0x1FFF
+ if err := read(r, &h.TTL, "TTL"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.Protocol, "Protocol"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.HeaderChecksum, "HeaderChecksum"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.SourceIP, "SourceIP"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.DestIP, "DestIP"); err != nil {
+ return h, err
+ }
+ switch h.Protocol {
+ case IPProtocolTCP:
+ h.ProtocolHeader, err = d.decodeTCPHeader(r)
+ case IPProtocolUDP:
+ h.ProtocolHeader, err = d.decodeUDPHeader(r)
+ default:
+ d.debug("Unknown IP protocol: ", h.Protocol)
+ }
+ return h, err
+}
+
+// https://en.wikipedia.org/wiki/IPv6_packet
+func (d *PacketDecoder) decodeIPv6Header(r io.Reader) (h IPV6Header, err error) {
+ var fourByteBlock uint32
+ if err := read(r, &fourByteBlock, "IPv6 header octet 0"); err != nil {
+ return h, err
+ }
+ version := fourByteBlock >> 28
+ if version != 0x6 {
+ return h, fmt.Errorf("Unexpected IPv6 header version 0x%x", version)
+ }
+ h.DSCP = uint8((fourByteBlock & 0xFC00000) >> 22)
+ h.ECN = uint8((fourByteBlock & 0x300000) >> 20)
+
+ // flowLabel := fourByteBlock & 0xFFFFF // not currently being used.
+ if err := read(r, &h.PayloadLength, "PayloadLength"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.NextHeaderProto, "NextHeaderProto"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.HopLimit, "HopLimit"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.SourceIP, "SourceIP"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.DestIP, "DestIP"); err != nil {
+ return h, err
+ }
+ switch h.NextHeaderProto {
+ case IPProtocolTCP:
+ h.ProtocolHeader, err = d.decodeTCPHeader(r)
+ case IPProtocolUDP:
+ h.ProtocolHeader, err = d.decodeUDPHeader(r)
+ default:
+ // not handled
+ d.debug("Unknown IP protocol: ", h.NextHeaderProto)
+ }
+ return h, err
+}
+
+// https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
+func (d *PacketDecoder) decodeTCPHeader(r io.Reader) (h TCPHeader, err error) {
+ if err := read(r, &h.SourcePort, "SourcePort"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.DestinationPort, "DestinationPort"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.Sequence, "Sequence"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.AckNumber, "AckNumber"); err != nil {
+ return h, err
+ }
+ // Next up: bit reading!
+ // data offset 4 bits
+ // reserved 3 bits
+ // flags 9 bits
+ var dataOffsetAndReservedAndFlags uint16
+ if err := read(r, &dataOffsetAndReservedAndFlags, "TCP Header Octet offset 12"); err != nil {
+ return h, err
+ }
+ h.TCPHeaderLength = uint8((dataOffsetAndReservedAndFlags >> 12) * 4)
+ h.Flags = dataOffsetAndReservedAndFlags & 0x1FF
+ // done bit reading
+
+ if err := read(r, &h.TCPWindowSize, "TCPWindowSize"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.Checksum, "Checksum"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.TCPUrgentPointer, "TCPUrgentPointer"); err != nil {
+ return h, err
+ }
+
+ return h, err
+}
+
+func (d *PacketDecoder) decodeUDPHeader(r io.Reader) (h UDPHeader, err error) {
+ if err := read(r, &h.SourcePort, "SourcePort"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.DestinationPort, "DestinationPort"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.UDPLength, "UDPLength"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.Checksum, "Checksum"); err != nil {
+ return h, err
+ }
+ return h, err
+}
+
+func read(r io.Reader, data interface{}, name string) error {
+ err := binary.Read(r, binary.BigEndian, data)
+ return errors.Wrapf(err, "failed to read %s", name)
+}
diff --git a/plugins/inputs/sflow/packetdecoder_test.go b/plugins/inputs/sflow/packetdecoder_test.go
new file mode 100644
index 0000000000000..f078eaf310e8b
--- /dev/null
+++ b/plugins/inputs/sflow/packetdecoder_test.go
@@ -0,0 +1,207 @@
+package sflow
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestUDPHeader(t *testing.T) {
+ octets := bytes.NewBuffer([]byte{
+ 0x00, 0x01, // src_port
+ 0x00, 0x02, // dst_port
+ 0x00, 0x03, // udp_length
+ 0x00, 0x00, // checksum
+ })
+
+ dc := NewDecoder()
+ actual, err := dc.decodeUDPHeader(octets)
+ require.NoError(t, err)
+
+ expected := UDPHeader{
+ SourcePort: 1,
+ DestinationPort: 2,
+ UDPLength: 3,
+ }
+
+ require.Equal(t, expected, actual)
+}
+
+func BenchmarkUDPHeader(b *testing.B) {
+ octets := bytes.NewBuffer([]byte{
+ 0x00, 0x01, // src_port
+ 0x00, 0x02, // dst_port
+ 0x00, 0x03, // udp_length
+ 0x00, 0x00, // checksum
+ })
+
+ dc := NewDecoder()
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ dc.decodeUDPHeader(octets)
+ }
+}
+
+func TestIPv4Header(t *testing.T) {
+ octets := bytes.NewBuffer(
+ []byte{
+ 0x45, // version + IHL
+ 0x00, // ip_dscp + ip_ecn
+ 0x00, 0x00, // total length
+ 0x00, 0x00, // identification
+ 0x00, 0x00, // flags + frag offset
+ 0x00, // ttl
+ 0x11, // protocol; 0x11 = udp
+ 0x00, 0x00, // header checksum
+ 0x7f, 0x00, 0x00, 0x01, // src ip
+ 0x7f, 0x00, 0x00, 0x02, // dst ip
+ 0x00, 0x01, // src_port
+ 0x00, 0x02, // dst_port
+ 0x00, 0x03, // udp_length
+ 0x00, 0x00, // checksum
+ },
+ )
+ dc := NewDecoder()
+ actual, err := dc.decodeIPv4Header(octets)
+ require.NoError(t, err)
+
+ expected := IPV4Header{
+ Version: 0x40,
+ InternetHeaderLength: 0x05,
+ DSCP: 0,
+ ECN: 0,
+ TotalLength: 0,
+ Identification: 0,
+ Flags: 0,
+ FragmentOffset: 0,
+ TTL: 0,
+ Protocol: 0x11,
+ HeaderChecksum: 0,
+ SourceIP: [4]byte{127, 0, 0, 1},
+ DestIP: [4]byte{127, 0, 0, 2},
+ ProtocolHeader: UDPHeader{
+ SourcePort: 1,
+ DestinationPort: 2,
+ UDPLength: 3,
+ Checksum: 0,
+ },
+ }
+
+ require.Equal(t, expected, actual)
+}
+
+// Using the same Directive instance, prior paths through the parse tree should
+// not affect the latest parse.
+func TestIPv4HeaderSwitch(t *testing.T) {
+ octets := bytes.NewBuffer(
+ []byte{
+ 0x45, // version + IHL
+ 0x00, // ip_dscp + ip_ecn
+ 0x00, 0x00, // total length
+ 0x00, 0x00, // identification
+ 0x00, 0x00, // flags + frag offset
+ 0x00, // ttl
+ 0x11, // protocol; 0x11 = udp
+ 0x00, 0x00, // header checksum
+ 0x7f, 0x00, 0x00, 0x01, // src ip
+ 0x7f, 0x00, 0x00, 0x02, // dst ip
+ 0x00, 0x01, // src_port
+ 0x00, 0x02, // dst_port
+ 0x00, 0x03, // udp_length
+ 0x00, 0x00, // checksum
+ },
+ )
+ dc := NewDecoder()
+ _, err := dc.decodeIPv4Header(octets)
+ require.NoError(t, err)
+
+ octets = bytes.NewBuffer(
+ []byte{
+ 0x45, // version + IHL
+ 0x00, // ip_dscp + ip_ecn
+ 0x00, 0x00, // total length
+ 0x00, 0x00, // identification
+ 0x00, 0x00, // flags + frag offset
+ 0x00, // ttl
+ 0x06, // protocol; 0x06 = tcp
+ 0x00, 0x00, // header checksum
+ 0x7f, 0x00, 0x00, 0x01, // src ip
+ 0x7f, 0x00, 0x00, 0x02, // dst ip
+ 0x00, 0x01, // src_port
+ 0x00, 0x02, // dst_port
+ 0x00, 0x00, 0x00, 0x00, // sequence
+ 0x00, 0x00, 0x00, 0x00, // ack_number
+ 0x00, 0x00, // tcp_header_length
+ 0x00, 0x00, // tcp_window_size
+ 0x00, 0x00, // checksum
+ 0x00, 0x00, // tcp_urgent_pointer
+ },
+ )
+ dc = NewDecoder()
+ actual, err := dc.decodeIPv4Header(octets)
+ require.NoError(t, err)
+
+ expected := IPV4Header{
+ Version: 64,
+ InternetHeaderLength: 5,
+ Protocol: 6,
+ SourceIP: [4]byte{127, 0, 0, 1},
+ DestIP: [4]byte{127, 0, 0, 2},
+ ProtocolHeader: TCPHeader{
+ SourcePort: 1,
+ DestinationPort: 2,
+ },
+ }
+
+ require.Equal(t, expected, actual)
+}
+
+func TestUnknownProtocol(t *testing.T) {
+ octets := bytes.NewBuffer(
+ []byte{
+ 0x45, // version + IHL
+ 0x00, // ip_dscp + ip_ecn
+ 0x00, 0x00, // total length
+ 0x00, 0x00, // identification
+ 0x00, 0x00, // flags + frag offset
+ 0x00, // ttl
+ 0x99, // protocol
+ 0x00, 0x00, // header checksum
+ 0x7f, 0x00, 0x00, 0x01, // src ip
+ 0x7f, 0x00, 0x00, 0x02, // dst ip
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ },
+ )
+ dc := NewDecoder()
+ actual, err := dc.decodeIPv4Header(octets)
+ require.NoError(t, err)
+
+ expected := IPV4Header{
+ Version: 64,
+ InternetHeaderLength: 5,
+ Protocol: 153,
+ SourceIP: [4]byte{127, 0, 0, 1},
+ DestIP: [4]byte{127, 0, 0, 2},
+ }
+
+ require.Equal(t, expected, actual)
+}
diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go
new file mode 100644
index 0000000000000..2e3fbc0cf73f5
--- /dev/null
+++ b/plugins/inputs/sflow/sflow.go
@@ -0,0 +1,158 @@
+package sflow
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net"
+ "net/url"
+ "strings"
+ "sync"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+const sampleConfig = `
+ ## Address to listen for sFlow packets.
+ ## example: service_address = "udp://:6343"
+ ## service_address = "udp4://:6343"
+ ## service_address = "udp6://:6343"
+ service_address = "udp://:6343"
+
+ ## Set the size of the operating system's receive buffer.
+ ## example: read_buffer_size = "64KiB"
+ # read_buffer_size = ""
+`
+
+const (
+ maxPacketSize = 64 * 1024
+)
+
+type SFlow struct {
+ ServiceAddress string `toml:"service_address"`
+ ReadBufferSize internal.Size `toml:"read_buffer_size"`
+
+ Log telegraf.Logger `toml:"-"`
+
+ addr net.Addr
+ decoder *PacketDecoder
+ closer io.Closer
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+}
+
+// Description answers a description of this input plugin
+func (s *SFlow) Description() string {
+ return "SFlow V5 Protocol Listener"
+}
+
+// SampleConfig answers a sample configuration
+func (s *SFlow) SampleConfig() string {
+ return sampleConfig
+}
+
+func (s *SFlow) Init() error {
+ s.decoder = NewDecoder()
+ s.decoder.Log = s.Log
+ return nil
+}
+
+// Start starts this sFlow listener listening on the configured network for sFlow packets
+func (s *SFlow) Start(acc telegraf.Accumulator) error {
+ s.decoder.OnPacket(func(p *V5Format) {
+ metrics, err := makeMetrics(p)
+ if err != nil {
+ s.Log.Errorf("Failed to make metric from packet: %s", err)
+ return
+ }
+ for _, m := range metrics {
+ acc.AddMetric(m)
+ }
+ })
+
+ u, err := url.Parse(s.ServiceAddress)
+ if err != nil {
+ return err
+ }
+
+ conn, err := listenUDP(u.Scheme, u.Host)
+ if err != nil {
+ return err
+ }
+ s.closer = conn
+ s.addr = conn.LocalAddr()
+
+ if s.ReadBufferSize.Size > 0 {
+ conn.SetReadBuffer(int(s.ReadBufferSize.Size))
+ }
+
+ s.Log.Infof("Listening on %s://%s", s.addr.Network(), s.addr.String())
+
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ s.read(acc, conn)
+ }()
+
+ return nil
+}
+
+// Gather is a NOOP for sFlow as it receives, asynchronously, sFlow network packets
+func (s *SFlow) Gather(_ telegraf.Accumulator) error {
+ return nil
+}
+
+func (s *SFlow) Stop() {
+ if s.closer != nil {
+ s.closer.Close()
+ }
+ s.wg.Wait()
+}
+
+func (s *SFlow) Address() net.Addr {
+ return s.addr
+}
+
+func (s *SFlow) read(acc telegraf.Accumulator, conn net.PacketConn) {
+ buf := make([]byte, maxPacketSize)
+ for {
+ n, _, err := conn.ReadFrom(buf)
+ if err != nil {
+ if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
+ acc.AddError(err)
+ }
+ break
+ }
+ s.process(acc, buf[:n])
+ }
+}
+
+func (s *SFlow) process(acc telegraf.Accumulator, buf []byte) {
+
+ if err := s.decoder.Decode(bytes.NewBuffer(buf)); err != nil {
+ acc.AddError(fmt.Errorf("unable to parse incoming packet: %s", err))
+ }
+}
+
+func listenUDP(network string, address string) (*net.UDPConn, error) {
+ switch network {
+ case "udp", "udp4", "udp6":
+ addr, err := net.ResolveUDPAddr(network, address)
+ if err != nil {
+ return nil, err
+ }
+ return net.ListenUDP(network, addr)
+ default:
+ return nil, fmt.Errorf("unsupported network type: %s", network)
+ }
+}
+
+// init registers this SFlow input plug in with the Telegraf framework
+func init() {
+ inputs.Add("sflow", func() telegraf.Input {
+ return &SFlow{}
+ })
+}
diff --git a/plugins/inputs/sflow/sflow_test.go b/plugins/inputs/sflow/sflow_test.go
new file mode 100644
index 0000000000000..2df56c2ae97cd
--- /dev/null
+++ b/plugins/inputs/sflow/sflow_test.go
@@ -0,0 +1,135 @@
+package sflow
+
+import (
+ "encoding/hex"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSFlow(t *testing.T) {
+ sflow := &SFlow{
+ ServiceAddress: "udp://127.0.0.1:0",
+ Log: testutil.Logger{},
+ }
+ err := sflow.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ err = sflow.Start(&acc)
+ require.NoError(t, err)
+ defer sflow.Stop()
+
+ client, err := net.Dial(sflow.Address().Network(), sflow.Address().String())
+ require.NoError(t, err)
+
+ packetBytes, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000")
+ require.NoError(t, err)
+ client.Write(packetBytes)
+
+ acc.Wait(2)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "192.168.1.2",
+ "dst_ip": "192.168.9.10",
+ "dst_mac": "00:0c:29:36:d3:d6",
+ "dst_port": "47621",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "510",
+ "output_ifindex": "512",
+ "sample_direction": "ingress",
+ "source_id_index": "510",
+ "source_id_type": "0",
+ "src_ip": "192.168.9.19",
+ "src_mac": "94:c6:91:aa:97:60",
+ "src_port": "161",
+ },
+ map[string]interface{}{
+ "bytes": uint64(273408),
+ "drops": uint64(0),
+ "frame_length": uint64(267),
+ "header_length": uint64(128),
+ "ip_flags": uint64(2),
+ "ip_fragment_offset": uint64(0),
+ "ip_total_length": uint64(249),
+ "ip_ttl": uint64(64),
+ "sampling_rate": uint64(1024),
+ "udp_length": uint64(229),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "sflow",
+ map[string]string{
+ "agent_address": "192.168.1.2",
+ "dst_ip": "192.168.9.10",
+ "dst_mac": "00:0c:29:36:d3:d6",
+ "dst_port": "514",
+ "ether_type": "IPv4",
+ "header_protocol": "ETHERNET-ISO88023",
+ "input_ifindex": "528",
+ "output_ifindex": "512",
+ "sample_direction": "ingress",
+ "source_id_index": "528",
+ "source_id_type": "0",
+ "src_ip": "192.168.8.21",
+ "src_mac": "fc:ec:da:44:00:8f",
+ "src_port": "39529",
+ },
+ map[string]interface{}{
+ "bytes": uint64(2473984),
+ "drops": uint64(0),
+ "frame_length": uint64(151),
+ "header_length": uint64(128),
+ "ip_flags": uint64(2),
+ "ip_fragment_offset": uint64(0),
+ "ip_total_length": uint64(129),
+ "ip_ttl": uint64(63),
+ "sampling_rate": uint64(16384),
+ "udp_length": uint64(109),
+ "ip_dscp": "0",
+ "ip_ecn": "0",
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime())
+}
+
+func BenchmarkSFlow(b *testing.B) {
+ sflow := &SFlow{
+ ServiceAddress: "udp://127.0.0.1:0",
+ Log: testutil.Logger{},
+ }
+ err := sflow.Init()
+ require.NoError(b, err)
+
+ var acc testutil.Accumulator
+ err = sflow.Start(&acc)
+ require.NoError(b, err)
+ defer sflow.Stop()
+
+ client, err := net.Dial(sflow.Address().Network(), sflow.Address().String())
+ require.NoError(b, err)
+
+ packetBytes, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000")
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ client.Write(packetBytes)
+ acc.Wait(2)
+ }
+}
diff --git a/plugins/inputs/sflow/types.go b/plugins/inputs/sflow/types.go
new file mode 100644
index 0000000000000..a48857803b40d
--- /dev/null
+++ b/plugins/inputs/sflow/types.go
@@ -0,0 +1,285 @@
+package sflow
+
+import (
+ "net"
+ "strconv"
+)
+
+const (
+ AddressTypeIPv6 uint32 = 2 // sflow_version_5.txt line: 1384
+ AddressTypeIPv4 uint32 = 1 // sflow_version_5.txt line: 1383
+
+ IPProtocolTCP uint8 = 6
+ IPProtocolUDP uint8 = 17
+
+ metricName = "sflow"
+)
+
+var ETypeMap = map[uint16]string{
+ 0x0800: "IPv4",
+ 0x86DD: "IPv6",
+}
+
+var IPvMap = map[uint32]string{
+ 1: "IPV4", // sflow_version_5.txt line: 1383
+ 2: "IPV6", // sflow_version_5.txt line: 1384
+}
+
+type ContainsMetricData interface {
+ GetTags() map[string]string
+ GetFields() map[string]interface{}
+}
+
+// V5Format answers and decoder.Directive capable of decoding sFlow v5 packets in accordance
+// with SFlow v5 specification at https://sflow.org/sflow_version_5.txt
+type V5Format struct {
+ Version uint32
+ AgentAddress net.IPAddr
+ SubAgentID uint32
+ SequenceNumber uint32
+ Uptime uint32
+ Samples []Sample
+}
+
+type SampleType uint32
+
+const (
+ SampleTypeFlowSample SampleType = 1 // sflow_version_5.txt line: 1614
+ SampleTypeFlowSampleExpanded SampleType = 3 // sflow_version_5.txt line: 1698
+)
+
+type SampleData interface{}
+
+type Sample struct {
+ SampleType SampleType
+ SampleData SampleDataFlowSampleExpanded
+}
+
+type SampleDataFlowSampleExpanded struct {
+ SequenceNumber uint32
+ SourceIDType uint32
+ SourceIDIndex uint32
+ SamplingRate uint32
+ SamplePool uint32
+ Drops uint32
+ SampleDirection string // ingress/egress
+ InputIfFormat uint32
+ InputIfIndex uint32
+ OutputIfFormat uint32
+ OutputIfIndex uint32
+ FlowRecords []FlowRecord
+}
+
+type FlowFormatType uint32
+
+const (
+ FlowFormatTypeRawPacketHeader FlowFormatType = 1 // sflow_version_5.txt line: 1938
+)
+
+type FlowData ContainsMetricData
+
+type FlowRecord struct {
+ FlowFormat FlowFormatType
+ FlowData FlowData
+}
+
+type HeaderProtocolType uint32
+
+const (
+ HeaderProtocolTypeEthernetISO88023 HeaderProtocolType = 1
+ HeaderProtocolTypeISO88024TokenBus HeaderProtocolType = 2
+ HeaderProtocolTypeISO88025TokenRing HeaderProtocolType = 3
+ HeaderProtocolTypeFDDI HeaderProtocolType = 4
+ HeaderProtocolTypeFrameRelay HeaderProtocolType = 5
+ HeaderProtocolTypeX25 HeaderProtocolType = 6
+ HeaderProtocolTypePPP HeaderProtocolType = 7
+ HeaderProtocolTypeSMDS HeaderProtocolType = 8
+ HeaderProtocolTypeAAL5 HeaderProtocolType = 9
+ HeaderProtocolTypeAAL5IP HeaderProtocolType = 10 /* e.g. Cisco AAL5 mux */
+ HeaderProtocolTypeIPv4 HeaderProtocolType = 11
+ HeaderProtocolTypeIPv6 HeaderProtocolType = 12
+ HeaderProtocolTypeMPLS HeaderProtocolType = 13
+ HeaderProtocolTypePOS HeaderProtocolType = 14 /* RFC 1662, 2615 */
+)
+
+var HeaderProtocolMap = map[HeaderProtocolType]string{
+ HeaderProtocolTypeEthernetISO88023: "ETHERNET-ISO88023", // sflow_version_5.txt line: 1920
+}
+
+type Header ContainsMetricData
+
+type RawPacketHeaderFlowData struct {
+ HeaderProtocol HeaderProtocolType
+ FrameLength uint32
+ Bytes uint32
+ StrippedOctets uint32
+ HeaderLength uint32
+ Header Header
+}
+
+func (h RawPacketHeaderFlowData) GetTags() map[string]string {
+ t := h.Header.GetTags()
+ t["header_protocol"] = HeaderProtocolMap[h.HeaderProtocol]
+ return t
+}
+func (h RawPacketHeaderFlowData) GetFields() map[string]interface{} {
+ f := h.Header.GetFields()
+ f["bytes"] = h.Bytes
+ f["frame_length"] = h.FrameLength
+ f["header_length"] = h.HeaderLength
+ return f
+}
+
+type IPHeader ContainsMetricData
+
+type EthHeader struct {
+ DestinationMAC [6]byte
+ SourceMAC [6]byte
+ TagProtocolIdentifier uint16
+ TagControlInformation uint16
+ EtherTypeCode uint16
+ EtherType string
+ IPHeader IPHeader
+}
+
+func (h EthHeader) GetTags() map[string]string {
+ t := h.IPHeader.GetTags()
+ t["src_mac"] = net.HardwareAddr(h.SourceMAC[:]).String()
+ t["dst_mac"] = net.HardwareAddr(h.DestinationMAC[:]).String()
+ t["ether_type"] = h.EtherType
+ return t
+}
+func (h EthHeader) GetFields() map[string]interface{} {
+ return h.IPHeader.GetFields()
+}
+
+type ProtocolHeader ContainsMetricData
+
+// https://en.wikipedia.org/wiki/IPv4#Header
+type IPV4Header struct {
+ Version uint8 // 4 bit
+ InternetHeaderLength uint8 // 4 bit
+ DSCP uint8
+ ECN uint8
+ TotalLength uint16
+ Identification uint16
+ Flags uint8
+ FragmentOffset uint16
+ TTL uint8
+ Protocol uint8 // https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers
+ HeaderChecksum uint16
+ SourceIP [4]byte
+ DestIP [4]byte
+ ProtocolHeader ProtocolHeader
+}
+
+func (h IPV4Header) GetTags() map[string]string {
+ var t map[string]string
+ if h.ProtocolHeader != nil {
+ t = h.ProtocolHeader.GetTags()
+ } else {
+ t = map[string]string{}
+ }
+ t["src_ip"] = net.IP(h.SourceIP[:]).String()
+ t["dst_ip"] = net.IP(h.DestIP[:]).String()
+ return t
+}
+func (h IPV4Header) GetFields() map[string]interface{} {
+ var f map[string]interface{}
+ if h.ProtocolHeader != nil {
+ f = h.ProtocolHeader.GetFields()
+ } else {
+ f = map[string]interface{}{}
+ }
+ f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10)
+ f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10)
+ f["ip_flags"] = h.Flags
+ f["ip_fragment_offset"] = h.FragmentOffset
+ f["ip_total_length"] = h.TotalLength
+ f["ip_ttl"] = h.TTL
+ return f
+}
+
+// https://en.wikipedia.org/wiki/IPv6_packet
+type IPV6Header struct {
+ DSCP uint8
+ ECN uint8
+ PayloadLength uint16
+ NextHeaderProto uint8 // tcp/udp?
+ HopLimit uint8
+ SourceIP [16]byte
+ DestIP [16]byte
+ ProtocolHeader ProtocolHeader
+}
+
+func (h IPV6Header) GetTags() map[string]string {
+ var t map[string]string
+ if h.ProtocolHeader != nil {
+ t = h.ProtocolHeader.GetTags()
+ } else {
+ t = map[string]string{}
+ }
+ t["src_ip"] = net.IP(h.SourceIP[:]).String()
+ t["dst_ip"] = net.IP(h.DestIP[:]).String()
+ return t
+}
+func (h IPV6Header) GetFields() map[string]interface{} {
+ var f map[string]interface{}
+ if h.ProtocolHeader != nil {
+ f = h.ProtocolHeader.GetFields()
+ } else {
+ f = map[string]interface{}{}
+ }
+ f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10)
+ f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10)
+ f["payload_length"] = h.PayloadLength
+ return f
+}
+
+// https://en.wikipedia.org/wiki/Transmission_Control_Protocol
+type TCPHeader struct {
+ SourcePort uint16
+ DestinationPort uint16
+ Sequence uint32
+ AckNumber uint32
+ TCPHeaderLength uint8
+ Flags uint16
+ TCPWindowSize uint16
+ Checksum uint16
+ TCPUrgentPointer uint16
+}
+
+func (h TCPHeader) GetTags() map[string]string {
+ t := map[string]string{
+ "dst_port": strconv.FormatUint(uint64(h.DestinationPort), 10),
+ "src_port": strconv.FormatUint(uint64(h.SourcePort), 10),
+ }
+ return t
+}
+func (h TCPHeader) GetFields() map[string]interface{} {
+ return map[string]interface{}{
+ "tcp_header_length": h.TCPHeaderLength,
+ "tcp_urgent_pointer": h.TCPUrgentPointer,
+ "tcp_window_size": h.TCPWindowSize,
+ }
+}
+
+type UDPHeader struct {
+ SourcePort uint16
+ DestinationPort uint16
+ UDPLength uint16
+ Checksum uint16
+}
+
+func (h UDPHeader) GetTags() map[string]string {
+ t := map[string]string{
+ "dst_port": strconv.FormatUint(uint64(h.DestinationPort), 10),
+ "src_port": strconv.FormatUint(uint64(h.SourcePort), 10),
+ }
+ return t
+}
+func (h UDPHeader) GetFields() map[string]interface{} {
+ return map[string]interface{}{
+ "udp_length": h.UDPLength,
+ }
+}
diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md
index c60e11e35b6f5..47320aeac2ebf 100644
--- a/plugins/inputs/smart/README.md
+++ b/plugins/inputs/smart/README.md
@@ -3,6 +3,8 @@
Get metrics using the command line utility `smartctl` for S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) and solid-state drives (SSDs)[1] that detects and reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures.
See smartmontools (https://www.smartmontools.org/).
+SMART information is separated between different measurements: `smart_device` is used for general information, while `smart_attribute` stores the detailed attribute information if `attributes = true` is enabled in the plugin configuration.
+
If no devices are specified, the plugin will scan for SMART devices via the following command:
```
@@ -24,44 +26,46 @@ To enable SMART on a storage device run:
smartctl -s on
```
-### Configuration:
+### Configuration
```toml
# Read metrics from storage devices supporting S.M.A.R.T.
[[inputs.smart]]
## Optionally specify the path to the smartctl executable
# path = "/usr/bin/smartctl"
- #
+
## On most platforms smartctl requires root access.
## Setting 'use_sudo' to true will make use of sudo to run smartctl.
## Sudo must be configured to to allow the telegraf user to run smartctl
- ## with out password.
+ ## without a password.
# use_sudo = false
- #
+
## Skip checking disks in this power mode. Defaults to
## "standby" to not wake up disks that have stoped rotating.
- ## See --nockeck in the man pages for smartctl.
+ ## See --nocheck in the man pages for smartctl.
## smartctl version 5.41 and 5.42 have faulty detection of
## power mode and might require changing this value to
- ## "never" depending on your storage device.
+ ## "never" depending on your disks.
# nocheck = "standby"
- #
- ## Gather detailed metrics for each SMART Attribute.
- ## Defaults to "false"
- ##
+
+ ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
+ ## information from each drive into the `smart_attribute` measurement.
# attributes = false
- #
+
## Optionally specify devices to exclude from reporting.
# excludes = [ "/dev/pass6" ]
- #
+
## Optionally specify devices and device type, if unset
## a scan (smartctl --scan) for S.M.A.R.T. devices will
## done and all found will be included except for the
## excluded in excludes.
# devices = [ "/dev/ada0 -d atacam" ]
+
+ ## Timeout for the smartctl command to complete.
+ # timeout = "30s"
```
-### Permissions:
+### Permissions
It's important to note that this plugin references smartctl, which may require additional permissions to execute successfully.
Depending on the user/group permissions of the telegraf user executing this plugin, you may need to use sudo.
@@ -82,15 +86,14 @@ telegraf ALL=(ALL) NOPASSWD: SMARTCTL
Defaults!SMARTCTL !logfile, !syslog, !pam_session
```
-### Metrics:
+### Metrics
- smart_device:
- tags:
- capacity
- device
- - device_model
- enabled
- - health
+ - model
- serial_no
- wwn
- fields:
@@ -103,10 +106,13 @@ Defaults!SMARTCTL !logfile, !syslog, !pam_session
- smart_attribute:
- tags:
+ - capacity
- device
+ - enabled
- fail
- flags
- id
+ - model
- name
- serial_no
- wwn
@@ -144,10 +150,24 @@ devices can be referenced by the WWN in the following location:
To run `smartctl` with `sudo` create a wrapper script and use `path` in
the configuration to execute that.
-### Output
+### Troubleshooting
+
+If this plugin is not working as expected for your SMART enabled device,
+please run these commands and include the output in a bug report:
+```
+smartctl --scan
+```
+
+Run the following command replacing your configuration setting for NOCHECK and
+the DEVICE from the previous command:
+```
+smartctl --info --health --attributes --tolerance=verypermissive --nocheck NOCHECK --format=brief -d DEVICE
+```
+
+### Example Output
```
smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000
-smart_attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=199,name=UDMA_CRC_Error_Count,flags=-O-RC-,fail=-,host=mbpro.local,device=rdisk0 threshold=0i,raw_value=0i,exit_status=0i,value=200i,worst=200i 1502536854000000000
-smart_attribute,device=rdisk0,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=240,name=Unknown_SSD_Attribute,flags=-O---K,fail=-,host=mbpro.local exit_status=0i,value=100i,worst=100i,threshold=0i,raw_value=0i 1502536854000000000
+smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O-RC-,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=UDMA_CRC_Error_Count,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=200i,worst=200i 1502536854000000000
+smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O---K,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=Unknown_SSD_Attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=100i,worst=100i 1502536854000000000
```
diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go
index 46912d487d2fc..63d16aad3e35e 100644
--- a/plugins/inputs/smart/smart.go
+++ b/plugins/inputs/smart/smart.go
@@ -18,34 +18,135 @@ import (
)
var (
- execCommand = exec.Command // execCommand is used to mock commands in tests.
-
// Device Model: APPLE SSD SM256E
- modelInInfo = regexp.MustCompile("^Device Model:\\s+(.*)$")
+ // Product: HUH721212AL5204
+ // Model Number: TS128GMTE850
+ modelInfo = regexp.MustCompile("^(Device Model|Product|Model Number):\\s+(.*)$")
// Serial Number: S0X5NZBC422720
- serialInInfo = regexp.MustCompile("^Serial Number:\\s+(.*)$")
+ serialInfo = regexp.MustCompile("(?i)^Serial Number:\\s+(.*)$")
// LU WWN Device Id: 5 002538 655584d30
- wwnInInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$")
+ wwnInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$")
// User Capacity: 251,000,193,024 bytes [251 GB]
- usercapacityInInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$")
+ usercapacityInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$")
// SMART support is: Enabled
- smartEnabledInInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$")
+ smartEnabledInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$")
// SMART overall-health self-assessment test result: PASSED
+ // SMART Health Status: OK
// PASSED, FAILED, UNKNOWN
- smartOverallHealth = regexp.MustCompile("^SMART overall-health self-assessment test result:\\s+(\\w+).*$")
+ smartOverallHealth = regexp.MustCompile("^(SMART overall-health self-assessment test result|SMART Health Status):\\s+(\\w+).*$")
+
+ // sasNvmeAttr is a SAS or NVME SMART attribute
+ sasNvmeAttr = regexp.MustCompile(`^([^:]+):\s+(.+)$`)
// ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
// 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0
// 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0
// 192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716
- attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$")
+ attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9-]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$")
deviceFieldIds = map[string]string{
"1": "read_error_rate",
"7": "seek_error_rate",
+ "190": "temp_c",
"194": "temp_c",
"199": "udma_crc_errors",
}
+
+ sasNvmeAttributes = map[string]struct {
+ ID string
+ Name string
+ Parse func(fields, deviceFields map[string]interface{}, str string) error
+ }{
+ "Accumulated start-stop cycles": {
+ ID: "4",
+ Name: "Start_Stop_Count",
+ },
+ "Accumulated load-unload cycles": {
+ ID: "193",
+ Name: "Load_Cycle_Count",
+ },
+ "Current Drive Temperature": {
+ ID: "194",
+ Name: "Temperature_Celsius",
+ Parse: parseTemperature,
+ },
+ "Temperature": {
+ ID: "194",
+ Name: "Temperature_Celsius",
+ Parse: parseTemperature,
+ },
+ "Power Cycles": {
+ ID: "12",
+ Name: "Power_Cycle_Count",
+ },
+ "Power On Hours": {
+ ID: "9",
+ Name: "Power_On_Hours",
+ },
+ "Media and Data Integrity Errors": {
+ Name: "Media_and_Data_Integrity_Errors",
+ },
+ "Error Information Log Entries": {
+ Name: "Error_Information_Log_Entries",
+ },
+ "Critical Warning": {
+ Name: "Critical_Warning",
+ Parse: func(fields, _ map[string]interface{}, str string) error {
+ var value int64
+ if _, err := fmt.Sscanf(str, "0x%x", &value); err != nil {
+ return err
+ }
+
+ fields["raw_value"] = value
+
+ return nil
+ },
+ },
+ "Available Spare": {
+ Name: "Available_Spare",
+ Parse: parsePercentageInt,
+ },
+ "Available Spare Threshold": {
+ Name: "Available_Spare_Threshold",
+ Parse: parsePercentageInt,
+ },
+ "Percentage Used": {
+ Name: "Percentage_Used",
+ Parse: parsePercentageInt,
+ },
+ "Data Units Read": {
+ Name: "Data_Units_Read",
+ Parse: parseDataUnits,
+ },
+ "Data Units Written": {
+ Name: "Data_Units_Written",
+ Parse: parseDataUnits,
+ },
+ "Host Read Commands": {
+ Name: "Host_Read_Commands",
+ Parse: parseCommaSeparatedInt,
+ },
+ "Host Write Commands": {
+ Name: "Host_Write_Commands",
+ Parse: parseCommaSeparatedInt,
+ },
+ "Controller Busy Time": {
+ Name: "Controller_Busy_Time",
+ Parse: parseCommaSeparatedInt,
+ },
+ "Unsafe Shutdowns": {
+ Name: "Unsafe_Shutdowns",
+ Parse: parseCommaSeparatedInt,
+ },
+ "Warning Comp. Temperature Time": {
+ Name: "Warning_Temperature_Time",
+ Parse: parseCommaSeparatedInt,
+ },
+ "Critical Comp. Temperature Time": {
+ Name: "Critical_Temperature_Time",
+ Parse: parseCommaSeparatedInt,
+ },
+ }
)
type Smart struct {
@@ -55,18 +156,19 @@ type Smart struct {
Excludes []string
Devices []string
UseSudo bool
+ Timeout internal.Duration
}
var sampleConfig = `
## Optionally specify the path to the smartctl executable
# path = "/usr/bin/smartctl"
- #
+
## On most platforms smartctl requires root access.
## Setting 'use_sudo' to true will make use of sudo to run smartctl.
## Sudo must be configured to to allow the telegraf user to run smartctl
- ## with out password.
+ ## without a password.
# use_sudo = false
- #
+
## Skip checking disks in this power mode. Defaults to
## "standby" to not wake up disks that have stoped rotating.
## See --nocheck in the man pages for smartctl.
@@ -74,22 +176,30 @@ var sampleConfig = `
## power mode and might require changing this value to
## "never" depending on your disks.
# nocheck = "standby"
- #
- ## Gather detailed metrics for each SMART Attribute.
- ## Defaults to "false"
- ##
+
+ ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
+ ## information from each drive into the 'smart_attribute' measurement.
# attributes = false
- #
+
## Optionally specify devices to exclude from reporting.
# excludes = [ "/dev/pass6" ]
- #
+
## Optionally specify devices and device type, if unset
## a scan (smartctl --scan) for S.M.A.R.T. devices will
## done and all found will be included except for the
## excluded in excludes.
# devices = [ "/dev/ada0 -d atacam" ]
+
+ ## Timeout for the smartctl command to complete.
+ # timeout = "30s"
`
+func NewSmart() *Smart {
+ return &Smart{
+ Timeout: internal.Duration{Duration: time.Second * 30},
+ }
+}
+
func (m *Smart) SampleConfig() string {
return sampleConfig
}
@@ -117,21 +227,19 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error {
}
// Wrap with sudo
-func sudo(sudo bool, command string, args ...string) *exec.Cmd {
+var runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) {
+ cmd := exec.Command(command, args...)
if sudo {
- return execCommand("sudo", append([]string{"-n", command}, args...)...)
+ cmd = exec.Command("sudo", append([]string{"-n", command}, args...)...)
}
-
- return execCommand(command, args...)
+ return internal.CombinedOutputTimeout(cmd, timeout.Duration)
}
// Scan for S.M.A.R.T. devices
func (m *Smart) scan() ([]string, error) {
-
- cmd := sudo(m.UseSudo, m.Path, "--scan")
- out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
+ out, err := runCmd(m.Timeout, m.UseSudo, m.Path, "--scan")
if err != nil {
- return []string{}, fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
+ return []string{}, fmt.Errorf("failed to run command '%s --scan': %s - %s", m.Path, err, string(out))
}
devices := []string{}
@@ -158,12 +266,11 @@ func excludedDev(excludes []string, deviceLine string) bool {
// Get info and attributes for each S.M.A.R.T. device
func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) {
-
var wg sync.WaitGroup
wg.Add(len(devices))
for _, device := range devices {
- go gatherDisk(acc, m.UseSudo, m.Attributes, m.Path, m.Nocheck, device, &wg)
+ go gatherDisk(acc, m.Timeout, m.UseSudo, m.Attributes, m.Path, m.Nocheck, device, &wg)
}
wg.Wait()
@@ -180,81 +287,78 @@ func exitStatus(err error) (int, error) {
return 0, err
}
-func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, smartctl, nockeck, device string, wg *sync.WaitGroup) {
-
+func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) {
defer wg.Done()
// smartctl 5.41 & 5.42 have are broken regarding handling of --nocheck/-n
- args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nockeck, "--format=brief"}
+ args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nocheck, "--format=brief"}
args = append(args, strings.Split(device, " ")...)
- cmd := sudo(usesudo, smartctl, args...)
- out, e := internal.CombinedOutputTimeout(cmd, time.Second*5)
+ out, e := runCmd(timeout, usesudo, smartctl, args...)
outStr := string(out)
// Ignore all exit statuses except if it is a command line parse error
exitStatus, er := exitStatus(e)
if er != nil {
- acc.AddError(fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), e, outStr))
+ acc.AddError(fmt.Errorf("failed to run command '%s %s': %s - %s", smartctl, strings.Join(args, " "), e, outStr))
return
}
- device_tags := map[string]string{}
- device_node := strings.Split(device, " ")[0]
- device_tags["device"] = path.Base(device_node)
- device_fields := make(map[string]interface{})
- device_fields["exit_status"] = exitStatus
+ deviceTags := map[string]string{}
+ deviceNode := strings.Split(device, " ")[0]
+ deviceTags["device"] = path.Base(deviceNode)
+ deviceFields := make(map[string]interface{})
+ deviceFields["exit_status"] = exitStatus
scanner := bufio.NewScanner(strings.NewReader(outStr))
for scanner.Scan() {
line := scanner.Text()
- model := modelInInfo.FindStringSubmatch(line)
- if len(model) > 1 {
- device_tags["model"] = model[1]
+ model := modelInfo.FindStringSubmatch(line)
+ if len(model) > 2 {
+ deviceTags["model"] = model[2]
}
- serial := serialInInfo.FindStringSubmatch(line)
+ serial := serialInfo.FindStringSubmatch(line)
if len(serial) > 1 {
- device_tags["serial_no"] = serial[1]
+ deviceTags["serial_no"] = serial[1]
}
- wwn := wwnInInfo.FindStringSubmatch(line)
+ wwn := wwnInfo.FindStringSubmatch(line)
if len(wwn) > 1 {
- device_tags["wwn"] = strings.Replace(wwn[1], " ", "", -1)
+ deviceTags["wwn"] = strings.Replace(wwn[1], " ", "", -1)
}
- capacity := usercapacityInInfo.FindStringSubmatch(line)
+ capacity := usercapacityInfo.FindStringSubmatch(line)
if len(capacity) > 1 {
- device_tags["capacity"] = strings.Replace(capacity[1], ",", "", -1)
+ deviceTags["capacity"] = strings.Replace(capacity[1], ",", "", -1)
}
- enabled := smartEnabledInInfo.FindStringSubmatch(line)
+ enabled := smartEnabledInfo.FindStringSubmatch(line)
if len(enabled) > 1 {
- device_tags["enabled"] = enabled[1]
+ deviceTags["enabled"] = enabled[1]
}
health := smartOverallHealth.FindStringSubmatch(line)
- if len(health) > 1 {
- device_fields["health_ok"] = (health[1] == "PASSED")
+ if len(health) > 2 {
+ deviceFields["health_ok"] = (health[2] == "PASSED" || health[2] == "OK")
}
- attr := attribute.FindStringSubmatch(line)
+ tags := map[string]string{}
+ fields := make(map[string]interface{})
- if len(attr) > 1 {
-
- if attributes {
- tags := map[string]string{}
- fields := make(map[string]interface{})
-
- device_node := strings.Split(device, " ")[0]
- tags["device"] = path.Base(device_node)
-
- if serial, ok := device_tags["serial_no"]; ok {
- tags["serial_no"] = serial
- }
- if wwn, ok := device_tags["wwn"]; ok {
- tags["wwn"] = wwn
+ if collectAttributes {
+ keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled"}
+ for _, key := range keys {
+ if value, ok := deviceTags[key]; ok {
+ tags[key] = value
}
+ }
+ }
+
+ attr := attribute.FindStringSubmatch(line)
+ if len(attr) > 1 {
+ // attribute has been found, add it only if collectAttributes is true
+ if collectAttributes {
tags["id"] = attr[1]
tags["name"] = attr[2]
tags["flags"] = attr[3]
@@ -282,16 +386,39 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, smartctl, no
// save the raw value to a field.
if field, ok := deviceFieldIds[attr[1]]; ok {
if val, err := parseRawValue(attr[8]); err == nil {
- device_fields[field] = val
+ deviceFields[field] = val
+ }
+ }
+ } else {
+ // what was found is not a vendor attribute
+ if matches := sasNvmeAttr.FindStringSubmatch(line); len(matches) > 2 {
+ if attr, ok := sasNvmeAttributes[matches[1]]; ok {
+ tags["name"] = attr.Name
+ if attr.ID != "" {
+ tags["id"] = attr.ID
+ }
+
+ parse := parseCommaSeparatedInt
+ if attr.Parse != nil {
+ parse = attr.Parse
+ }
+
+ if err := parse(fields, deviceFields, matches[2]); err != nil {
+ continue
+ }
+ // if the field is classified as an attribute, only add it
+ // if collectAttributes is true
+ if collectAttributes {
+ acc.AddFields("smart_attribute", fields, tags)
+ }
}
}
}
}
- acc.AddFields("smart_device", device_fields, device_tags)
+ acc.AddFields("smart_device", deviceFields, deviceTags)
}
func parseRawValue(rawVal string) (int64, error) {
-
// Integer
if i, err := strconv.ParseInt(rawVal, 10, 64); err == nil {
return i, nil
@@ -332,15 +459,46 @@ func parseInt(str string) int64 {
return 0
}
-func init() {
- m := Smart{}
- path, _ := exec.LookPath("smartctl")
- if len(path) > 0 {
- m.Path = path
+func parseCommaSeparatedInt(fields, _ map[string]interface{}, str string) error {
+ i, err := strconv.ParseInt(strings.Replace(str, ",", "", -1), 10, 64)
+ if err != nil {
+ return err
}
- m.Nocheck = "standby"
+ fields["raw_value"] = i
+
+ return nil
+}
+
+func parsePercentageInt(fields, deviceFields map[string]interface{}, str string) error {
+ return parseCommaSeparatedInt(fields, deviceFields, strings.TrimSuffix(str, "%"))
+}
+
+func parseDataUnits(fields, deviceFields map[string]interface{}, str string) error {
+ units := strings.Fields(str)[0]
+ return parseCommaSeparatedInt(fields, deviceFields, units)
+}
+
+func parseTemperature(fields, deviceFields map[string]interface{}, str string) error {
+ var temp int64
+ if _, err := fmt.Sscanf(str, "%d C", &temp); err != nil {
+ return err
+ }
+
+ fields["raw_value"] = temp
+ deviceFields["temp_c"] = temp
+
+ return nil
+}
+
+func init() {
inputs.Add("smart", func() telegraf.Input {
- return &m
+ m := NewSmart()
+ path, _ := exec.LookPath("smartctl")
+ if len(path) > 0 {
+ m.Path = path
+ }
+ m.Nocheck = "standby"
+ return m
})
}
diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go
index da658f5f91a74..465ce9317e1e2 100644
--- a/plugins/inputs/smart/smart_test.go
+++ b/plugins/inputs/smart/smart_test.go
@@ -1,75 +1,37 @@
package smart
import (
- "fmt"
- "os"
- "os/exec"
+ "errors"
+ "sync"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-var (
- mockScanData = `/dev/ada0 -d atacam # /dev/ada0, ATA device
-`
- mockInfoAttributeData = `smartctl 6.5 2016-05-07 r4318 [Darwin 16.4.0 x86_64] (local build)
-Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
-
-CHECK POWER MODE not implemented, ignoring -n option
-=== START OF INFORMATION SECTION ===
-Model Family: Apple SD/SM/TS...E/F SSDs
-Device Model: APPLE SSD SM256E
-Serial Number: S0X5NZBC422720
-LU WWN Device Id: 5 002538 043584d30
-Firmware Version: CXM09A1Q
-User Capacity: 251,000,193,024 bytes [251 GB]
-Sector Sizes: 512 bytes logical, 4096 bytes physical
-Rotation Rate: Solid State Device
-Device is: In smartctl database [for details use: -P show]
-ATA Version is: ATA8-ACS T13/1699-D revision 4c
-SATA Version is: SATA 3.0, 6.0 Gb/s (current: 6.0 Gb/s)
-Local Time is: Thu Feb 9 16:48:45 2017 CET
-SMART support is: Available - device has SMART capability.
-SMART support is: Enabled
+func TestGatherAttributes(t *testing.T) {
+ s := NewSmart()
+ s.Path = "smartctl"
+ s.Attributes = true
-=== START OF READ SMART DATA SECTION ===
-SMART overall-health self-assessment test result: PASSED
+ assert.Equal(t, time.Second*30, s.Timeout.Duration)
-=== START OF READ SMART DATA SECTION ===
-SMART Attributes Data Structure revision number: 1
-Vendor Specific SMART Attributes with Thresholds:
-ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
- 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0
- 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0
- 9 Power_On_Hours -O--CK 099 099 000 - 2988
- 12 Power_Cycle_Count -O--CK 085 085 000 - 14879
-169 Unknown_Attribute PO--C- 253 253 010 - 2044932921600
-173 Wear_Leveling_Count -O--CK 185 185 100 - 957808640337
-190 Airflow_Temperature_Cel -O---K 055 040 045 Past 45 (Min/Max 43/57 #2689)
-192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716
-194 Temperature_Celsius -O---K 066 021 000 - 34 (Min/Max 14/79)
-197 Current_Pending_Sector -O---K 100 100 000 - 0
-199 UDMA_CRC_Error_Count -O-RC- 200 200 000 - 0
-240 Head_Flying_Hours ------ 100 253 000 - 6585h+55m+23.234s
- ||||||_ K auto-keep
- |||||__ C event count
- ||||___ R error rate
- |||____ S speed/performance
- ||_____ O updated online
- |______ P prefailure warning
-`
-)
+ var acc testutil.Accumulator
-func TestGatherAttributes(t *testing.T) {
- s := &Smart{
- Path: "smartctl",
- Attributes: true,
+ runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) {
+ if len(args) > 0 {
+ if args[0] == "--scan" {
+ return []byte(mockScanData), nil
+ } else if args[0] == "--info" {
+ return []byte(mockInfoAttributeData), nil
+ }
+ }
+ return nil, errors.New("command not found")
}
- // overwriting exec commands with mock commands
- execCommand = fakeExecCommand
- var acc testutil.Accumulator
err := s.Gather(&acc)
@@ -90,8 +52,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "1",
"name": "Raw_Read_Error_Rate",
"flags": "-O-RC-",
@@ -108,8 +73,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "5",
"name": "Reallocated_Sector_Ct",
"flags": "PO--CK",
@@ -126,8 +94,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "9",
"name": "Power_On_Hours",
"flags": "-O--CK",
@@ -144,8 +115,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "12",
"name": "Power_Cycle_Count",
"flags": "-O--CK",
@@ -162,8 +136,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "169",
"name": "Unknown_Attribute",
"flags": "PO--C-",
@@ -180,8 +157,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "173",
"name": "Wear_Leveling_Count",
"flags": "-O--CK",
@@ -198,8 +178,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "190",
"name": "Airflow_Temperature_Cel",
"flags": "-O---K",
@@ -216,8 +199,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "192",
"name": "Power-Off_Retract_Count",
"flags": "-O--C-",
@@ -234,8 +220,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "194",
"name": "Temperature_Celsius",
"flags": "-O---K",
@@ -252,8 +241,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "197",
"name": "Current_Pending_Sector",
"flags": "-O---K",
@@ -270,8 +262,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "199",
"name": "UDMA_CRC_Error_Count",
"flags": "-O-RC-",
@@ -288,8 +283,11 @@ func TestGatherAttributes(t *testing.T) {
},
map[string]string{
"device": "ada0",
+ "model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
+ "enabled": "Enabled",
+ "capacity": "251000193024",
"id": "240",
"name": "Head_Flying_Hours",
"flags": "------",
@@ -302,8 +300,6 @@ func TestGatherAttributes(t *testing.T) {
acc.AssertContainsTaggedFields(t, "smart_attribute", test.fields, test.tags)
}
- // tags = map[string]string{}
-
var testsAda0Device = []struct {
fields map[string]interface{}
tags map[string]string
@@ -330,16 +326,16 @@ func TestGatherAttributes(t *testing.T) {
for _, test := range testsAda0Device {
acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags)
}
-
}
func TestGatherNoAttributes(t *testing.T) {
- s := &Smart{
- Path: "smartctl",
- Attributes: false,
- }
+ s := NewSmart()
+ s.Path = "smartctl"
+ s.Attributes = false
+
+ assert.Equal(t, time.Second*30, s.Timeout.Duration)
+
// overwriting exec commands with mock commands
- execCommand = fakeExecCommand
var acc testutil.Accumulator
err := s.Gather(&acc)
@@ -348,8 +344,6 @@ func TestGatherNoAttributes(t *testing.T) {
assert.Equal(t, 5, acc.NFields(), "Wrong number of fields gathered")
acc.AssertDoesNotContainMeasurement(t, "smart_attribute")
- // tags = map[string]string{}
-
var testsAda0Device = []struct {
fields map[string]interface{}
tags map[string]string
@@ -376,51 +370,846 @@ func TestGatherNoAttributes(t *testing.T) {
for _, test := range testsAda0Device {
acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags)
}
-
}
func TestExcludedDev(t *testing.T) {
assert.Equal(t, true, excludedDev([]string{"/dev/pass6"}, "/dev/pass6 -d atacam"), "Should be excluded.")
assert.Equal(t, false, excludedDev([]string{}, "/dev/pass6 -d atacam"), "Shouldn't be excluded.")
assert.Equal(t, false, excludedDev([]string{"/dev/pass6"}, "/dev/pass1 -d atacam"), "Shouldn't be excluded.")
+}
+
+func TestGatherSATAInfo(t *testing.T) {
+ runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) {
+ return []byte(hgstSATAInfoData), nil
+ }
+ var (
+ acc = &testutil.Accumulator{}
+ wg = &sync.WaitGroup{}
+ )
+
+ wg.Add(1)
+ gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg)
+ assert.Equal(t, 101, acc.NFields(), "Wrong number of fields gathered")
+ assert.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered")
}
-// fackeExecCommand is a helper function that mock
-// the exec.Command call (and call the test binary)
-func fakeExecCommand(command string, args ...string) *exec.Cmd {
- cs := []string{"-test.run=TestHelperProcess", "--", command}
- cs = append(cs, args...)
- cmd := exec.Command(os.Args[0], cs...)
- cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
- return cmd
+func TestGatherSATAInfo65(t *testing.T) {
+ runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) {
+ return []byte(hgstSATAInfoData65), nil
+ }
+
+ var (
+ acc = &testutil.Accumulator{}
+ wg = &sync.WaitGroup{}
+ )
+
+ wg.Add(1)
+ gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg)
+ assert.Equal(t, 91, acc.NFields(), "Wrong number of fields gathered")
+ assert.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered")
}
-// TestHelperProcess isn't a real test. It's used to mock exec.Command
-// For example, if you run:
-// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- --scan
-// it returns below mockScanData.
-func TestHelperProcess(t *testing.T) {
- if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
- return
+func TestGatherHgstSAS(t *testing.T) {
+ runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) {
+ return []byte(hgstSASInfoData), nil
}
- args := os.Args
+ var (
+ acc = &testutil.Accumulator{}
+ wg = &sync.WaitGroup{}
+ )
- // Previous arguments are tests stuff, that looks like :
- // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
- cmd, arg1, args := args[3], args[4], args[5:]
+ wg.Add(1)
+ gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg)
+ assert.Equal(t, 6, acc.NFields(), "Wrong number of fields gathered")
+ assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered")
+}
- if cmd == "smartctl" {
- if arg1 == "--scan" {
- fmt.Fprint(os.Stdout, mockScanData)
- }
- if arg1 == "--info" {
- fmt.Fprint(os.Stdout, mockInfoAttributeData)
- }
- } else {
- fmt.Fprint(os.Stdout, "command not found")
- os.Exit(1)
+func TestGatherHtSAS(t *testing.T) {
+ runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) {
+ return []byte(htSASInfoData), nil
+ }
+
+ var (
+ acc = &testutil.Accumulator{}
+ wg = &sync.WaitGroup{}
+ )
+
+ wg.Add(1)
+ gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "smart_attribute",
+ map[string]string{
+ "device": ".",
+ "serial_no": "PDWAR9GE",
+ "enabled": "Enabled",
+ "id": "194",
+ "model": "HUC103030CSS600",
+ "name": "Temperature_Celsius",
+ },
+ map[string]interface{}{
+ "raw_value": 36,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "smart_attribute",
+ map[string]string{
+ "device": ".",
+ "serial_no": "PDWAR9GE",
+ "enabled": "Enabled",
+ "id": "4",
+ "model": "HUC103030CSS600",
+ "name": "Start_Stop_Count",
+ },
+ map[string]interface{}{
+ "raw_value": 47,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "smart_device",
+ map[string]string{
+ "device": ".",
+ "serial_no": "PDWAR9GE",
+ "enabled": "Enabled",
+ "model": "HUC103030CSS600",
+ },
+ map[string]interface{}{
+ "exit_status": 0,
+ "health_ok": true,
+ "temp_c": 36,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime())
+}
+
+func TestGatherSSD(t *testing.T) {
+ runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) {
+ return []byte(ssdInfoData), nil
+ }
+
+ var (
+ acc = &testutil.Accumulator{}
+ wg = &sync.WaitGroup{}
+ )
+
+ wg.Add(1)
+ gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg)
+ assert.Equal(t, 105, acc.NFields(), "Wrong number of fields gathered")
+ assert.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered")
+}
+
+func TestGatherSSDRaid(t *testing.T) {
+ runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) {
+ return []byte(ssdRaidInfoData), nil
+ }
+
+ var (
+ acc = &testutil.Accumulator{}
+ wg = &sync.WaitGroup{}
+ )
+
+ wg.Add(1)
+ gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg)
+ assert.Equal(t, 74, acc.NFields(), "Wrong number of fields gathered")
+ assert.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered")
+}
+
+func TestGatherNvme(t *testing.T) {
+ runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) {
+ return []byte(nvmeInfoData), nil
+ }
+
+ var (
+ acc = &testutil.Accumulator{}
+ wg = &sync.WaitGroup{}
+ )
+
+ wg.Add(1)
+ gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric("smart_device",
+ map[string]string{
+ "device": ".",
+ "model": "TS128GMTE850",
+ "serial_no": "D704940282?",
+ },
+ map[string]interface{}{
+ "exit_status": 0,
+ "health_ok": true,
+ "temp_c": 38,
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "id": "9",
+ "name": "Power_On_Hours",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": 6038,
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "id": "12",
+ "name": "Power_Cycle_Count",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": 472,
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Media_and_Data_Integrity_Errors",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": 0,
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Error_Information_Log_Entries",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": 119699,
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Available_Spare",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": 100,
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Available_Spare_Threshold",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": 10,
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "id": "194",
+ "name": "Temperature_Celsius",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": 38,
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Critical_Warning",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(9),
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Percentage_Used",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(16),
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Data_Units_Read",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(11836935),
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Data_Units_Written",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(62288091),
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Host_Read_Commands",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(135924188),
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Host_Write_Commands",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(7715573429),
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Controller_Busy_Time",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(4042),
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Unsafe_Shutdowns",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(355),
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Warning_Temperature_Time",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(11),
+ },
+ time.Now(),
+ ),
+ testutil.MustMetric("smart_attribute",
+ map[string]string{
+ "device": ".",
+ "name": "Critical_Temperature_Time",
+ "serial_no": "D704940282?",
+ "model": "TS128GMTE850",
+ },
+ map[string]interface{}{
+ "raw_value": int64(7),
+ },
+ time.Now(),
+ ),
}
- os.Exit(0)
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(),
+ testutil.SortMetrics(), testutil.IgnoreTime())
}
+
+// smartctl output
+var (
+ // smartctl --scan
+ mockScanData = `/dev/ada0 -d atacam # /dev/ada0, ATA device
+`
+ // smartctl --info --health --attributes --tolerance=verypermissive -n standby --format=brief [DEVICE]
+ mockInfoAttributeData = `smartctl 6.5 2016-05-07 r4318 [Darwin 16.4.0 x86_64] (local build)
+Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
+
+CHECK POWER MODE not implemented, ignoring -n option
+=== START OF INFORMATION SECTION ===
+Model Family: Apple SD/SM/TS...E/F SSDs
+Device Model: APPLE SSD SM256E
+Serial Number: S0X5NZBC422720
+LU WWN Device Id: 5 002538 043584d30
+Firmware Version: CXM09A1Q
+User Capacity: 251,000,193,024 bytes [251 GB]
+Sector Sizes: 512 bytes logical, 4096 bytes physical
+Rotation Rate: Solid State Device
+Device is: In smartctl database [for details use: -P show]
+ATA Version is: ATA8-ACS T13/1699-D revision 4c
+SATA Version is: SATA 3.0, 6.0 Gb/s (current: 6.0 Gb/s)
+Local Time is: Thu Feb 9 16:48:45 2017 CET
+SMART support is: Available - device has SMART capability.
+SMART support is: Enabled
+
+=== START OF READ SMART DATA SECTION ===
+SMART overall-health self-assessment test result: PASSED
+
+=== START OF READ SMART DATA SECTION ===
+SMART Attributes Data Structure revision number: 1
+Vendor Specific SMART Attributes with Thresholds:
+ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
+ 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0
+ 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0
+ 9 Power_On_Hours -O--CK 099 099 000 - 2988
+ 12 Power_Cycle_Count -O--CK 085 085 000 - 14879
+169 Unknown_Attribute PO--C- 253 253 010 - 2044932921600
+173 Wear_Leveling_Count -O--CK 185 185 100 - 957808640337
+190 Airflow_Temperature_Cel -O---K 055 040 045 Past 45 (Min/Max 43/57 #2689)
+192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716
+194 Temperature_Celsius -O---K 066 021 000 - 34 (Min/Max 14/79)
+197 Current_Pending_Sector -O---K 100 100 000 - 0
+199 UDMA_CRC_Error_Count -O-RC- 200 200 000 - 0
+240 Head_Flying_Hours ------ 100 253 000 - 6585h+55m+23.234s
+ ||||||_ K auto-keep
+ |||||__ C event count
+ ||||___ R error rate
+ |||____ S speed/performance
+ ||_____ O updated online
+ |______ P prefailure warning
+`
+
+ htSASInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.18-12-pve] (local build)
+Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smar$montools.org
+
+=== START OF INFORMATION SECTION ===
+Vendor: HITACHI
+Product: HUC103030CSS600
+Revision: J350
+Compliance: SPC-4
+User Capacity: 300,$00,000,000 bytes [300 GB]
+Logical block size: 512 bytes
+Rotation Rate: 10020 rpm
+Form Factor: 2.5 inches
+Logical Unit id: 0x5000cca00a4bdbc8
+Serial number: PDWAR9GE
+Devicetype: disk
+Transport protocol: SAS (SPL-3)
+Local Time is: Wed Apr 17 15:01:28 2019 PDT
+SMART support is: Available - device has SMART capability.
+SMART support is: Enabled
+Temperature Warning: Disabled or Not Supported
+
+=== START OF READ SMART DATA SECTION ===
+SMART Health Status: OK
+
+Current Drive Temperature: 36 C
+Drive Trip Temperature: 85 C
+
+Manufactured in $eek 52 of year 2009
+Specified cycle count over device lifetime: 50000
+Accumulated start-stop cycles: 47
+Elements in grown defect list: 0
+
+Vendor (Seagate) cache information
+ Blocks sent to initiator= 7270983270400000
+`
+
+ hgstSASInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.0-46-generic] (local build)
+Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
+
+=== START OF INFORMATION SECTION ===
+Vendor: HGST
+Product: HUH721212AL5204
+Revision: C3Q1
+Compliance: SPC-4
+User Capacity: 12,000,138,625,024 bytes [12.0 TB]
+Logical block size: 512 bytes
+Physical block size: 4096 bytes
+LU is fully provisioned
+Rotation Rate: 7200 rpm
+Form Factor: 3.5 inches
+Logical Unit id: 0x5000cca27076bfe8
+Serial number: 8HJ39K3H
+Device type: disk
+Transport protocol: SAS (SPL-3)
+Local Time is: Thu Apr 18 13:25:03 2019 MSK
+SMART support is: Available - device has SMART capability.
+SMART support is: Enabled
+Temperature Warning: Enabled
+
+=== START OF READ SMART DATA SECTION ===
+SMART Health Status: OK
+
+Current Drive Temperature: 34 C
+Drive Trip Temperature: 85 C
+
+Manufactured in week 35 of year 2018
+Specified cycle count over device lifetime: 50000
+Accumulated start-stop cycles: 7
+Specified load-unload count over device lifetime: 600000
+Accumulated load-unload cycles: 39
+Elements in grown defect list: 0
+
+Vendor (Seagate) cache information
+ Blocks sent to initiator = 544135446528
+`
+
+ hgstSATAInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.0-46-generic] (local build)
+Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
+
+=== START OF INFORMATION SECTION ===
+Model Family: Hitachi/HGST Travelstar Z7K500
+Device Model: HGST HTE725050A7E630
+Serial Number: RCE50G20G81S9S
+LU WWN Device Id: 5 000cca 90bc3a98b
+Firmware Version: GS2OA3E0
+User Capacity: 500,107,862,016 bytes [500 GB]
+Sector Sizes: 512 bytes logical, 4096 bytes physical
+Rotation Rate: 7200 rpm
+Form Factor: 2.5 inches
+Device is: In smartctl database [for details use: -P show]
+ATA Version is: ATA8-ACS T13/1699-D revision 6
+SATA Version is: SATA 2.6, 6.0 Gb/s (current: 6.0 Gb/s)
+Local Time is: Thu Apr 18 13:27:51 2019 MSK
+SMART support is: Available - device has SMART capability.
+SMART support is: Enabled
+Power mode is: ACTIVE or IDLE
+
+=== START OF READ SMART DATA SECTION ===
+SMART overall-health self-assessment test result: PASSED
+
+SMART Attributes Data Structure revision number: 16
+Vendor Specific SMART Attributes with Thresholds:
+ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
+ 1 Raw_Read_Error_Rate PO-R-- 100 100 062 - 0
+ 2 Throughput_Performance P-S--- 100 100 040 - 0
+ 3 Spin_Up_Time POS--- 100 100 033 - 1
+ 4 Start_Stop_Count -O--C- 100 100 000 - 4
+ 5 Reallocated_Sector_Ct PO--CK 100 100 005 - 0
+ 7 Seek_Error_Rate PO-R-- 100 100 067 - 0
+ 8 Seek_Time_Performance P-S--- 100 100 040 - 0
+ 9 Power_On_Hours -O--C- 099 099 000 - 743
+ 10 Spin_Retry_Count PO--C- 100 100 060 - 0
+ 12 Power_Cycle_Count -O--CK 100 100 000 - 4
+191 G-Sense_Error_Rate -O-R-- 100 100 000 - 0
+192 Power-Off_Retract_Count -O--CK 100 100 000 - 2
+193 Load_Cycle_Count -O--C- 100 100 000 - 13
+194 Temperature_Celsius -O---- 250 250 000 - 24 (Min/Max 15/29)
+196 Reallocated_Event_Count -O--CK 100 100 000 - 0
+197 Current_Pending_Sector -O---K 100 100 000 - 0
+198 Offline_Uncorrectable ---R-- 100 100 000 - 0
+199 UDMA_CRC_Error_Count -O-R-- 200 200 000 - 0
+223 Load_Retry_Count -O-R-- 100 100 000 - 0
+ ||||||_ K auto-keep
+ |||||__ C event count
+ ||||___ R error rate
+ |||____ S speed/performance
+ ||_____ O updated online
+ |______ P prefailure warning
+`
+
+ hgstSATAInfoData65 = `smartctl 6.5 2016-01-24 r4214 [x86_64-linux-4.4.0-145-generic] (local build)
+Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
+
+=== START OF INFORMATION SECTION ===
+Model Family: HGST Deskstar NAS
+Device Model: HGST HDN724040ALE640
+Serial Number: PK1334PEK49SBS
+LU WWN Device Id: 5 000cca 250ec3c9c
+Firmware Version: MJAOA5E0
+User Capacity: 4,000,787,030,016 bytes [4.00 TB]
+Sector Sizes: 512 bytes logical, 4096 bytes physical
+Rotation Rate: 7200 rpm
+Form Factor: 3.5 inches
+Device is: In smartctl database [for details use: -P show]
+ATA Version is: ATA8-ACS T13/1699-D revision 4
+SATA Version is: SATA 3.0, 6.0 Gb/s (current: 6.0 Gb/s)
+Local Time is: Wed Apr 17 15:14:27 2019 PDT
+SMART support is: Available - device has SMART capability.
+SMART support is: Enabled
+Power mode is: ACTIVE or IDLE
+
+=== START OF READ SMART DATA SECTION ===
+SMART overall-health self-assessment test result: PASSED
+
+SMART Attributes Data Structure revision number: 16
+Vendor Specific SMART Attributes with Thresholds:
+ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
+ 1 Raw_Read_Error_Rate PO-R-- 100 100 016 - 0
+ 2 Throughput_Performance P-S--- 135 135 054 - 84
+ 3 Spin_Up_Time POS--- 125 125 024 - 621 (Average 619)
+ 4 Start_Stop_Count -O--C- 100 100 000 - 33
+ 5 Reallocated_Sector_Ct PO--CK 100 100 005 - 0
+ 7 Seek_Error_Rate PO-R-- 100 100 067 - 0
+ 8 Seek_Time_Performance P-S--- 119 119 020 - 35
+ 9 Power_On_Hours -O--C- 098 098 000 - 19371
+ 10 Spin_Retry_Count PO--C- 100 100 060 - 0
+ 12 Power_Cycle_Count -O--CK 100 100 000 - 33
+192 Power-Off_Retract_Count -O--CK 100 100 000 - 764
+193 Load_Cycle_Count -O--C- 100 100 000 - 764
+194 Temperature_Celsius -O---- 176 176 000 - 34 (Min/Max 21/53)
+196 Reallocated_Event_Count -O--CK 100 100 000 - 0
+197 Current_Pending_Sector -O---K 100 100 000 - 0
+198 Offline_Uncorrectable ---R-- 100 100 000 - 0
+199 UDMA_CRC_Error_Count -O-R-- 200 200 000 - 0
+ ||||||_ K auto-keep
+ |||||__ C event count
+ ||||___ R error rate
+ |||____ S speed/performance
+ ||_____ O updated online
+ |______ P prefailure warning
+`
+
+ ssdInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.0-33-generic] (local build)
+Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
+
+=== START OF INFORMATION SECTION ===
+Device Model: SanDisk Ultra II 240GB
+Serial Number: XXXXXXXX
+LU WWN Device Id: XXXXXXXX
+Firmware Version: XXXXXXX
+User Capacity: 240.057.409.536 bytes [240 GB]
+Sector Size: 512 bytes logical/physical
+Rotation Rate: Solid State Device
+Form Factor: 2.5 inches
+Device is: Not in smartctl database [for details use: -P showall]
+ATA Version is: ACS-2 T13/2015-D revision 3
+SATA Version is: SATA 3.2, 6.0 Gb/s (current: 6.0 Gb/s)
+Local Time is: Mon Sep 17 13:22:19 2018 CEST
+SMART support is: Available - device has SMART capability.
+SMART support is: Enabled
+Power mode is: ACTIVE or IDLE
+
+=== START OF READ SMART DATA SECTION ===
+SMART overall-health self-assessment test result: PASSED
+
+SMART Attributes Data Structure revision number: 4
+Vendor Specific SMART Attributes with Thresholds:
+ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
+ 5 Reallocated_Sector_Ct -O--CK 100 100 --- - 0
+ 9 Power_On_Hours -O--CK 100 100 --- - 6383
+ 12 Power_Cycle_Count -O--CK 100 100 --- - 19
+165 Unknown_Attribute -O--CK 100 100 --- - 59310806
+166 Unknown_Attribute -O--CK 100 100 --- - 1
+167 Unknown_Attribute -O--CK 100 100 --- - 57
+168 Unknown_Attribute -O--CK 100 100 --- - 43
+169 Unknown_Attribute -O--CK 100 100 --- - 221
+170 Unknown_Attribute -O--CK 100 100 --- - 0
+171 Unknown_Attribute -O--CK 100 100 --- - 0
+172 Unknown_Attribute -O--CK 100 100 --- - 0
+173 Unknown_Attribute -O--CK 100 100 --- - 13
+174 Unknown_Attribute -O--CK 100 100 --- - 4
+184 End-to-End_Error -O--CK 100 100 --- - 0
+187 Reported_Uncorrect -O--CK 100 100 --- - 0
+188 Command_Timeout -O--CK 100 100 --- - 0
+194 Temperature_Celsius -O---K 066 065 --- - 34 (Min/Max 19/65)
+199 UDMA_CRC_Error_Count -O--CK 100 100 --- - 0
+230 Unknown_SSD_Attribute -O--CK 100 100 --- - 2229110374919
+232 Available_Reservd_Space PO--CK 100 100 004 - 100
+233 Media_Wearout_Indicator -O--CK 100 100 --- - 3129
+234 Unknown_Attribute -O--CK 100 100 --- - 7444
+241 Total_LBAs_Written ----CK 253 253 --- - 4812
+242 Total_LBAs_Read ----CK 253 253 --- - 671
+244 Unknown_Attribute -O--CK 000 100 --- - 0
+ ||||||_ K auto-keep
+ |||||__ C event count
+ ||||___ R error rate
+ |||____ S speed/performance
+ ||_____ O updated online
+ |______ P prefailure warning
+`
+ ssdRaidInfoData = `smartctl 6.6 2017-11-05 r4594 [FreeBSD 11.1-RELEASE-p13 amd64] (local build)
+Copyright (C) 2002-17, Bruce Allen, Christian Franke, www.smartmontools.org
+
+CHECK POWER MODE: incomplete response, ATA output registers missing
+CHECK POWER MODE not implemented, ignoring -n option
+=== START OF INFORMATION SECTION ===
+Model Family: Samsung based SSDs
+Device Model: Samsung SSD 850 PRO 256GB
+Serial Number: S251NX0H869353L
+LU WWN Device Id: 5 002538 84027f72f
+Firmware Version: EXM02B6Q
+User Capacity: 256 060 514 304 bytes [256 GB]
+Sector Size: 512 bytes logical/physical
+Rotation Rate: Solid State Device
+Device is: In smartctl database [for details use: -P show]
+ATA Version is: ACS-2, ATA8-ACS T13/1699-D revision 4c
+SATA Version is: SATA 3.1, 6.0 Gb/s (current: 6.0 Gb/s)
+Local Time is: Fri Sep 21 17:49:16 2018 CEST
+SMART support is: Available - device has SMART capability.
+SMART support is: Enabled
+
+=== START OF READ SMART DATA SECTION ===
+SMART Status not supported: Incomplete response, ATA output registers missing
+SMART overall-health self-assessment test result: PASSED
+Warning: This result is based on an Attribute check.
+
+General SMART Values:
+Offline data collection status: (0x00) Offline data collection activity
+ was never started.
+ Auto Offline Data Collection: Disabled.
+Self-test execution status: ( 0) The previous self-test routine completed
+ without error or no self-test has ever
+ been run.
+Total time to complete Offline
+data collection: ( 0) seconds.
+Offline data collection
+capabilities: (0x53) SMART execute Offline immediate.
+ Auto Offline data collection on/off support.
+ Suspend Offline collection upon new
+ command.
+ No Offline surface scan supported.
+ Self-test supported.
+ No Conveyance Self-test supported.
+ Selective Self-test supported.
+SMART capabilities: (0x0003) Saves SMART data before entering
+ power-saving mode.
+ Supports SMART auto save timer.
+Error logging capability: (0x01) Error logging supported.
+ General Purpose Logging supported.
+Short self-test routine
+recommended polling time: ( 2) minutes.
+Extended self-test routine
+recommended polling time: ( 136) minutes.
+SCT capabilities: (0x003d) SCT Status supported.
+ SCT Error Recovery Control supported.
+ SCT Feature Control supported.
+ SCT Data Table supported.
+
+SMART Attributes Data Structure revision number: 1
+Vendor Specific SMART Attributes with Thresholds:
+ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
+ 5 Reallocated_Sector_Ct PO--CK 099 099 010 - 1
+ 9 Power_On_Hours -O--CK 094 094 000 - 26732
+ 12 Power_Cycle_Count -O--CK 099 099 000 - 51
+177 Wear_Leveling_Count PO--C- 001 001 000 - 7282
+179 Used_Rsvd_Blk_Cnt_Tot PO--C- 099 099 010 - 1
+181 Program_Fail_Cnt_Total -O--CK 100 100 010 - 0
+182 Erase_Fail_Count_Total -O--CK 099 099 010 - 1
+183 Runtime_Bad_Block PO--C- 099 099 010 - 1
+187 Uncorrectable_Error_Cnt -O--CK 100 100 000 - 0
+190 Airflow_Temperature_Cel -O--CK 081 069 000 - 19
+195 ECC_Error_Rate -O-RC- 200 200 000 - 0
+199 CRC_Error_Count -OSRCK 100 100 000 - 0
+235 POR_Recovery_Count -O--C- 099 099 000 - 50
+241 Total_LBAs_Written -O--CK 099 099 000 - 61956393677
+ ||||||_ K auto-keep
+ |||||__ C event count
+ ||||___ R error rate
+ |||____ S speed/performance
+ ||_____ O updated online
+ |______ P prefailure warning
+
+SMART Error Log Version: 1
+No Errors Logged
+
+SMART Self-test log structure revision number 1
+Num Test_Description Status Remaining LifeTime(hours) LBA_of_first_error
+# 1 Short offline Completed without error 00% 26717 -
+# 2 Short offline Completed without error 00% 26693 -
+# 3 Short offline Completed without error 00% 26669 -
+# 4 Short offline Completed without error 00% 26645 -
+# 5 Short offline Completed without error 00% 26621 -
+# 6 Short offline Completed without error 00% 26596 -
+# 7 Extended offline Completed without error 00% 26574 -
+# 8 Short offline Completed without error 00% 26572 -
+# 9 Short offline Completed without error 00% 26548 -
+#10 Short offline Completed without error 00% 26524 -
+#11 Short offline Completed without error 00% 26500 -
+#12 Short offline Completed without error 00% 26476 -
+#13 Short offline Completed without error 00% 26452 -
+#14 Short offline Completed without error 00% 26428 -
+#15 Extended offline Completed without error 00% 26406 -
+#16 Short offline Completed without error 00% 26404 -
+#17 Short offline Completed without error 00% 26380 -
+#18 Short offline Completed without error 00% 26356 -
+#19 Short offline Completed without error 00% 26332 -
+#20 Short offline Completed without error 00% 26308 -
+
+SMART Selective self-test log data structure revision number 1
+ SPAN MIN_LBA MAX_LBA CURRENT_TEST_STATUS
+ 1 0 0 Not_testing
+ 2 0 0 Not_testing
+ 3 0 0 Not_testing
+ 4 0 0 Not_testing
+ 5 0 0 Not_testing
+Selective self-test flags (0x0):
+ After scanning selected spans, do NOT read-scan remainder of disk.
+If Selective self-test is pending on power-up, resume after 0 minute delay.
+`
+
+ nvmeInfoData = `smartctl 6.5 2016-05-07 r4318 [x86_64-linux-4.1.27-gvt-yocto-standard] (local build)
+Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org
+
+=== START OF INFORMATION SECTION ===
+Model Number: TS128GMTE850
+Serial Number: D704940282?
+Firmware Version: C2.3.13
+PCI Vendor/Subsystem ID: 0x126f
+IEEE OUI Identifier: 0x000000
+Controller ID: 1
+Number of Namespaces: 1
+Namespace 1 Size/Capacity: 128,035,676,160 [128 GB]
+Namespace 1 Formatted LBA Size: 512
+Local Time is: Fri Jun 15 11:41:35 2018 UTC
+
+=== START OF SMART DATA SECTION ===
+SMART overall-health self-assessment test result: PASSED
+
+SMART/Health Information (NVMe Log 0x02, NSID 0xffffffff)
+Critical Warning: 0x09
+Temperature: 38 Celsius
+Available Spare: 100%
+Available Spare Threshold: 10%
+Percentage Used: 16%
+Data Units Read: 11,836,935 [6.06 TB]
+Data Units Written: 62,288,091 [31.8 TB]
+Host Read Commands: 135,924,188
+Host Write Commands: 7,715,573,429
+Controller Busy Time: 4,042
+Power Cycles: 472
+Power On Hours: 6,038
+Unsafe Shutdowns: 355
+Media and Data Integrity Errors: 0
+Error Information Log Entries: 119,699
+Warning Comp. Temperature Time: 11
+Critical Comp. Temperature Time: 7
+`
+)
diff --git a/plugins/inputs/snmp/CONFIG-EXAMPLES.md b/plugins/inputs/snmp/CONFIG-EXAMPLES.md
deleted file mode 100644
index a0a52eeb327ef..0000000000000
--- a/plugins/inputs/snmp/CONFIG-EXAMPLES.md
+++ /dev/null
@@ -1,65 +0,0 @@
-Here are a few configuration examples for different use cases.
-
-### Switch/router interface metrics
-
-This setup will collect data on all interfaces from three different tables, `IF-MIB::ifTable`, `IF-MIB::ifXTable` and `EtherLike-MIB::dot3StatsTable`. It will also add the name from `IF-MIB::ifDescr` and use that as a tag. Depending on your needs and preferences you can easily use `IF-MIB::ifName` or `IF-MIB::ifAlias` instead or in addition. The values of these are typically:
-
- IF-MIB::ifName = Gi0/0/0
- IF-MIB::ifDescr = GigabitEthernet0/0/0
- IF-MIB::ifAlias = ### LAN ###
-
-This configuration also collects the hostname from the device (`RFC1213-MIB::sysName.0`) and adds as a tag. So each metric will both have the configured host/IP as `agent_host` as well as the device self-reported hostname as `hostname` and the name of the host that has collected these metrics as `host`.
-
-Here is the configuration that you add to your `telegraf.conf`:
-
-```
-[[inputs.snmp]]
- agents = [ "host.example.com" ]
- version = 2
- community = "public"
-
- [[inputs.snmp.field]]
- name = "hostname"
- oid = "RFC1213-MIB::sysName.0"
- is_tag = true
-
- [[inputs.snmp.field]]
- name = "uptime"
- oid = "DISMAN-EXPRESSION-MIB::sysUpTimeInstance"
-
- # IF-MIB::ifTable contains counters on input and output traffic as well as errors and discards.
- [[inputs.snmp.table]]
- name = "interface"
- inherit_tags = [ "hostname" ]
- oid = "IF-MIB::ifTable"
-
- # Interface tag - used to identify interface in metrics database
- [[inputs.snmp.table.field]]
- name = "ifDescr"
- oid = "IF-MIB::ifDescr"
- is_tag = true
-
- # IF-MIB::ifXTable contains newer High Capacity (HC) counters that do not overflow as fast for a few of the ifTable counters
- [[inputs.snmp.table]]
- name = "interface"
- inherit_tags = [ "hostname" ]
- oid = "IF-MIB::ifXTable"
-
- # Interface tag - used to identify interface in metrics database
- [[inputs.snmp.table.field]]
- name = "ifDescr"
- oid = "IF-MIB::ifDescr"
- is_tag = true
-
- # EtherLike-MIB::dot3StatsTable contains detailed ethernet-level information about what kind of errors have been logged on an interface (such as FCS error, frame too long, etc)
- [[inputs.snmp.table]]
- name = "interface"
- inherit_tags = [ "hostname" ]
- oid = "EtherLike-MIB::dot3StatsTable"
-
- # Interface tag - used to identify interface in metrics database
- [[inputs.snmp.table.field]]
- name = "ifDescr"
- oid = "IF-MIB::ifDescr"
- is_tag = true
-```
diff --git a/plugins/inputs/snmp/DEBUGGING.md b/plugins/inputs/snmp/DEBUGGING.md
deleted file mode 100644
index f357c58b51c52..0000000000000
--- a/plugins/inputs/snmp/DEBUGGING.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Debugging & Testing SNMP Issues
-
-### Install net-snmp on your system:
-
-Mac:
-
-```
-brew install net-snmp
-```
-
-### Run an SNMP simulator docker image to get a full MIB on port 161:
-
-```
-docker run -d -p 161:161/udp xeemetric/snmp-simulator
-```
-
-### snmpget:
-
-snmpget corresponds to the inputs.snmp.field configuration.
-
-```bash
-$ # get an snmp field with fully-qualified MIB name.
-$ snmpget -v2c -c public localhost:161 system.sysUpTime.0
-DISMAN-EVENT-MIB::sysUpTimeInstance = Timeticks: (1643) 0:00:16.43
-
-$ # get an snmp field, outputting the numeric OID.
-$ snmpget -On -v2c -c public localhost:161 system.sysUpTime.0
-.1.3.6.1.2.1.1.3.0 = Timeticks: (1638) 0:00:16.38
-```
-
-### snmptranslate:
-
-snmptranslate can be used to translate an OID to a MIB name:
-
-```bash
-$ snmptranslate .1.3.6.1.2.1.1.3.0
-DISMAN-EVENT-MIB::sysUpTimeInstance
-```
-
-And to convert a partial MIB name to a fully qualified one:
-
-```bash
-$ snmptranslate -IR sysUpTime.0
-DISMAN-EVENT-MIB::sysUpTimeInstance
-```
-
-And to convert a MIB name to an OID:
-
-```bash
-$ snmptranslate -On -IR system.sysUpTime.0
-.1.3.6.1.2.1.1.3.0
-```
-
diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md
index dab28e9b0a976..0d2eb52ab44bb 100644
--- a/plugins/inputs/snmp/README.md
+++ b/plugins/inputs/snmp/README.md
@@ -1,180 +1,223 @@
-# SNMP Plugin
+# SNMP Input Plugin
-The SNMP input plugin gathers metrics from SNMP agents.
+The `snmp` input plugin uses polling to gather metrics from SNMP agents.
+Support for gathering individual OIDs as well as complete SNMP tables is
+included.
-## Configuration:
+### Prerequisites
-See additional SNMP plugin configuration examples [here](./CONFIG-EXAMPLES.md).
+This plugin uses the `snmptable` and `snmptranslate` programs from the
+[net-snmp][] project. These tools will need to be installed into the `PATH` in
+order to be located. Other utilities from the net-snmp project may be useful
+for troubleshooting, but are not directly used by the plugin.
-### Example:
+These programs will load available MIBs on the system. Typically the default
+directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a
+different location you may need to make the paths known to net-snmp. The
+location of these files can be configured in the `snmp.conf` or via the
+`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more
+information.
-SNMP data:
-```
-.1.0.0.0.1.1.0 octet_str "foo"
-.1.0.0.0.1.1.1 octet_str "bar"
-.1.0.0.0.1.102 octet_str "bad"
-.1.0.0.0.1.2.0 integer 1
-.1.0.0.0.1.2.1 integer 2
-.1.0.0.0.1.3.0 octet_str "0.123"
-.1.0.0.0.1.3.1 octet_str "0.456"
-.1.0.0.0.1.3.2 octet_str "9.999"
-.1.0.0.1.1 octet_str "baz"
-.1.0.0.1.2 uinteger 54321
-.1.0.0.1.3 uinteger 234
-```
-
-Telegraf config:
+### Configuration
```toml
[[inputs.snmp]]
- agents = [ "127.0.0.1:161" ]
- version = 2
- community = "public"
-
- name = "system"
- [[inputs.snmp.field]]
- name = "hostname"
- oid = ".1.0.0.1.1"
- is_tag = true
+ ## Agent addresses to retrieve values from.
+ ## example: agents = ["udp://127.0.0.1:161"]
+ ## agents = ["tcp://127.0.0.1:161"]
+ agents = ["udp://127.0.0.1:161"]
+
+ ## Timeout for each request.
+ # timeout = "5s"
+
+ ## SNMP version; can be 1, 2, or 3.
+ # version = 2
+
+ ## SNMP community string.
+ # community = "public"
+
+ ## Number of retries to attempt.
+ # retries = 3
+
+ ## The GETBULK max-repetitions parameter.
+ # max_repetitions = 10
+
+ ## SNMPv3 authentication and encryption options.
+ ##
+ ## Security Name.
+ # sec_name = "myuser"
+ ## Authentication protocol; one of "MD5", "SHA", or "".
+ # auth_protocol = "MD5"
+ ## Authentication password.
+ # auth_password = "pass"
+ ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+ # sec_level = "authNoPriv"
+ ## Context Name.
+ # context_name = ""
+ ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+ # priv_protocol = ""
+ ## Privacy password used for encrypted messages.
+ # priv_password = ""
+
+ ## Add fields and tables defining the variables you wish to collect. This
+ ## example collects the system uptime and interface variables. Reference the
+ ## full plugin documentation for configuration details.
[[inputs.snmp.field]]
+ oid = "RFC1213-MIB::sysUpTime.0"
name = "uptime"
- oid = ".1.0.0.1.2"
+
[[inputs.snmp.field]]
- name = "loadavg"
- oid = ".1.0.0.1.3"
- conversion = "float(2)"
+ oid = "RFC1213-MIB::sysName.0"
+ name = "source"
+ is_tag = true
[[inputs.snmp.table]]
- name = "remote_servers"
- inherit_tags = [ "hostname" ]
+ oid = "IF-MIB::ifTable"
+ name = "interface"
+ inherit_tags = ["source"]
+
[[inputs.snmp.table.field]]
- name = "server"
- oid = ".1.0.0.0.1.1"
+ oid = "IF-MIB::ifDescr"
+ name = "ifDescr"
is_tag = true
- [[inputs.snmp.table.field]]
- name = "connections"
- oid = ".1.0.0.0.1.2"
- [[inputs.snmp.table.field]]
- name = "latency"
- oid = ".1.0.0.0.1.3"
- conversion = "float"
```
-Resulting output:
-```
-* Plugin: snmp, Collection 1
-> system,agent_host=127.0.0.1,host=mylocalhost,hostname=baz loadavg=2.34,uptime=54321i 1468953135000000000
-> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency=0.123 1468953135000000000
-> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency=0.456 1468953135000000000
-```
+#### Configure SNMP Requests
-#### Configuration via MIB:
+This plugin provides two methods for configuring the SNMP requests: `fields`
+and `tables`. Use the `field` option to gather single ad-hoc variables.
+To collect SNMP tables, use the `table` option.
-This example uses the SNMP data above, but is configured via the MIB.
-The example MIB file can be found in the `testdata` directory. See the [MIB lookups](#mib-lookups) section for more information.
+##### Field
+
+Use a `field` to collect a variable by OID. Requests specified with this
+option operate similar to the `snmpget` utility.
-Telegraf config:
```toml
[[inputs.snmp]]
- agents = [ "127.0.0.1:161" ]
- version = 2
- community = "public"
+ # ... snip ...
[[inputs.snmp.field]]
- oid = "TEST::hostname"
- is_tag = true
-
- [[inputs.snmp.table]]
- oid = "TEST::testTable"
- inherit_tags = [ "hostname" ]
-```
-
-Resulting output:
-```
-* Plugin: snmp, Collection 1
-> testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency="0.123" 1468953135000000000
-> testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency="0.456" 1468953135000000000
+ ## Object identifier of the variable as a numeric or textual OID.
+ oid = "RFC1213-MIB::sysName.0"
+
+ ## Name of the field or tag to create. If not specified, it defaults to
+ ## the value of 'oid'. If 'oid' is numeric, an attempt to translate the
+ ## numeric OID into a textual OID will be made.
+ # name = ""
+
+ ## If true the variable will be added as a tag, otherwise a field will be
+ ## created.
+ # is_tag = false
+
+ ## Apply one of the following conversions to the variable value:
+ ## float(X) Convert the input value into a float and divides by the
+ ## Xth power of 10. Effectively just moves the decimal left
+ ## X places. For example a value of `123` with `float(2)`
+ ## will result in `1.23`.
+ ## float: Convert the value into a float with no adjustment. Same
+ ## as `float(0)`.
+ ## int: Convert the value into an integer.
+ ## hwaddr: Convert the value to a MAC address.
+ ## ipaddr: Convert the value to an IP address.
+ # conversion = ""
```
-### Config parameters
+##### Table
-* `agents`: Default: `[]`
-List of SNMP agents to connect to in the form of `IP[:PORT]`. If `:PORT` is unspecified, it defaults to `161`.
+Use a `table` to configure the collection of a SNMP table. SNMP requests
+formed with this option operate similarly way to the `snmptable` command.
-* `version`: Default: `2`
-SNMP protocol version to use.
+Control the handling of specific table columns using a nested `field`. These
+nested fields are specified similarly to a top-level `field`.
-* `community`: Default: `"public"`
-SNMP community to use.
+By default all columns of the SNMP table will be collected - it is not required
+to add a nested field for each column, only those which you wish to modify. To
+*only* collect certain columns, omit the `oid` from the `table` section and only
+include `oid` settings in `field` sections. For more complex include/exclude
+cases for columns use [metric filtering][].
-* `max_repetitions`: Default: `50`
-Maximum number of iterations for repeating variables.
+One [metric][] is created for each row of the SNMP table.
-* `sec_name`:
-Security name for authenticated SNMPv3 requests.
-
-* `auth_protocol`: Values: `"MD5"`,`"SHA"`,`""`. Default: `""`
-Authentication protocol for authenticated SNMPv3 requests.
-
-* `auth_password`:
-Authentication password for authenticated SNMPv3 requests.
-
-* `sec_level`: Values: `"noAuthNoPriv"`,`"authNoPriv"`,`"authPriv"`. Default: `"noAuthNoPriv"`
-Security level used for SNMPv3 messages.
-
-* `context_name`:
-Context name used for SNMPv3 requests.
+```toml
+[[inputs.snmp]]
+ # ... snip ...
-* `priv_protocol`: Values: `"DES"`,`"AES"`,`""`. Default: `""`
-Privacy protocol used for encrypted SNMPv3 messages.
+ [[inputs.snmp.table]]
+ ## Object identifier of the SNMP table as a numeric or textual OID.
+ oid = "IF-MIB::ifTable"
-* `priv_password`:
-Privacy password used for encrypted SNMPv3 messages.
+ ## Name of the field or tag to create. If not specified, it defaults to
+ ## the value of 'oid'. If 'oid' is numeric an attempt to translate the
+ ## numeric OID into a textual OID will be made.
+ # name = ""
+ ## Which tags to inherit from the top-level config and to use in the output
+ ## of this table's measurement.
+ ## example: inherit_tags = ["source"]
+ # inherit_tags = []
-* `name`:
-Output measurement name.
+ ## Add an 'index' tag with the table row number. Use this if the table has
+ ## no indexes or if you are excluding them. This option is normally not
+ ## required as any index columns are automatically added as tags.
+ # index_as_tag = false
-#### Field parameters:
-* `oid`:
-OID to get. May be a numeric or textual OID.
+ [[inputs.snmp.table.field]]
+ ## OID to get. May be a numeric or textual module-qualified OID.
+ oid = "IF-MIB::ifDescr"
-* `oid_index_suffix`:
-The OID sub-identifier to strip off so that the index can be matched against other fields in the table.
+ ## Name of the field or tag to create. If not specified, it defaults to
+ ## the value of 'oid'. If 'oid' is numeric an attempt to translate the
+ ## numeric OID into a textual OID will be made.
+ # name = ""
-* `oid_index_length`:
-Specifies the length of the index after the supplied table OID (in OID path segments). Truncates the index after this point to remove non-fixed value or length index suffixes.
+ ## Output this field as a tag.
+ # is_tag = false
-* `name`:
-Output field/tag name.
-If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a texual OID will be made.
+ ## The OID sub-identifier to strip off so that the index can be matched
+ ## against other fields in the table.
+ # oid_index_suffix = ""
-* `is_tag`:
-Output this field as a tag.
+ ## Specifies the length of the index after the supplied table OID (in OID
+ ## path segments). Truncates the index after this point to remove non-fixed
+ ## value or length index suffixes.
+ # oid_index_length = 0
+```
-* `conversion`: Values: `"float(X)"`,`"float"`,`"int"`,`""`. Default: `""`
-Converts the value according to the given specification.
+### Troubleshooting
- - `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Efficively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`.
- - `float`: Converts the value into a float with no adjustment. Same as `float(0)`.
- - `int`: Convertes the value into an integer.
- - `hwaddr`: Converts the value to a MAC address.
- - `ipaddr`: Converts the value to an IP address.
+Check that a numeric field can be translated to a textual field:
+```
+$ snmptranslate .1.3.6.1.2.1.1.3.0
+DISMAN-EVENT-MIB::sysUpTimeInstance
+```
-#### Table parameters:
-* `oid`:
-Automatically populates the table's fields using data from the MIB.
+Request a top-level field:
+```
+$ snmpget -v2c -c public 127.0.0.1 sysUpTime.0
+```
-* `name`:
-Output measurement name.
-If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a texual OID will be made.
+Request a table:
+```
+$ snmptable -v2c -c public 127.0.0.1 ifTable
+```
-* `inherit_tags`:
-Which tags to inherit from the top-level config and to use in the output of this table's measurement.
+To collect a packet capture, run this command in the background while running
+Telegraf or one of the above commands. Adjust the interface, host and port as
+needed:
+```
+$ sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161
+```
-* `index_as_tag`:
-Adds each row's index within the table as a tag.
+### Example Output
-### MIB lookups
-If the plugin is configured such that it needs to perform lookups from the MIB, it will use the net-snmp utilities `snmptranslate` and `snmptable`.
+```
+snmp,agent_host=127.0.0.1,source=loaner uptime=11331974i 1575509815000000000
+interface,agent_host=127.0.0.1,ifDescr=wlan0,ifIndex=3,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=3436617431i,ifInUcastPkts=2717778i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=581368041i,ifOutQLen=0i,ifOutUcastPkts=1354338i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=0i,ifType=6i 1575509815000000000
+interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=21i,ifInOctets=3852386380i,ifInUcastPkts=3634004i,ifInUnknownProtos=0i,ifLastChange=9088763i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=434865441i,ifOutQLen=0i,ifOutUcastPkts=2110394i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=1000000000i,ifType=6i 1575509815000000000
+interface,agent_host=127.0.0.1,ifDescr=lo,ifIndex=1,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=51555569i,ifInUcastPkts=339097i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=65536i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=51555569i,ifOutQLen=0i,ifOutUcastPkts=339097i,ifSpecific=".0.0",ifSpeed=10000000i,ifType=24i 1575509815000000000
+```
-When performing the lookups, the plugin will load all available MIBs. If your MIB files are in a custom path, you may add the path using the `MIBDIRS` environment variable. See [`man 1 snmpcmd`](http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK) for more information on the variable.
+[net-snmp]: http://www.net-snmp.org/
+[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK
+[metric filtering]: /docs/CONFIGURATION.md#metric-filtering
+[metric]: /docs/METRICS.md
diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go
index 24250c22aac39..737be06f67c58 100644
--- a/plugins/inputs/snmp/snmp.go
+++ b/plugins/inputs/snmp/snmp.go
@@ -15,6 +15,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/snmp"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/wlog"
"github.com/soniah/gosnmp"
@@ -22,61 +23,46 @@ import (
const description = `Retrieves SNMP values from remote agents`
const sampleConfig = `
- agents = [ "127.0.0.1:161" ]
- ## Timeout for each SNMP query.
- timeout = "5s"
- ## Number of retries to attempt within timeout.
- retries = 3
- ## SNMP version, values can be 1, 2, or 3
- version = 2
+ ## Agent addresses to retrieve values from.
+ ## example: agents = ["udp://127.0.0.1:161"]
+ ## agents = ["tcp://127.0.0.1:161"]
+ agents = ["udp://127.0.0.1:161"]
+
+ ## Timeout for each request.
+ # timeout = "5s"
+
+ ## SNMP version; can be 1, 2, or 3.
+ # version = 2
## SNMP community string.
- community = "public"
-
- ## The GETBULK max-repetitions parameter
- max_repetitions = 10
-
- ## SNMPv3 auth parameters
- #sec_name = "myuser"
- #auth_protocol = "md5" # Values: "MD5", "SHA", ""
- #auth_password = "pass"
- #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
- #context_name = ""
- #priv_protocol = "" # Values: "DES", "AES", ""
- #priv_password = ""
-
- ## measurement name
- name = "system"
- [[inputs.snmp.field]]
- name = "hostname"
- oid = ".1.0.0.1.1"
- [[inputs.snmp.field]]
- name = "uptime"
- oid = ".1.0.0.1.2"
- [[inputs.snmp.field]]
- name = "load"
- oid = ".1.0.0.1.3"
- [[inputs.snmp.field]]
- oid = "HOST-RESOURCES-MIB::hrMemorySize"
-
- [[inputs.snmp.table]]
- ## measurement name
- name = "remote_servers"
- inherit_tags = [ "hostname" ]
- [[inputs.snmp.table.field]]
- name = "server"
- oid = ".1.0.0.0.1.0"
- is_tag = true
- [[inputs.snmp.table.field]]
- name = "connections"
- oid = ".1.0.0.0.1.1"
- [[inputs.snmp.table.field]]
- name = "latency"
- oid = ".1.0.0.0.1.2"
-
- [[inputs.snmp.table]]
- ## auto populate table's fields using the MIB
- oid = "HOST-RESOURCES-MIB::hrNetworkTable"
+ # community = "public"
+
+ ## Number of retries to attempt.
+ # retries = 3
+
+ ## The GETBULK max-repetitions parameter.
+ # max_repetitions = 10
+
+ ## SNMPv3 authentication and encryption options.
+ ##
+ ## Security Name.
+ # sec_name = "myuser"
+ ## Authentication protocol; one of "MD5", "SHA", or "".
+ # auth_protocol = "MD5"
+ ## Authentication password.
+ # auth_password = "pass"
+ ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+ # sec_level = "authNoPriv"
+ ## Context Name.
+ # context_name = ""
+ ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+ # priv_protocol = ""
+ ## Privacy password used for encrypted messages.
+ # priv_password = ""
+
+ ## Add fields and tables defining the variables you wish to collect. This
+ ## example collects the system uptime and interface variables. Reference the
+ ## full plugin documentation for configuration details.
`
// execCommand is so tests can mock out exec.Command usage.
@@ -90,16 +76,13 @@ func execCmd(arg0 string, args ...string) ([]byte, error) {
for _, arg := range args {
quoted = append(quoted, fmt.Sprintf("%q", arg))
}
- log.Printf("D! [inputs.snmp] Executing %q %s", arg0, strings.Join(quoted, " "))
+ log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " "))
}
out, err := execCommand(arg0, args...).Output()
if err != nil {
if err, ok := err.(*exec.ExitError); ok {
- return nil, NestedError{
- Err: err,
- NestedErr: fmt.Errorf("%s", bytes.TrimRight(err.Stderr, "\r\n")),
- }
+ return nil, fmt.Errorf("%s: %w", bytes.TrimRight(err.Stderr, "\r\n"), err)
}
return nil, err
}
@@ -108,41 +91,18 @@ func execCmd(arg0 string, args ...string) ([]byte, error) {
// Snmp holds the configuration for the plugin.
type Snmp struct {
- // The SNMP agent to query. Format is ADDR[:PORT] (e.g. 1.2.3.4:161).
- Agents []string
- // Timeout to wait for a response.
- Timeout internal.Duration
- Retries int
- // Values: 1, 2, 3
- Version uint8
-
- // Parameters for Version 1 & 2
- Community string
-
- // Parameters for Version 2 & 3
- MaxRepetitions uint8
-
- // Parameters for Version 3
- ContextName string
- // Values: "noAuthNoPriv", "authNoPriv", "authPriv"
- SecLevel string
- SecName string
- // Values: "MD5", "SHA", "". Default: ""
- AuthProtocol string
- AuthPassword string
- // Values: "DES", "AES", "". Default: ""
- PrivProtocol string
- PrivPassword string
- EngineID string
- EngineBoots uint32
- EngineTime uint32
+ // The SNMP agent to query. Format is [SCHEME://]ADDR[:PORT] (e.g.
+ // udp://1.2.3.4:161). If the scheme is not specified then "udp" is used.
+ Agents []string `toml:"agents"`
+
+ snmp.ClientConfig
Tables []Table `toml:"table"`
// Name & Fields are the elements of a Table.
// Telegraf chokes if we try to embed a Table. So instead we have to embed the
// fields of a Table, and construct a Table during runtime.
- Name string
+ Name string // deprecated in 1.14; use name_override
Fields []Field `toml:"field"`
connectionCache []snmpConnection
@@ -157,14 +117,14 @@ func (s *Snmp) init() error {
s.connectionCache = make([]snmpConnection, len(s.Agents))
for i := range s.Tables {
- if err := s.Tables[i].init(); err != nil {
- return Errorf(err, "initializing table %s", s.Tables[i].Name)
+ if err := s.Tables[i].Init(); err != nil {
+ return fmt.Errorf("initializing table %s: %w", s.Tables[i].Name, err)
}
}
for i := range s.Fields {
if err := s.Fields[i].init(); err != nil {
- return Errorf(err, "initializing field %s", s.Fields[i].Name)
+ return fmt.Errorf("initializing field %s: %w", s.Fields[i].Name, err)
}
}
@@ -194,8 +154,8 @@ type Table struct {
initialized bool
}
-// init() builds & initializes the nested fields.
-func (t *Table) init() error {
+// Init() builds & initializes the nested fields.
+func (t *Table) Init() error {
if t.initialized {
return nil
}
@@ -207,7 +167,7 @@ func (t *Table) init() error {
// initialize all the nested fields
for i := range t.Fields {
if err := t.Fields[i].init(); err != nil {
- return Errorf(err, "initializing field %s", t.Fields[i].Name)
+ return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err)
}
}
@@ -277,9 +237,9 @@ func (f *Field) init() error {
return nil
}
- _, oidNum, oidText, conversion, err := snmpTranslate(f.Oid)
+ _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid)
if err != nil {
- return Errorf(err, "translating")
+ return fmt.Errorf("translating: %w", err)
}
f.Oid = oidNum
if f.Name == "" {
@@ -314,36 +274,30 @@ type RTableRow struct {
Fields map[string]interface{}
}
-// NestedError wraps an error returned from deeper in the code.
-type NestedError struct {
- // Err is the error from where the NestedError was constructed.
- Err error
- // NestedError is the error that was passed back from the called function.
- NestedErr error
+type walkError struct {
+ msg string
+ err error
}
-// Error returns a concatenated string of all the nested errors.
-func (ne NestedError) Error() string {
- return ne.Err.Error() + ": " + ne.NestedErr.Error()
+func (e *walkError) Error() string {
+ return e.msg
}
-// Errorf is a convenience function for constructing a NestedError.
-func Errorf(err error, msg string, format ...interface{}) error {
- return NestedError{
- NestedErr: err,
- Err: fmt.Errorf(msg, format...),
- }
+func (e *walkError) Unwrap() error {
+ return e.err
}
func init() {
inputs.Add("snmp", func() telegraf.Input {
return &Snmp{
- Name: "snmp",
- Retries: 3,
- MaxRepetitions: 10,
- Timeout: internal.Duration{Duration: 5 * time.Second},
- Version: 2,
- Community: "public",
+ Name: "snmp",
+ ClientConfig: snmp.ClientConfig{
+ Retries: 3,
+ MaxRepetitions: 10,
+ Timeout: internal.Duration{Duration: 5 * time.Second},
+ Version: 2,
+ Community: "public",
+ },
}
})
}
@@ -373,7 +327,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
defer wg.Done()
gs, err := s.getConnection(i)
if err != nil {
- acc.AddError(Errorf(err, "agent %s", agent))
+ acc.AddError(fmt.Errorf("agent %s: %w", agent, err))
return
}
@@ -384,13 +338,13 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
}
topTags := map[string]string{}
if err := s.gatherTable(acc, gs, t, topTags, false); err != nil {
- acc.AddError(Errorf(err, "agent %s", agent))
+ acc.AddError(fmt.Errorf("agent %s: %w", agent, err))
}
// Now is the real tables.
for _, t := range s.Tables {
if err := s.gatherTable(acc, gs, t, topTags, true); err != nil {
- acc.AddError(Errorf(err, "agent %s: gathering table %s", agent, t.Name))
+ acc.AddError(fmt.Errorf("agent %s: gathering table %s: %w", agent, t.Name, err))
}
}
}(i, agent)
@@ -460,19 +414,19 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
// empty string. This results in all the non-table fields sharing the same
// index, and being added on the same row.
if pkt, err := gs.Get([]string{oid}); err != nil {
- return nil, Errorf(err, "performing get on field %s", f.Name)
+ return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err)
} else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance {
ent := pkt.Variables[0]
fv, err := fieldConvert(f.Conversion, ent.Value)
if err != nil {
- return nil, Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name)
+ return nil, fmt.Errorf("converting %q (OID %s) for field %s: %w", ent.Value, ent.Name, f.Name, err)
}
ifv[""] = fv
}
} else {
err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error {
if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." {
- return NestedError{} // break the walk
+ return &walkError{} // break the walk
}
idx := ent.Name[len(oid):]
@@ -498,14 +452,20 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
fv, err := fieldConvert(f.Conversion, ent.Value)
if err != nil {
- return Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name)
+ return &walkError{
+ msg: fmt.Sprintf("converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name),
+ err: err,
+ }
}
ifv[idx] = fv
return nil
})
if err != nil {
- if _, ok := err.(NestedError); !ok {
- return nil, Errorf(err, "performing bulk walk for field %s", f.Name)
+ // Our callback always wraps errors in a walkError.
+ // If this error isn't a walkError, we know it's not
+ // from the callback
+ if _, ok := err.(*walkError); !ok {
+ return nil, fmt.Errorf("performing bulk walk for field %s: %w", f.Name, err)
}
}
}
@@ -559,56 +519,6 @@ type snmpConnection interface {
Get(oids []string) (*gosnmp.SnmpPacket, error)
}
-// gosnmpWrapper wraps a *gosnmp.GoSNMP object so we can use it as a snmpConnection.
-type gosnmpWrapper struct {
- *gosnmp.GoSNMP
-}
-
-// Host returns the value of GoSNMP.Target.
-func (gsw gosnmpWrapper) Host() string {
- return gsw.Target
-}
-
-// Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the
-// connection is using SNMPv1 or newer.
-// Also, if any error is encountered, it will just once reconnect and try again.
-func (gsw gosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error {
- var err error
- // On error, retry once.
- // Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function.
- for i := 0; i < 2; i++ {
- if gsw.Version == gosnmp.Version1 {
- err = gsw.GoSNMP.Walk(oid, fn)
- } else {
- err = gsw.GoSNMP.BulkWalk(oid, fn)
- }
- if err == nil {
- return nil
- }
- if err := gsw.GoSNMP.Connect(); err != nil {
- return Errorf(err, "reconnecting")
- }
- }
- return err
-}
-
-// Get wraps GoSNMP.GET().
-// If any error is encountered, it will just once reconnect and try again.
-func (gsw gosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) {
- var err error
- var pkt *gosnmp.SnmpPacket
- for i := 0; i < 2; i++ {
- pkt, err = gsw.GoSNMP.Get(oids)
- if err == nil {
- return pkt, nil
- }
- if err := gsw.GoSNMP.Connect(); err != nil {
- return nil, Errorf(err, "reconnecting")
- }
- }
- return nil, err
-}
-
// getConnection creates a snmpConnection (*gosnmp.GoSNMP) object and caches the
// result using `agentIndex` as the cache key. This is done to allow multiple
// connections to a single address. It is an error to use a connection in
@@ -620,105 +530,21 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) {
agent := s.Agents[idx]
- gs := gosnmpWrapper{&gosnmp.GoSNMP{}}
- s.connectionCache[idx] = gs
-
- host, portStr, err := net.SplitHostPort(agent)
+ var err error
+ var gs snmp.GosnmpWrapper
+ gs, err = snmp.NewWrapper(s.ClientConfig)
if err != nil {
- if err, ok := err.(*net.AddrError); !ok || err.Err != "missing port in address" {
- return nil, Errorf(err, "parsing host")
- }
- host = agent
- portStr = "161"
+ return nil, err
}
- gs.Target = host
-
- port, err := strconv.ParseUint(portStr, 10, 16)
+ gs.SetAgent(agent)
if err != nil {
- return nil, Errorf(err, "parsing port")
- }
- gs.Port = uint16(port)
-
- gs.Timeout = s.Timeout.Duration
-
- gs.Retries = s.Retries
-
- switch s.Version {
- case 3:
- gs.Version = gosnmp.Version3
- case 2, 0:
- gs.Version = gosnmp.Version2c
- case 1:
- gs.Version = gosnmp.Version1
- default:
- return nil, fmt.Errorf("invalid version")
- }
-
- if s.Version < 3 {
- if s.Community == "" {
- gs.Community = "public"
- } else {
- gs.Community = s.Community
- }
+ return nil, err
}
- gs.MaxRepetitions = s.MaxRepetitions
-
- if s.Version == 3 {
- gs.ContextName = s.ContextName
-
- sp := &gosnmp.UsmSecurityParameters{}
- gs.SecurityParameters = sp
- gs.SecurityModel = gosnmp.UserSecurityModel
-
- switch strings.ToLower(s.SecLevel) {
- case "noauthnopriv", "":
- gs.MsgFlags = gosnmp.NoAuthNoPriv
- case "authnopriv":
- gs.MsgFlags = gosnmp.AuthNoPriv
- case "authpriv":
- gs.MsgFlags = gosnmp.AuthPriv
- default:
- return nil, fmt.Errorf("invalid secLevel")
- }
-
- sp.UserName = s.SecName
-
- switch strings.ToLower(s.AuthProtocol) {
- case "md5":
- sp.AuthenticationProtocol = gosnmp.MD5
- case "sha":
- sp.AuthenticationProtocol = gosnmp.SHA
- case "":
- sp.AuthenticationProtocol = gosnmp.NoAuth
- default:
- return nil, fmt.Errorf("invalid authProtocol")
- }
-
- sp.AuthenticationPassphrase = s.AuthPassword
-
- switch strings.ToLower(s.PrivProtocol) {
- case "des":
- sp.PrivacyProtocol = gosnmp.DES
- case "aes":
- sp.PrivacyProtocol = gosnmp.AES
- case "":
- sp.PrivacyProtocol = gosnmp.NoPriv
- default:
- return nil, fmt.Errorf("invalid privProtocol")
- }
-
- sp.PrivacyPassphrase = s.PrivPassword
-
- sp.AuthoritativeEngineID = s.EngineID
-
- sp.AuthoritativeEngineBoots = s.EngineBoots
-
- sp.AuthoritativeEngineTime = s.EngineTime
- }
+ s.connectionCache[idx] = gs
if err := gs.Connect(); err != nil {
- return nil, Errorf(err, "setting up connection")
+ return nil, fmt.Errorf("setting up connection: %w", err)
}
return gs, nil
@@ -803,9 +629,9 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) {
case uint64:
v = int64(vt)
case []byte:
- v, _ = strconv.Atoi(string(vt))
+ v, _ = strconv.ParseInt(string(vt), 10, 64)
case string:
- v, _ = strconv.Atoi(vt)
+ v, _ = strconv.ParseInt(vt, 10, 64)
}
return v, nil
}
@@ -878,9 +704,9 @@ func snmpTable(oid string) (mibName string, oidNum string, oidText string, field
}
func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) {
- mibName, oidNum, oidText, _, err = snmpTranslate(oid)
+ mibName, oidNum, oidText, _, err = SnmpTranslate(oid)
if err != nil {
- return "", "", "", nil, Errorf(err, "translating")
+ return "", "", "", nil, fmt.Errorf("translating: %w", err)
}
mibPrefix := mibName + "::"
@@ -917,7 +743,7 @@ func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, f
// this won't actually try to run a query. The `-Ch` will just cause it to dump headers.
out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName)
if err != nil {
- return "", "", "", nil, Errorf(err, "getting table columns")
+ return "", "", "", nil, fmt.Errorf("getting table columns: %w", err)
}
scanner := bufio.NewScanner(bytes.NewBuffer(out))
scanner.Scan()
@@ -948,7 +774,7 @@ var snmpTranslateCachesLock sync.Mutex
var snmpTranslateCaches map[string]snmpTranslateCache
// snmpTranslate resolves the given OID.
-func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
+func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
snmpTranslateCachesLock.Lock()
if snmpTranslateCaches == nil {
snmpTranslateCaches = map[string]snmpTranslateCache{}
@@ -961,9 +787,9 @@ func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, c
// We could speed it up by putting a lock in snmpTranslateCache and then
// returning it immediately, and multiple callers would then release the
// snmpTranslateCachesLock and instead wait on the individual
- // snmpTranlsation.Lock to release. But I don't know that the extra complexity
+ // snmpTranslation.Lock to release. But I don't know that the extra complexity
// is worth it. Especially when it would slam the system pretty hard if lots
- // of lookups are being perfomed.
+ // of lookups are being performed.
stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid)
snmpTranslateCaches[oid] = stc
@@ -974,6 +800,28 @@ func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, c
return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err
}
+func SnmpTranslateForce(oid string, mibName string, oidNum string, oidText string, conversion string) {
+ snmpTranslateCachesLock.Lock()
+ defer snmpTranslateCachesLock.Unlock()
+ if snmpTranslateCaches == nil {
+ snmpTranslateCaches = map[string]snmpTranslateCache{}
+ }
+
+ var stc snmpTranslateCache
+ stc.mibName = mibName
+ stc.oidNum = oidNum
+ stc.oidText = oidText
+ stc.conversion = conversion
+ stc.err = nil
+ snmpTranslateCaches[oid] = stc
+}
+
+func SnmpTranslateClear() {
+ snmpTranslateCachesLock.Lock()
+ defer snmpTranslateCachesLock.Unlock()
+ snmpTranslateCaches = map[string]snmpTranslateCache{}
+}
+
func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
var out []byte
if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") {
@@ -993,7 +841,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin
scanner := bufio.NewScanner(bytes.NewBuffer(out))
ok := scanner.Scan()
if !ok && scanner.Err() != nil {
- return "", "", "", "", Errorf(scanner.Err(), "getting OID text")
+ return "", "", "", "", fmt.Errorf("getting OID text: %w", scanner.Err())
}
oidText = scanner.Text()
diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go
index db1a49605df05..9991ff7413a9a 100644
--- a/plugins/inputs/snmp/snmp_test.go
+++ b/plugins/inputs/snmp/snmp_test.go
@@ -10,6 +10,9 @@ import (
"time"
"github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/snmp"
+ config "github.com/influxdata/telegraf/internal/snmp"
+ "github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/testutil"
"github.com/influxdata/toml"
"github.com/soniah/gosnmp"
@@ -82,45 +85,22 @@ var tsc = &testSNMPConnection{
}
func TestSampleConfig(t *testing.T) {
- conf := struct {
- Inputs struct {
- Snmp []*Snmp
- }
- }{}
- err := toml.Unmarshal([]byte("[[inputs.snmp]]\n"+(*Snmp)(nil).SampleConfig()), &conf)
- assert.NoError(t, err)
-
- s := Snmp{
- Agents: []string{"127.0.0.1:161"},
- Timeout: internal.Duration{Duration: 5 * time.Second},
- Version: 2,
- Community: "public",
- MaxRepetitions: 10,
- Retries: 3,
-
- Name: "system",
- Fields: []Field{
- {Name: "hostname", Oid: ".1.0.0.1.1"},
- {Name: "uptime", Oid: ".1.0.0.1.2"},
- {Name: "load", Oid: ".1.0.0.1.3"},
- {Oid: "HOST-RESOURCES-MIB::hrMemorySize"},
- },
- Tables: []Table{
- {
- Name: "remote_servers",
- InheritTags: []string{"hostname"},
- Fields: []Field{
- {Name: "server", Oid: ".1.0.0.0.1.0", IsTag: true},
- {Name: "connections", Oid: ".1.0.0.0.1.1"},
- {Name: "latency", Oid: ".1.0.0.0.1.2"},
- },
- },
- {
- Oid: "HOST-RESOURCES-MIB::hrNetworkTable",
- },
+ conf := inputs.Inputs["snmp"]()
+ err := toml.Unmarshal([]byte(conf.SampleConfig()), conf)
+ require.NoError(t, err)
+
+ expected := &Snmp{
+ Agents: []string{"udp://127.0.0.1:161"},
+ ClientConfig: config.ClientConfig{
+ Timeout: internal.Duration{Duration: 5 * time.Second},
+ Version: 2,
+ Community: "public",
+ MaxRepetitions: 10,
+ Retries: 3,
},
+ Name: "snmp",
}
- assert.Equal(t, &s, conf.Inputs.Snmp[0])
+ require.Equal(t, expected, conf)
}
func TestFieldInit(t *testing.T) {
@@ -165,7 +145,7 @@ func TestTableInit(t *testing.T) {
{Oid: "TEST::description", Name: "description", IsTag: true},
},
}
- err := tbl.init()
+ err := tbl.Init()
require.NoError(t, err)
assert.Equal(t, "testTable", tbl.Name)
@@ -256,52 +236,97 @@ func TestSnmpInit_noTranslate(t *testing.T) {
func TestGetSNMPConnection_v2(t *testing.T) {
s := &Snmp{
- Agents: []string{"1.2.3.4:567", "1.2.3.4"},
- Timeout: internal.Duration{Duration: 3 * time.Second},
- Retries: 4,
- Version: 2,
- Community: "foo",
+ Agents: []string{"1.2.3.4:567", "1.2.3.4", "udp://127.0.0.1"},
+ ClientConfig: config.ClientConfig{
+ Timeout: internal.Duration{Duration: 3 * time.Second},
+ Retries: 4,
+ Version: 2,
+ Community: "foo",
+ },
}
err := s.init()
require.NoError(t, err)
gsc, err := s.getConnection(0)
require.NoError(t, err)
- gs := gsc.(gosnmpWrapper)
+ gs := gsc.(snmp.GosnmpWrapper)
assert.Equal(t, "1.2.3.4", gs.Target)
assert.EqualValues(t, 567, gs.Port)
assert.Equal(t, gosnmp.Version2c, gs.Version)
assert.Equal(t, "foo", gs.Community)
+ assert.Equal(t, "udp", gs.Transport)
gsc, err = s.getConnection(1)
require.NoError(t, err)
- gs = gsc.(gosnmpWrapper)
+ gs = gsc.(snmp.GosnmpWrapper)
assert.Equal(t, "1.2.3.4", gs.Target)
assert.EqualValues(t, 161, gs.Port)
+ assert.Equal(t, "udp", gs.Transport)
+
+ gsc, err = s.getConnection(2)
+ require.NoError(t, err)
+ gs = gsc.(snmp.GosnmpWrapper)
+ assert.Equal(t, "127.0.0.1", gs.Target)
+ assert.EqualValues(t, 161, gs.Port)
+ assert.Equal(t, "udp", gs.Transport)
+}
+
+func TestGetSNMPConnectionTCP(t *testing.T) {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go stubTCPServer(&wg)
+ wg.Wait()
+
+ s := &Snmp{
+ Agents: []string{"tcp://127.0.0.1:56789"},
+ }
+ err := s.init()
+ require.NoError(t, err)
+
+ wg.Add(1)
+ gsc, err := s.getConnection(0)
+ require.NoError(t, err)
+ gs := gsc.(snmp.GosnmpWrapper)
+ assert.Equal(t, "127.0.0.1", gs.Target)
+ assert.EqualValues(t, 56789, gs.Port)
+ assert.Equal(t, "tcp", gs.Transport)
+ wg.Wait()
+}
+
+func stubTCPServer(wg *sync.WaitGroup) {
+ defer wg.Done()
+ tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:56789")
+ tcpServer, _ := net.ListenTCP("tcp", tcpAddr)
+ defer tcpServer.Close()
+ wg.Done()
+ conn, _ := tcpServer.AcceptTCP()
+ defer conn.Close()
}
func TestGetSNMPConnection_v3(t *testing.T) {
s := &Snmp{
- Agents: []string{"1.2.3.4"},
- Version: 3,
- MaxRepetitions: 20,
- ContextName: "mycontext",
- SecLevel: "authPriv",
- SecName: "myuser",
- AuthProtocol: "md5",
- AuthPassword: "password123",
- PrivProtocol: "des",
- PrivPassword: "321drowssap",
- EngineID: "myengineid",
- EngineBoots: 1,
- EngineTime: 2,
+ Agents: []string{"1.2.3.4"},
+ ClientConfig: config.ClientConfig{
+ Version: 3,
+ MaxRepetitions: 20,
+ ContextName: "mycontext",
+ SecLevel: "authPriv",
+ SecName: "myuser",
+ AuthProtocol: "md5",
+ AuthPassword: "password123",
+ PrivProtocol: "des",
+ PrivPassword: "321drowssap",
+ EngineID: "myengineid",
+ EngineBoots: 1,
+ EngineTime: 2,
+ },
}
err := s.init()
require.NoError(t, err)
gsc, err := s.getConnection(0)
require.NoError(t, err)
- gs := gsc.(gosnmpWrapper)
+ gs := gsc.(snmp.GosnmpWrapper)
assert.Equal(t, gs.Version, gosnmp.Version3)
sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters)
assert.Equal(t, "1.2.3.4", gsc.Host())
@@ -377,7 +402,9 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) {
require.NoError(t, err)
conn := gs.Conn
- gsw := gosnmpWrapper{gs}
+ gsw := snmp.GosnmpWrapper{
+ GoSNMP: gs,
+ }
err = gsw.Walk(".1.0.0", func(_ gosnmp.SnmpPDU) error { return nil })
srvr.Close()
wg.Wait()
@@ -425,7 +452,9 @@ func TestGosnmpWrapper_get_retry(t *testing.T) {
require.NoError(t, err)
conn := gs.Conn
- gsw := gosnmpWrapper{gs}
+ gsw := snmp.GosnmpWrapper{
+ GoSNMP: gs,
+ }
_, err = gsw.Get([]string{".1.0.0"})
srvr.Close()
wg.Wait()
@@ -677,6 +706,8 @@ func TestFieldConvert(t *testing.T) {
{uint64(123), "float(3)", float64(0.123)},
{"123", "int", int64(123)},
{[]byte("123"), "int", int64(123)},
+ {"123123123123", "int", int64(123123123123)},
+ {[]byte("123123123123"), "int", int64(123123123123)},
{float32(12.3), "int", int64(12)},
{float64(12.3), "int", int64(12)},
{int(123), "int", int64(123)},
@@ -708,7 +739,7 @@ func TestFieldConvert(t *testing.T) {
func TestSnmpTranslateCache_miss(t *testing.T) {
snmpTranslateCaches = nil
oid := "IF-MIB::ifPhysAddress.1"
- mibName, oidNum, oidText, conversion, err := snmpTranslate(oid)
+ mibName, oidNum, oidText, conversion, err := SnmpTranslate(oid)
assert.Len(t, snmpTranslateCaches, 1)
stc := snmpTranslateCaches[oid]
require.NotNil(t, stc)
@@ -729,7 +760,7 @@ func TestSnmpTranslateCache_hit(t *testing.T) {
err: fmt.Errorf("e"),
},
}
- mibName, oidNum, oidText, conversion, err := snmpTranslate("foo")
+ mibName, oidNum, oidText, conversion, err := SnmpTranslate("foo")
assert.Equal(t, "a", mibName)
assert.Equal(t, "b", oidNum)
assert.Equal(t, "c", oidText)
@@ -769,16 +800,3 @@ func TestSnmpTableCache_hit(t *testing.T) {
assert.Equal(t, []Field{{Name: "d"}}, fields)
assert.Equal(t, fmt.Errorf("e"), err)
}
-
-func TestError(t *testing.T) {
- e := fmt.Errorf("nested error")
- err := Errorf(e, "top error %d", 123)
- require.Error(t, err)
-
- ne, ok := err.(NestedError)
- require.True(t, ok)
- assert.Equal(t, e, ne.NestedErr)
-
- assert.Contains(t, err.Error(), "top error 123")
- assert.Contains(t, err.Error(), "nested error")
-}
diff --git a/plugins/inputs/snmp_legacy/README.md b/plugins/inputs/snmp_legacy/README.md
index bee78322803be..06bebbcad6176 100644
--- a/plugins/inputs/snmp_legacy/README.md
+++ b/plugins/inputs/snmp_legacy/README.md
@@ -1,4 +1,4 @@
-# SNMP Input Plugin
+# SNMP Legacy Input Plugin
The SNMP input plugin gathers metrics from SNMP agents
diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go
index 57f9f4fe24738..8df9cff06fa2c 100644
--- a/plugins/inputs/snmp_legacy/snmp_legacy.go
+++ b/plugins/inputs/snmp_legacy/snmp_legacy.go
@@ -1,7 +1,6 @@
package snmp_legacy
import (
- "fmt"
"io/ioutil"
"log"
"net"
@@ -24,6 +23,8 @@ type Snmp struct {
Subtable []Subtable
SnmptranslateFile string
+ Log telegraf.Logger
+
nameToOid map[string]string
initNode Node
subTableMap map[string]Subtable
@@ -297,7 +298,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
data, err := ioutil.ReadFile(s.SnmptranslateFile)
if err != nil {
- log.Printf("E! Reading SNMPtranslate file error: %s", err)
+ s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error())
return err
} else {
for _, line := range strings.Split(string(data), "\n") {
@@ -395,16 +396,16 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// only if len(s.OidInstanceMapping) == 0
if len(host.OidInstanceMapping) >= 0 {
if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil {
- acc.AddError(fmt.Errorf("E! SNMP Mapping error for host '%s': %s", host.Address, err))
+ s.Log.Errorf("Mapping error for host %q: %s", host.Address, err.Error())
continue
}
}
// Launch Get requests
if err := host.SNMPGet(acc, s.initNode); err != nil {
- acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err))
+ s.Log.Errorf("Error for host %q: %s", host.Address, err.Error())
}
if err := host.SNMPBulk(acc, s.initNode); err != nil {
- acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err))
+ s.Log.Errorf("Error for host %q: %s", host.Address, err.Error())
}
}
return nil
@@ -801,7 +802,7 @@ func (h *Host) HandleResponse(
acc.AddFields(field_name, fields, tags)
case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
// Oid not found
- log.Printf("E! [snmp input] Oid not found: %s", oid_key)
+ log.Printf("E! [inputs.snmp_legacy] oid %q not found", oid_key)
default:
// delete other data
}
diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md
new file mode 100644
index 0000000000000..046f18e498604
--- /dev/null
+++ b/plugins/inputs/snmp_trap/README.md
@@ -0,0 +1,102 @@
+# SNMP Trap Input Plugin
+
+The SNMP Trap plugin is a service input plugin that receives SNMP
+notifications (traps and inform requests).
+
+Notifications are received on plain UDP. The port to listen is
+configurable.
+
+### Prerequisites
+
+This plugin uses the `snmptranslate` programs from the
+[net-snmp][] project. These tools will need to be installed into the `PATH` in
+order to be located. Other utilities from the net-snmp project may be useful
+for troubleshooting, but are not directly used by the plugin.
+
+These programs will load available MIBs on the system. Typically the default
+directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a
+different location you may need to make the paths known to net-snmp. The
+location of these files can be configured in the `snmp.conf` or via the
+`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more
+information.
+
+### Configuration
+```toml
+[[inputs.snmp_trap]]
+ ## Transport, local address, and port to listen on. Transport must
+ ## be "udp://". Omit local address to listen on all interfaces.
+ ## example: "udp://127.0.0.1:1234"
+ ##
+ ## Special permissions may be required to listen on a port less than
+ ## 1024. See README.md for details
+ ##
+ # service_address = "udp://:162"
+ ## Timeout running snmptranslate command
+ # timeout = "5s"
+ ## Snmp version
+ # version = "2c"
+ ## SNMPv3 authentication and encryption options.
+ ##
+ ## Security Name.
+ # sec_name = "myuser"
+ ## Authentication protocol; one of "MD5", "SHA" or "".
+ # auth_protocol = "MD5"
+ ## Authentication password.
+ # auth_password = "pass"
+ ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+ # sec_level = "authNoPriv"
+ ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "".
+ # priv_protocol = ""
+ ## Privacy password used for encrypted messages.
+ # priv_password = ""
+```
+
+#### Using a Privileged Port
+
+On many operating systems, listening on a privileged port (a port
+number less than 1024) requires extra permission. Since the default
+SNMP trap port 162 is in this category, using telegraf to receive SNMP
+traps may need extra permission.
+
+Instructions for listening on a privileged port vary by operating
+system. It is not recommended to run telegraf as superuser in order to
+use a privileged port. Instead follow the principle of least privilege
+and use a more specific operating system mechanism to allow telegraf to
+use the port. You may also be able to have telegraf use an
+unprivileged port and then configure a firewall port forward rule from
+the privileged port.
+
+To use a privileged port on Linux, you can use setcap to enable the
+CAP_NET_BIND_SERVICE capability on the telegraf binary:
+
+```
+setcap cap_net_bind_service=+ep /usr/bin/telegraf
+```
+
+On Mac OS, listening on privileged ports is unrestricted on versions
+10.14 and later.
+
+### Metrics
+
+- snmp_trap
+ - tags:
+ - source (string, IP address of trap source)
+ - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU)
+ - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU)
+ - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU)
+ - version (string, "1" or "2c" or "3")
+ - context_name (string, value from v3 trap)
+ - engine_id (string, value from v3 trap)
+ - fields:
+ - Fields are mapped from variables in the trap. Field names are
+ the trap variable names after MIB lookup. Field values are trap
+ variable values.
+
+### Example Output
+```
+snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814
+snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459
+```
+
+[net-snmp]: http://www.net-snmp.org/
+[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK
diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go
new file mode 100644
index 0000000000000..dbf0cdbf3ade2
--- /dev/null
+++ b/plugins/inputs/snmp_trap/snmp_trap.go
@@ -0,0 +1,416 @@
+package snmp_trap
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "net"
+ "os/exec"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+
+ "github.com/soniah/gosnmp"
+)
+
+var defaultTimeout = internal.Duration{Duration: time.Second * 5}
+
+type handler func(*gosnmp.SnmpPacket, *net.UDPAddr)
+type execer func(internal.Duration, string, ...string) ([]byte, error)
+
+type mibEntry struct {
+ mibName string
+ oidText string
+}
+
+type SnmpTrap struct {
+ ServiceAddress string `toml:"service_address"`
+ Timeout internal.Duration `toml:"timeout"`
+ Version string `toml:"version"`
+
+ // Settings for version 3
+ // Values: "noAuthNoPriv", "authNoPriv", "authPriv"
+ SecLevel string `toml:"sec_level"`
+ SecName string `toml:"sec_name"`
+ // Values: "MD5", "SHA", "". Default: ""
+ AuthProtocol string `toml:"auth_protocol"`
+ AuthPassword string `toml:"auth_password"`
+ // Values: "DES", "AES", "". Default: ""
+ PrivProtocol string `toml:"priv_protocol"`
+ PrivPassword string `toml:"priv_password"`
+
+ acc telegraf.Accumulator
+ listener *gosnmp.TrapListener
+ timeFunc func() time.Time
+ errCh chan error
+
+ makeHandlerWrapper func(handler) handler
+
+ Log telegraf.Logger `toml:"-"`
+
+ cacheLock sync.Mutex
+ cache map[string]mibEntry
+
+ execCmd execer
+}
+
+var sampleConfig = `
+ ## Transport, local address, and port to listen on. Transport must
+ ## be "udp://". Omit local address to listen on all interfaces.
+ ## example: "udp://127.0.0.1:1234"
+ ##
+ ## Special permissions may be required to listen on a port less than
+ ## 1024. See README.md for details
+ ##
+ # service_address = "udp://:162"
+ ## Timeout running snmptranslate command
+ # timeout = "5s"
+ ## Snmp version, defaults to 2c
+ # version = "2c"
+ ## SNMPv3 authentication and encryption options.
+ ##
+ ## Security Name.
+ # sec_name = "myuser"
+ ## Authentication protocol; one of "MD5", "SHA" or "".
+ # auth_protocol = "MD5"
+ ## Authentication password.
+ # auth_password = "pass"
+ ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+ # sec_level = "authNoPriv"
+ ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "".
+ # priv_protocol = ""
+ ## Privacy password used for encrypted messages.
+ # priv_password = ""
+`
+
+func (s *SnmpTrap) SampleConfig() string {
+ return sampleConfig
+}
+
+func (s *SnmpTrap) Description() string {
+ return "Receive SNMP traps"
+}
+
+func (s *SnmpTrap) Gather(_ telegraf.Accumulator) error {
+ return nil
+}
+
+func init() {
+ inputs.Add("snmp_trap", func() telegraf.Input {
+ return &SnmpTrap{
+ timeFunc: time.Now,
+ ServiceAddress: "udp://:162",
+ Timeout: defaultTimeout,
+ Version: "2c",
+ }
+ })
+}
+
+func realExecCmd(Timeout internal.Duration, arg0 string, args ...string) ([]byte, error) {
+ cmd := exec.Command(arg0, args...)
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err := internal.RunTimeout(cmd, Timeout.Duration)
+ if err != nil {
+ return nil, err
+ }
+ return out.Bytes(), nil
+}
+
+func (s *SnmpTrap) Init() error {
+ s.cache = map[string]mibEntry{}
+ s.execCmd = realExecCmd
+ return nil
+}
+
+func (s *SnmpTrap) Start(acc telegraf.Accumulator) error {
+ s.acc = acc
+ s.listener = gosnmp.NewTrapListener()
+ s.listener.OnNewTrap = makeTrapHandler(s)
+ s.listener.Params = gosnmp.Default
+
+ switch s.Version {
+ case "3":
+ s.listener.Params.Version = gosnmp.Version3
+ case "2c":
+ s.listener.Params.Version = gosnmp.Version2c
+ case "1":
+ s.listener.Params.Version = gosnmp.Version1
+ default:
+ s.listener.Params.Version = gosnmp.Version2c
+ }
+
+ if s.listener.Params.Version == gosnmp.Version3 {
+ s.listener.Params.SecurityModel = gosnmp.UserSecurityModel
+
+ switch strings.ToLower(s.SecLevel) {
+ case "noauthnopriv", "":
+ s.listener.Params.MsgFlags = gosnmp.NoAuthNoPriv
+ case "authnopriv":
+ s.listener.Params.MsgFlags = gosnmp.AuthNoPriv
+ case "authpriv":
+ s.listener.Params.MsgFlags = gosnmp.AuthPriv
+ default:
+ return fmt.Errorf("unknown security level '%s'", s.SecLevel)
+ }
+
+ var authenticationProtocol gosnmp.SnmpV3AuthProtocol
+ switch strings.ToLower(s.AuthProtocol) {
+ case "md5":
+ authenticationProtocol = gosnmp.MD5
+ case "sha":
+ authenticationProtocol = gosnmp.SHA
+ //case "sha224":
+ // authenticationProtocol = gosnmp.SHA224
+ //case "sha256":
+ // authenticationProtocol = gosnmp.SHA256
+ //case "sha384":
+ // authenticationProtocol = gosnmp.SHA384
+ //case "sha512":
+ // authenticationProtocol = gosnmp.SHA512
+ case "":
+ authenticationProtocol = gosnmp.NoAuth
+ default:
+ return fmt.Errorf("unknown authentication protocol '%s'", s.AuthProtocol)
+ }
+
+ var privacyProtocol gosnmp.SnmpV3PrivProtocol
+ switch strings.ToLower(s.PrivProtocol) {
+ case "aes":
+ privacyProtocol = gosnmp.AES
+ case "des":
+ privacyProtocol = gosnmp.DES
+ case "aes192":
+ privacyProtocol = gosnmp.AES192
+ case "aes192c":
+ privacyProtocol = gosnmp.AES192C
+ case "aes256":
+ privacyProtocol = gosnmp.AES256
+ case "aes256c":
+ privacyProtocol = gosnmp.AES256C
+ case "":
+ privacyProtocol = gosnmp.NoPriv
+ default:
+ return fmt.Errorf("unknown privacy protocol '%s'", s.PrivProtocol)
+ }
+
+ s.listener.Params.SecurityParameters = &gosnmp.UsmSecurityParameters{
+ UserName: s.SecName,
+ PrivacyProtocol: privacyProtocol,
+ PrivacyPassphrase: s.PrivPassword,
+ AuthenticationPassphrase: s.AuthPassword,
+ AuthenticationProtocol: authenticationProtocol,
+ }
+
+ }
+
+ // wrap the handler, used in unit tests
+ if nil != s.makeHandlerWrapper {
+ s.listener.OnNewTrap = s.makeHandlerWrapper(s.listener.OnNewTrap)
+ }
+
+ split := strings.SplitN(s.ServiceAddress, "://", 2)
+ if len(split) != 2 {
+ return fmt.Errorf("invalid service address: %s", s.ServiceAddress)
+ }
+
+ protocol := split[0]
+ addr := split[1]
+
+ // gosnmp.TrapListener currently supports udp only. For forward
+ // compatibility, require udp in the service address
+ if protocol != "udp" {
+ return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, s.ServiceAddress)
+ }
+
+ // If (*TrapListener).Listen immediately returns an error we need
+ // to return it from this function. Use a channel to get it here
+ // from the goroutine. Buffer one in case Listen returns after
+ // Listening but before our Close is called.
+ s.errCh = make(chan error, 1)
+ go func() {
+ s.errCh <- s.listener.Listen(addr)
+ }()
+
+ select {
+ case <-s.listener.Listening():
+ s.Log.Infof("Listening on %s", s.ServiceAddress)
+ case err := <-s.errCh:
+ return err
+ }
+
+ return nil
+}
+
+func (s *SnmpTrap) Stop() {
+ s.listener.Close()
+ err := <-s.errCh
+ if nil != err {
+ s.Log.Errorf("Error stopping trap listener %v", err)
+ }
+}
+
+func setTrapOid(tags map[string]string, oid string, e mibEntry) {
+ tags["oid"] = oid
+ tags["name"] = e.oidText
+ tags["mib"] = e.mibName
+}
+
+func makeTrapHandler(s *SnmpTrap) handler {
+ return func(packet *gosnmp.SnmpPacket, addr *net.UDPAddr) {
+ tm := s.timeFunc()
+ fields := map[string]interface{}{}
+ tags := map[string]string{}
+
+ tags["version"] = packet.Version.String()
+ tags["source"] = addr.IP.String()
+
+ if packet.Version == gosnmp.Version1 {
+ // Follow the procedure described in RFC 2576 3.1 to
+ // translate a v1 trap to v2.
+ var trapOid string
+
+ if packet.GenericTrap >= 0 && packet.GenericTrap < 6 {
+ trapOid = ".1.3.6.1.6.3.1.1.5." + strconv.Itoa(packet.GenericTrap+1)
+ } else if packet.GenericTrap == 6 {
+ trapOid = packet.Enterprise + ".0." + strconv.Itoa(packet.SpecificTrap)
+ }
+
+ if trapOid != "" {
+ e, err := s.lookup(trapOid)
+ if err != nil {
+ s.Log.Errorf("Error resolving V1 OID: %v", err)
+ return
+ }
+ setTrapOid(tags, trapOid, e)
+ }
+
+ if packet.AgentAddress != "" {
+ tags["agent_address"] = packet.AgentAddress
+ }
+
+ fields["sysUpTimeInstance"] = packet.Timestamp
+ }
+
+ for _, v := range packet.Variables {
+ // Use system mibs to resolve oids. Don't fall back to
+ // numeric oid because it's not useful enough to the end
+ // user and can be difficult to translate or remove from
+ // the database later.
+
+ var value interface{}
+
+ // todo: format the pdu value based on its snmp type and
+ // the mib's textual convention. The snmp input plugin
+ // only handles textual convention for ip and mac
+ // addresses
+
+ switch v.Type {
+ case gosnmp.ObjectIdentifier:
+ val, ok := v.Value.(string)
+ if !ok {
+ s.Log.Errorf("Error getting value OID")
+ return
+ }
+
+ var e mibEntry
+ var err error
+ e, err = s.lookup(val)
+ if nil != err {
+ s.Log.Errorf("Error resolving value OID: %v", err)
+ return
+ }
+
+ value = e.oidText
+
+ // 1.3.6.1.6.3.1.1.4.1.0 is SNMPv2-MIB::snmpTrapOID.0.
+ // If v.Name is this oid, set a tag of the trap name.
+ if v.Name == ".1.3.6.1.6.3.1.1.4.1.0" {
+ setTrapOid(tags, val, e)
+ continue
+ }
+ default:
+ value = v.Value
+ }
+
+ e, err := s.lookup(v.Name)
+ if nil != err {
+ s.Log.Errorf("Error resolving OID: %v", err)
+ return
+ }
+
+ name := e.oidText
+
+ fields[name] = value
+ }
+
+ if packet.Version == gosnmp.Version3 {
+ if packet.ContextName != "" {
+ tags["context_name"] = packet.ContextName
+ }
+ if packet.ContextEngineID != "" {
+ // SNMP RFCs like 3411 and 5343 show engine ID as a hex string
+ tags["engine_id"] = fmt.Sprintf("%x", packet.ContextEngineID)
+ }
+ }
+
+ s.acc.AddFields("snmp_trap", fields, tags, tm)
+ }
+}
+
+func (s *SnmpTrap) lookup(oid string) (e mibEntry, err error) {
+ s.cacheLock.Lock()
+ defer s.cacheLock.Unlock()
+ var ok bool
+ if e, ok = s.cache[oid]; !ok {
+ // cache miss. exec snmptranslate
+ e, err = s.snmptranslate(oid)
+ if err == nil {
+ s.cache[oid] = e
+ }
+ return e, err
+ }
+ return e, nil
+}
+
+func (s *SnmpTrap) clear() {
+ s.cacheLock.Lock()
+ defer s.cacheLock.Unlock()
+ s.cache = map[string]mibEntry{}
+}
+
+func (s *SnmpTrap) load(oid string, e mibEntry) {
+ s.cacheLock.Lock()
+ defer s.cacheLock.Unlock()
+ s.cache[oid] = e
+}
+
+func (s *SnmpTrap) snmptranslate(oid string) (e mibEntry, err error) {
+ var out []byte
+ out, err = s.execCmd(s.Timeout, "snmptranslate", "-Td", "-Ob", "-m", "all", oid)
+
+ if err != nil {
+ return e, err
+ }
+
+ scanner := bufio.NewScanner(bytes.NewBuffer(out))
+ ok := scanner.Scan()
+ if err = scanner.Err(); !ok && err != nil {
+ return e, err
+ }
+
+ e.oidText = scanner.Text()
+
+ i := strings.Index(e.oidText, "::")
+ if i == -1 {
+ return e, fmt.Errorf("not found")
+ }
+ e.mibName = e.oidText[:i]
+ e.oidText = e.oidText[i+2:]
+ return e, nil
+}
diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go
new file mode 100644
index 0000000000000..ee539f312a77d
--- /dev/null
+++ b/plugins/inputs/snmp_trap/snmp_trap_test.go
@@ -0,0 +1,1319 @@
+package snmp_trap
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/soniah/gosnmp"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/testutil"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestLoad(t *testing.T) {
+ s := &SnmpTrap{}
+ require.Nil(t, s.Init())
+
+ defer s.clear()
+ s.load(
+ ".1.3.6.1.6.3.1.1.5.1",
+ mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ )
+
+ e, err := s.lookup(".1.3.6.1.6.3.1.1.5.1")
+ require.NoError(t, err)
+ require.Equal(t, "SNMPv2-MIB", e.mibName)
+ require.Equal(t, "coldStart", e.oidText)
+}
+
+func fakeExecCmd(_ internal.Duration, x string, y ...string) ([]byte, error) {
+ return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " "))
+}
+
+func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version gosnmp.SnmpVersion, secLevel string, username string, authProto string, authPass string, privProto string, privPass string, contextName string, engineID string) {
+ var s gosnmp.GoSNMP
+
+ if version == gosnmp.Version3 {
+ var msgFlags gosnmp.SnmpV3MsgFlags
+ switch strings.ToLower(secLevel) {
+ case "noauthnopriv", "":
+ msgFlags = gosnmp.NoAuthNoPriv
+ case "authnopriv":
+ msgFlags = gosnmp.AuthNoPriv
+ case "authpriv":
+ msgFlags = gosnmp.AuthPriv
+ default:
+ msgFlags = gosnmp.NoAuthNoPriv
+ }
+
+ var authenticationProtocol gosnmp.SnmpV3AuthProtocol
+ switch strings.ToLower(authProto) {
+ case "md5":
+ authenticationProtocol = gosnmp.MD5
+ case "sha":
+ authenticationProtocol = gosnmp.SHA
+ //case "sha224":
+ // authenticationProtocol = gosnmp.SHA224
+ //case "sha256":
+ // authenticationProtocol = gosnmp.SHA256
+ //case "sha384":
+ // authenticationProtocol = gosnmp.SHA384
+ //case "sha512":
+ // authenticationProtocol = gosnmp.SHA512
+ case "":
+ authenticationProtocol = gosnmp.NoAuth
+ default:
+ authenticationProtocol = gosnmp.NoAuth
+ }
+
+ var privacyProtocol gosnmp.SnmpV3PrivProtocol
+ switch strings.ToLower(privProto) {
+ case "aes":
+ privacyProtocol = gosnmp.AES
+ case "des":
+ privacyProtocol = gosnmp.DES
+ case "aes192":
+ privacyProtocol = gosnmp.AES192
+ case "aes192c":
+ privacyProtocol = gosnmp.AES192C
+ case "aes256":
+ privacyProtocol = gosnmp.AES256
+ case "aes256c":
+ privacyProtocol = gosnmp.AES256C
+ case "":
+ privacyProtocol = gosnmp.NoPriv
+ default:
+ privacyProtocol = gosnmp.NoPriv
+ }
+
+ sp := &gosnmp.UsmSecurityParameters{
+ AuthoritativeEngineID: "1",
+ AuthoritativeEngineBoots: 1,
+ AuthoritativeEngineTime: 1,
+ UserName: username,
+ PrivacyProtocol: privacyProtocol,
+ PrivacyPassphrase: privPass,
+ AuthenticationPassphrase: authPass,
+ AuthenticationProtocol: authenticationProtocol,
+ }
+ s = gosnmp.GoSNMP{
+ Port: port,
+ Version: version,
+ Timeout: time.Duration(2) * time.Second,
+ Retries: 1,
+ MaxOids: gosnmp.MaxOids,
+ Target: "127.0.0.1",
+ SecurityParameters: sp,
+ SecurityModel: gosnmp.UserSecurityModel,
+ MsgFlags: msgFlags,
+ ContextName: contextName,
+ ContextEngineID: engineID,
+ }
+ } else {
+ s = gosnmp.GoSNMP{
+ Port: port,
+ Version: version,
+ Timeout: time.Duration(2) * time.Second,
+ Retries: 1,
+ MaxOids: gosnmp.MaxOids,
+ Target: "127.0.0.1",
+ Community: "public",
+ }
+ }
+
+ err := s.Connect()
+ if err != nil {
+ t.Errorf("Connect() err: %v", err)
+ }
+ defer s.Conn.Close()
+
+ _, err = s.SendTrap(trap)
+ if err != nil {
+ t.Errorf("SendTrap() err: %v", err)
+ }
+}
+
+func TestReceiveTrap(t *testing.T) {
+ var now uint32
+ now = 123123123
+
+ var fakeTime time.Time
+ fakeTime = time.Unix(456456456, 456)
+
+ type entry struct {
+ oid string
+ e mibEntry
+ }
+
+ // If the first pdu isn't type TimeTicks, gosnmp.SendTrap() will
+ // prepend one with time.Now()
+ var tests = []struct {
+ name string
+
+ // send
+ version gosnmp.SnmpVersion
+ trap gosnmp.SnmpTrap // include pdus
+ // V3 auth and priv parameters
+ secName string // v3 username
+ secLevel string // v3 security level
+ authProto string // Auth protocol: "", MD5 or SHA
+ authPass string // Auth passphrase
+ privProto string // Priv protocol: "", DES or AES
+ privPass string // Priv passphrase
+
+ // V3 sender context
+ contextName string
+ engineID string
+
+ // receive
+ entries []entry
+ metrics []telegraf.Metric
+ }{
+ //ordinary v2c coldStart trap
+ {
+ name: "v2c coldStart",
+ version: gosnmp.Version2c,
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "2c",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //Check that we're not running snmptranslate to look up oids
+ //when we shouldn't be. This sends and receives a valid trap
+ //but metric production should fail because the oids aren't in
+ //the cache and oid lookup is intentionally mocked to fail.
+ {
+ name: "missing oid",
+ version: gosnmp.Version2c,
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{}, //nothing in cache
+ metrics: []telegraf.Metric{},
+ },
+ //v1 enterprise specific trap
+ {
+ name: "v1 trap enterprise",
+ version: gosnmp.Version1,
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.2.3.4.5",
+ Type: gosnmp.OctetString,
+ Value: "payload",
+ },
+ },
+ Enterprise: ".1.2.3",
+ AgentAddress: "10.20.30.40",
+ GenericTrap: 6, // enterpriseSpecific
+ SpecificTrap: 55,
+ Timestamp: uint(now),
+ },
+ entries: []entry{
+ {
+ ".1.2.3.4.5",
+ mibEntry{
+ "valueMIB",
+ "valueOID",
+ },
+ },
+ {
+ ".1.2.3.0.55",
+ mibEntry{
+ "enterpriseMIB",
+ "enterpriseOID",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.2.3.0.55",
+ "name": "enterpriseOID",
+ "mib": "enterpriseMIB",
+ "version": "1",
+ "source": "127.0.0.1",
+ "agent_address": "10.20.30.40",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": uint(now),
+ "valueOID": "payload",
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //v1 generic trap
+ {
+ name: "v1 trap generic",
+ version: gosnmp.Version1,
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.2.3.4.5",
+ Type: gosnmp.OctetString,
+ Value: "payload",
+ },
+ },
+ Enterprise: ".1.2.3",
+ AgentAddress: "10.20.30.40",
+ GenericTrap: 0, //coldStart
+ SpecificTrap: 0,
+ Timestamp: uint(now),
+ },
+ entries: []entry{
+ {
+ ".1.2.3.4.5",
+ mibEntry{
+ "valueMIB",
+ "valueOID",
+ },
+ },
+ {
+ ".1.3.6.1.6.3.1.1.5.1",
+ mibEntry{
+ "coldStartMIB",
+ "coldStartOID",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStartOID",
+ "mib": "coldStartMIB",
+ "version": "1",
+ "source": "127.0.0.1",
+ "agent_address": "10.20.30.40",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": uint(now),
+ "valueOID": "payload",
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldStart trap no auth and no priv
+ {
+ name: "v3 coldStart noAuthNoPriv",
+ version: gosnmp.Version3,
+ secName: "noAuthNoPriv",
+ secLevel: "noAuthNoPriv",
+ contextName: "foo_context_name",
+ engineID: "bar_engine_id",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ "context_name": "foo_context_name",
+ "engine_id": "6261725f656e67696e655f6964",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldstart trap SHA auth and no priv
+ {
+ name: "v3 coldStart authShaNoPriv",
+ version: gosnmp.Version3,
+ secName: "authShaNoPriv",
+ secLevel: "authNoPriv",
+ authProto: "SHA",
+ authPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ /*
+ //ordinary v3 coldstart trap SHA224 auth and no priv
+ {
+ name: "v3 coldStart authShaNoPriv",
+ version: gosnmp.Version3,
+ secName: "authSha224NoPriv",
+ secLevel: "authNoPriv",
+ authProto: "SHA224",
+ authPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldstart trap SHA256 auth and no priv
+ {
+ name: "v3 coldStart authSha256NoPriv",
+ version: gosnmp.Version3,
+ secName: "authSha256NoPriv",
+ secLevel: "authNoPriv",
+ authProto: "SHA256",
+ authPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldstart trap SHA384 auth and no priv
+ {
+ name: "v3 coldStart authSha384NoPriv",
+ version: gosnmp.Version3,
+ secName: "authSha384NoPriv",
+ secLevel: "authNoPriv",
+ authProto: "SHA384",
+ authPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldstart trap SHA512 auth and no priv
+ {
+ name: "v3 coldStart authShaNoPriv",
+ version: gosnmp.Version3,
+ secName: "authSha512NoPriv",
+ secLevel: "authNoPriv",
+ authProto: "SHA512",
+ authPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },*/
+ //ordinary v3 coldstart trap SHA auth and no priv
+ {
+ name: "v3 coldStart authShaNoPriv",
+ version: gosnmp.Version3,
+ secName: "authShaNoPriv",
+ secLevel: "authNoPriv",
+ authProto: "SHA",
+ authPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldstart trap MD5 auth and no priv
+ {
+ name: "v3 coldStart authMD5NoPriv",
+ version: gosnmp.Version3,
+ secName: "authMD5NoPriv",
+ secLevel: "authNoPriv",
+ authProto: "MD5",
+ authPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldStart SHA trap auth and AES priv
+ {
+ name: "v3 coldStart authSHAPrivAES",
+ version: gosnmp.Version3,
+ secName: "authSHAPrivAES",
+ secLevel: "authPriv",
+ authProto: "SHA",
+ authPass: "passpass",
+ privProto: "AES",
+ privPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldStart SHA trap auth and DES priv
+ {
+ name: "v3 coldStart authSHAPrivDES",
+ version: gosnmp.Version3,
+ secName: "authSHAPrivDES",
+ secLevel: "authPriv",
+ authProto: "SHA",
+ authPass: "passpass",
+ privProto: "DES",
+ privPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldStart SHA trap auth and AES192 priv
+ {
+ name: "v3 coldStart authSHAPrivAES192",
+ version: gosnmp.Version3,
+ secName: "authSHAPrivAES192",
+ secLevel: "authPriv",
+ authProto: "SHA",
+ authPass: "passpass",
+ privProto: "AES192",
+ privPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldStart SHA trap auth and AES192C priv
+ {
+ name: "v3 coldStart authSHAPrivAES192C",
+ version: gosnmp.Version3,
+ secName: "authSHAPrivAES192C",
+ secLevel: "authPriv",
+ authProto: "SHA",
+ authPass: "passpass",
+ privProto: "AES192C",
+ privPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldStart SHA trap auth and AES256 priv
+ {
+ name: "v3 coldStart authSHAPrivAES256",
+ version: gosnmp.Version3,
+ secName: "authSHAPrivAES256",
+ secLevel: "authPriv",
+ authProto: "SHA",
+ authPass: "passpass",
+ privProto: "AES256",
+ privPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ //ordinary v3 coldStart SHA trap auth and AES256C priv
+ {
+ name: "v3 coldStart authSHAPrivAES256C",
+ version: gosnmp.Version3,
+ secName: "authSHAPrivAES256C",
+ secLevel: "authPriv",
+ authProto: "SHA",
+ authPass: "passpass",
+ privProto: "AES256C",
+ privPass: "passpass",
+ trap: gosnmp.SnmpTrap{
+ Variables: []gosnmp.SnmpPDU{
+ {
+ Name: ".1.3.6.1.2.1.1.3.0",
+ Type: gosnmp.TimeTicks,
+ Value: now,
+ },
+ {
+ Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0
+ Type: gosnmp.ObjectIdentifier,
+ Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart
+ },
+ },
+ },
+ entries: []entry{
+ {
+ oid: ".1.3.6.1.6.3.1.1.4.1.0",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "snmpTrapOID.0",
+ },
+ },
+ {
+ oid: ".1.3.6.1.6.3.1.1.5.1",
+ e: mibEntry{
+ "SNMPv2-MIB",
+ "coldStart",
+ },
+ },
+ {
+ oid: ".1.3.6.1.2.1.1.3.0",
+ e: mibEntry{
+ "UNUSED_MIB_NAME",
+ "sysUpTimeInstance",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "snmp_trap", // name
+ map[string]string{ // tags
+ "oid": ".1.3.6.1.6.3.1.1.5.1",
+ "name": "coldStart",
+ "mib": "SNMPv2-MIB",
+ "version": "3",
+ "source": "127.0.0.1",
+ },
+ map[string]interface{}{ // fields
+ "sysUpTimeInstance": now,
+ },
+ fakeTime,
+ ),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // We would prefer to specify port 0 and let the network
+ // stack choose an unused port for us but TrapListener
+ // doesn't have a way to return the autoselected port.
+ // Instead, we'll use an unusual port and hope it's
+ // unused.
+ const port = 12399
+
+ // Hook into the trap handler so the test knows when the
+ // trap has been received
+ received := make(chan int)
+ wrap := func(f handler) handler {
+ return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) {
+ f(p, a)
+ received <- 0
+ }
+ }
+
+ // Set up the service input plugin
+ s := &SnmpTrap{
+ ServiceAddress: "udp://:" + strconv.Itoa(port),
+ makeHandlerWrapper: wrap,
+ timeFunc: func() time.Time {
+ return fakeTime
+ },
+ Log: testutil.Logger{},
+ Version: tt.version.String(),
+ SecName: tt.secName,
+ SecLevel: tt.secLevel,
+ AuthProtocol: tt.authProto,
+ AuthPassword: tt.authPass,
+ PrivProtocol: tt.privProto,
+ PrivPassword: tt.privPass,
+ }
+ require.Nil(t, s.Init())
+ // Don't look up oid with snmptranslate.
+ s.execCmd = fakeExecCmd
+ var acc testutil.Accumulator
+ require.Nil(t, s.Start(&acc))
+ defer s.Stop()
+
+ // Preload the cache with the oids we'll use in this test
+ // so snmptranslate and mibs don't need to be installed.
+ for _, entry := range tt.entries {
+ s.load(entry.oid, entry.e)
+ }
+
+ // Send the trap
+ sendTrap(t, port, now, tt.trap, tt.version, tt.secLevel, tt.secName, tt.authProto, tt.authPass, tt.privProto, tt.privPass, tt.contextName, tt.engineID)
+
+ // Wait for trap to be received
+ select {
+ case <-received:
+ case <-time.After(2 * time.Second):
+ t.Fatal("timed out waiting for trap to be received")
+ }
+
+ // Verify plugin output
+ testutil.RequireMetricsEqual(t,
+ tt.metrics, acc.GetTelegrafMetrics(),
+ testutil.SortMetrics())
+ })
+ }
+
+}
diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md
index 2f1a0572ee055..f5189a195af9d 100644
--- a/plugins/inputs/socket_listener/README.md
+++ b/plugins/inputs/socket_listener/README.md
@@ -1,4 +1,4 @@
-# socket listener service input plugin
+# Socket Listener Input Plugin
The Socket Listener is a service input plugin that listens for messages from
streaming (tcp, unix) or datagram (udp, unixgram) protocols.
@@ -25,6 +25,13 @@ This is a sample configuration for the plugin.
# service_address = "unix:///tmp/telegraf.sock"
# service_address = "unixgram:///tmp/telegraf.sock"
+ ## Change the file mode bits on unix sockets. These permissions may not be
+ ## respected by some platforms, to safely restrict write permissions it is best
+ ## to place the socket into a directory that has previously been created
+ ## with the desired permissions.
+ ## ex: socket_mode = "777"
+ # socket_mode = ""
+
## Maximum number of concurrent connections.
## Only applies to stream sockets (e.g. TCP).
## 0 (default) is unlimited.
@@ -59,6 +66,10 @@ This is a sample configuration for the plugin.
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
+
+ ## Content encoding for message payloads, can be set to "gzip" to or
+ ## "identity" to apply no encoding.
+ # content_encoding = "identity"
```
## A Note on UDP OS Buffer Sizes
@@ -71,12 +82,13 @@ setting.
Instructions on how to adjust these OS settings are available below.
-Some OSes (most notably, Linux) place very restricive limits on the performance
+Some OSes (most notably, Linux) place very restrictive limits on the performance
of UDP protocols. It is _highly_ recommended that you increase these OS limits to
at least 8MB before trying to run large amounts of UDP traffic to your instance.
8MB is just a recommendation, and can be adjusted higher.
### Linux
+
Check the current UDP/IP receive buffer limit & default by typing the following
commands:
diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go
index ed007a00acbf2..e412996f38e6e 100644
--- a/plugins/inputs/socket_listener/socket_listener.go
+++ b/plugins/inputs/socket_listener/socket_listener.go
@@ -5,16 +5,16 @@ import (
"crypto/tls"
"fmt"
"io"
- "log"
"net"
"os"
+ "strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -36,11 +36,13 @@ type streamSocketListener struct {
func (ssl *streamSocketListener) listen() {
ssl.connections = map[string]net.Conn{}
+ wg := sync.WaitGroup{}
+
for {
c, err := ssl.Accept()
if err != nil {
if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
- ssl.AddError(err)
+ ssl.Log.Error(err.Error())
}
break
}
@@ -49,7 +51,7 @@ func (ssl *streamSocketListener) listen() {
if srb, ok := c.(setReadBufferer); ok {
srb.SetReadBuffer(int(ssl.ReadBufferSize.Size))
} else {
- log.Printf("W! Unable to set read buffer on a %s socket", ssl.sockType)
+ ssl.Log.Warnf("Unable to set read buffer on a %s socket", ssl.sockType)
}
}
@@ -63,10 +65,14 @@ func (ssl *streamSocketListener) listen() {
ssl.connectionsMtx.Unlock()
if err := ssl.setKeepAlive(c); err != nil {
- ssl.AddError(fmt.Errorf("unable to configure keep alive (%s): %s", ssl.ServiceAddress, err))
+ ssl.Log.Errorf("Unable to configure keep alive %q: %s", ssl.ServiceAddress, err.Error())
}
- go ssl.read(c)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ ssl.read(c)
+ }()
}
ssl.connectionsMtx.Lock()
@@ -74,6 +80,8 @@ func (ssl *streamSocketListener) listen() {
c.Close()
}
ssl.connectionsMtx.Unlock()
+
+ wg.Wait()
}
func (ssl *streamSocketListener) setKeepAlive(c net.Conn) error {
@@ -103,7 +111,12 @@ func (ssl *streamSocketListener) read(c net.Conn) {
defer ssl.removeConnection(c)
defer c.Close()
- scnr := bufio.NewScanner(c)
+ decoder, err := internal.NewStreamContentDecoder(ssl.ContentEncoding, c)
+ if err != nil {
+ ssl.Log.Error("Read error: %v", err)
+ }
+
+ scnr := bufio.NewScanner(decoder)
for {
if ssl.ReadTimeout != nil && ssl.ReadTimeout.Duration > 0 {
c.SetReadDeadline(time.Now().Add(ssl.ReadTimeout.Duration))
@@ -111,9 +124,12 @@ func (ssl *streamSocketListener) read(c net.Conn) {
if !scnr.Scan() {
break
}
- metrics, err := ssl.Parse(scnr.Bytes())
+
+ body := scnr.Bytes()
+
+ metrics, err := ssl.Parse(body)
if err != nil {
- ssl.AddError(fmt.Errorf("unable to parse incoming line: %s", err))
+ ssl.Log.Errorf("Unable to parse incoming line: %s", err.Error())
// TODO rate limit
continue
}
@@ -124,9 +140,9 @@ func (ssl *streamSocketListener) read(c net.Conn) {
if err := scnr.Err(); err != nil {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
- log.Printf("D! Timeout in plugin [input.socket_listener]: %s", err)
+ ssl.Log.Debugf("Timeout in plugin: %s", err.Error())
} else if netErr != nil && !strings.HasSuffix(err.Error(), ": use of closed network connection") {
- ssl.AddError(err)
+ ssl.Log.Error(err.Error())
}
}
}
@@ -134,6 +150,7 @@ func (ssl *streamSocketListener) read(c net.Conn) {
type packetSocketListener struct {
net.PacketConn
*SocketListener
+ decoder internal.ContentDecoder
}
func (psl *packetSocketListener) listen() {
@@ -142,14 +159,19 @@ func (psl *packetSocketListener) listen() {
n, _, err := psl.ReadFrom(buf)
if err != nil {
if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
- psl.AddError(err)
+ psl.Log.Error(err.Error())
}
break
}
- metrics, err := psl.Parse(buf[:n])
+ body, err := psl.decoder.Decode(buf[:n])
+ if err != nil {
+ psl.Log.Errorf("Unable to decode incoming packet: %s", err.Error())
+ }
+
+ metrics, err := psl.Parse(body)
if err != nil {
- psl.AddError(fmt.Errorf("unable to parse incoming packet: %s", err))
+ psl.Log.Errorf("Unable to parse incoming packet: %s", err.Error())
// TODO rate limit
continue
}
@@ -165,8 +187,14 @@ type SocketListener struct {
ReadBufferSize internal.Size `toml:"read_buffer_size"`
ReadTimeout *internal.Duration `toml:"read_timeout"`
KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"`
+ SocketMode string `toml:"socket_mode"`
+ ContentEncoding string `toml:"content_encoding"`
tlsint.ServerConfig
+ wg sync.WaitGroup
+
+ Log telegraf.Logger
+
parsers.Parser
telegraf.Accumulator
io.Closer
@@ -190,6 +218,13 @@ func (sl *SocketListener) SampleConfig() string {
# service_address = "unix:///tmp/telegraf.sock"
# service_address = "unixgram:///tmp/telegraf.sock"
+ ## Change the file mode bits on unix sockets. These permissions may not be
+ ## respected by some platforms, to safely restrict write permissions it is best
+ ## to place the socket into a directory that has previously been created
+ ## with the desired permissions.
+ ## ex: socket_mode = "777"
+ # socket_mode = ""
+
## Maximum number of concurrent connections.
## Only applies to stream sockets (e.g. TCP).
## 0 (default) is unlimited.
@@ -224,6 +259,10 @@ func (sl *SocketListener) SampleConfig() string {
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
+
+ ## Content encoding for message payloads, can be set to "gzip" to or
+ ## "identity" to apply no encoding.
+ # content_encoding = "identity"
`
}
@@ -254,16 +293,12 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
switch protocol {
case "tcp", "tcp4", "tcp6", "unix", "unixpacket":
- var (
- err error
- l net.Listener
- )
-
tlsCfg, err := sl.ServerConfig.TLSConfig()
if err != nil {
return err
}
+ var l net.Listener
if tlsCfg == nil {
l, err = net.Listen(protocol, addr)
} else {
@@ -273,7 +308,18 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
return err
}
- log.Printf("I! [inputs.socket_listener] Listening on %s://%s", protocol, l.Addr())
+ sl.Log.Infof("Listening on %s://%s", protocol, l.Addr())
+
+ // Set permissions on socket
+ if (spl[0] == "unix" || spl[0] == "unixpacket") && sl.SocketMode != "" {
+ // Convert from octal in string to int
+ i, err := strconv.ParseUint(sl.SocketMode, 8, 32)
+ if err != nil {
+ return err
+ }
+
+ os.Chmod(spl[1], os.FileMode(uint32(i)))
+ }
ssl := &streamSocketListener{
Listener: l,
@@ -282,30 +328,57 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
}
sl.Closer = ssl
- go ssl.listen()
+ sl.wg = sync.WaitGroup{}
+ sl.wg.Add(1)
+ go func() {
+ defer sl.wg.Done()
+ ssl.listen()
+ }()
case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram":
+ decoder, err := internal.NewContentDecoder(sl.ContentEncoding)
+ if err != nil {
+ return err
+ }
+
pc, err := udpListen(protocol, addr)
if err != nil {
return err
}
+ // Set permissions on socket
+ if spl[0] == "unixgram" && sl.SocketMode != "" {
+ // Convert from octal in string to int
+ i, err := strconv.ParseUint(sl.SocketMode, 8, 32)
+ if err != nil {
+ return err
+ }
+
+ os.Chmod(spl[1], os.FileMode(uint32(i)))
+ }
+
if sl.ReadBufferSize.Size > 0 {
if srb, ok := pc.(setReadBufferer); ok {
srb.SetReadBuffer(int(sl.ReadBufferSize.Size))
} else {
- log.Printf("W! Unable to set read buffer on a %s socket", protocol)
+ sl.Log.Warnf("Unable to set read buffer on a %s socket", protocol)
}
}
- log.Printf("I! [inputs.socket_listener] Listening on %s://%s", protocol, pc.LocalAddr())
+ sl.Log.Infof("Listening on %s://%s", protocol, pc.LocalAddr())
psl := &packetSocketListener{
PacketConn: pc,
SocketListener: sl,
+ decoder: decoder,
}
sl.Closer = psl
- go psl.listen()
+ sl.wg = sync.WaitGroup{}
+ sl.wg.Add(1)
+ go func() {
+ defer sl.wg.Done()
+ psl.listen()
+ }()
default:
return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, sl.ServiceAddress)
}
@@ -347,6 +420,7 @@ func (sl *SocketListener) Stop() {
sl.Close()
sl.Closer = nil
}
+ sl.wg.Wait()
}
func newSocketListener() *SocketListener {
diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go
index b4415e0922887..a46add15cf61b 100644
--- a/plugins/inputs/socket_listener/socket_listener_test.go
+++ b/plugins/inputs/socket_listener/socket_listener_test.go
@@ -48,6 +48,7 @@ func TestSocketListener_tcp_tls(t *testing.T) {
defer testEmptyLog(t)()
sl := newSocketListener()
+ sl.Log = testutil.Logger{}
sl.ServiceAddress = "tcp://127.0.0.1:0"
sl.ServerConfig = *pki.TLSServerConfig()
@@ -72,6 +73,7 @@ func TestSocketListener_unix_tls(t *testing.T) {
sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock")
sl := newSocketListener()
+ sl.Log = testutil.Logger{}
sl.ServiceAddress = "unix://" + sock
sl.ServerConfig = *pki.TLSServerConfig()
@@ -94,6 +96,7 @@ func TestSocketListener_tcp(t *testing.T) {
defer testEmptyLog(t)()
sl := newSocketListener()
+ sl.Log = testutil.Logger{}
sl.ServiceAddress = "tcp://127.0.0.1:0"
sl.ReadBufferSize = internal.Size{Size: 1024}
@@ -112,6 +115,7 @@ func TestSocketListener_udp(t *testing.T) {
defer testEmptyLog(t)()
sl := newSocketListener()
+ sl.Log = testutil.Logger{}
sl.ServiceAddress = "udp://127.0.0.1:0"
sl.ReadBufferSize = internal.Size{Size: 1024}
@@ -136,6 +140,7 @@ func TestSocketListener_unix(t *testing.T) {
os.Create(sock)
sl := newSocketListener()
+ sl.Log = testutil.Logger{}
sl.ServiceAddress = "unix://" + sock
sl.ReadBufferSize = internal.Size{Size: 1024}
@@ -160,6 +165,7 @@ func TestSocketListener_unixgram(t *testing.T) {
os.Create(sock)
sl := newSocketListener()
+ sl.Log = testutil.Logger{}
sl.ServiceAddress = "unixgram://" + sock
sl.ReadBufferSize = internal.Size{Size: 1024}
@@ -174,16 +180,65 @@ func TestSocketListener_unixgram(t *testing.T) {
testSocketListener(t, sl, client)
}
+func TestSocketListenerDecode_tcp(t *testing.T) {
+ defer testEmptyLog(t)()
+
+ sl := newSocketListener()
+ sl.Log = testutil.Logger{}
+ sl.ServiceAddress = "tcp://127.0.0.1:0"
+ sl.ReadBufferSize = internal.Size{Size: 1024}
+ sl.ContentEncoding = "gzip"
+
+ acc := &testutil.Accumulator{}
+ err := sl.Start(acc)
+ require.NoError(t, err)
+ defer sl.Stop()
+
+ client, err := net.Dial("tcp", sl.Closer.(net.Listener).Addr().String())
+ require.NoError(t, err)
+
+ testSocketListener(t, sl, client)
+}
+
+func TestSocketListenerDecode_udp(t *testing.T) {
+ defer testEmptyLog(t)()
+
+ sl := newSocketListener()
+ sl.Log = testutil.Logger{}
+ sl.ServiceAddress = "udp://127.0.0.1:0"
+ sl.ReadBufferSize = internal.Size{Size: 1024}
+ sl.ContentEncoding = "gzip"
+
+ acc := &testutil.Accumulator{}
+ err := sl.Start(acc)
+ require.NoError(t, err)
+ defer sl.Stop()
+
+ client, err := net.Dial("udp", sl.Closer.(net.PacketConn).LocalAddr().String())
+ require.NoError(t, err)
+
+ testSocketListener(t, sl, client)
+}
+
func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) {
- mstr12 := "test,foo=bar v=1i 123456789\ntest,foo=baz v=2i 123456790\n"
- mstr3 := "test,foo=zab v=3i 123456791"
- client.Write([]byte(mstr12))
- client.Write([]byte(mstr3))
- if _, ok := client.(net.Conn); ok {
- // stream connection. needs trailing newline to terminate mstr3
- client.Write([]byte{'\n'})
+ mstr12 := []byte("test,foo=bar v=1i 123456789\ntest,foo=baz v=2i 123456790\n")
+ mstr3 := []byte("test,foo=zab v=3i 123456791\n")
+
+ if sl.ContentEncoding == "gzip" {
+ encoder, err := internal.NewContentEncoder(sl.ContentEncoding)
+ require.NoError(t, err)
+ mstr12, err = encoder.Encode(mstr12)
+ require.NoError(t, err)
+
+ encoder, err = internal.NewContentEncoder(sl.ContentEncoding)
+ require.NoError(t, err)
+ mstr3, err = encoder.Encode(mstr3)
+ require.NoError(t, err)
}
+ client.Write(mstr12)
+ client.Write(mstr3)
+
acc := sl.Accumulator.(*testutil.Accumulator)
acc.Wait(3)
diff --git a/plugins/inputs/solr/README.md b/plugins/inputs/solr/README.md
index 67f4e06ae22ad..c20fa92836c70 100644
--- a/plugins/inputs/solr/README.md
+++ b/plugins/inputs/solr/README.md
@@ -1,4 +1,4 @@
-# Solr input plugin
+# Solr Input Plugin
The [solr](http://lucene.apache.org/solr/) plugin collects stats via the
[MBean Request Handler](https://cwiki.apache.org/confluence/display/solr/MBean+Request+Handler)
@@ -9,13 +9,17 @@ Tested from 3.5 to 7.*
### Configuration:
-```
+```toml
[[inputs.solr]]
## specify a list of one or more Solr servers
servers = ["http://localhost:8983"]
##
## specify a list of one or more Solr cores (default - all)
# cores = ["main"]
+ ##
+ ## Optional HTTP Basic Auth Credentials
+ # username = "username"
+ # password = "pa$$word"
```
### Example output of gathered metrics:
diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go
index 9b5ce92991c2a..ce44fa0869c20 100644
--- a/plugins/inputs/solr/solr.go
+++ b/plugins/inputs/solr/solr.go
@@ -28,12 +28,18 @@ const sampleConfig = `
## specify a list of one or more Solr cores (default - all)
# cores = ["main"]
+
+ ## Optional HTTP Basic Auth Credentials
+ # username = "username"
+ # password = "pa$$word"
`
// Solr is a plugin to read stats from one or many Solr servers
type Solr struct {
Local bool
Servers []string
+ Username string
+ Password string
HTTPTimeout internal.Duration
Cores []string
client *http.Client
@@ -220,7 +226,7 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo
func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
var coreMetrics map[string]Core
if len(mBeansData.SolrMbeans) < 2 {
- return fmt.Errorf("no core metric data to unmarshall")
+ return fmt.Errorf("no core metric data to unmarshal")
}
if err := json.Unmarshal(mBeansData.SolrMbeans[1], &coreMetrics); err != nil {
return err
@@ -251,7 +257,7 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa
var queryMetrics map[string]QueryHandler
if len(mBeansData.SolrMbeans) < 4 {
- return fmt.Errorf("no query handler metric data to unmarshall")
+ return fmt.Errorf("no query handler metric data to unmarshal")
}
if err := json.Unmarshal(mBeansData.SolrMbeans[3], &queryMetrics); err != nil {
@@ -326,7 +332,7 @@ func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansD
var updateMetrics map[string]UpdateHandler
if len(mBeansData.SolrMbeans) < 6 {
- return fmt.Errorf("no update handler metric data to unmarshall")
+ return fmt.Errorf("no update handler metric data to unmarshal")
}
if err := json.Unmarshal(mBeansData.SolrMbeans[5], &updateMetrics); err != nil {
return err
@@ -404,7 +410,7 @@ func getInt(unk interface{}) int64 {
// Add cache metrics section to accumulator
func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
if len(mBeansData.SolrMbeans) < 8 {
- return fmt.Errorf("no cache metric data to unmarshall")
+ return fmt.Errorf("no cache metric data to unmarshal")
}
var cacheMetrics map[string]Cache
if err := json.Unmarshal(mBeansData.SolrMbeans[7], &cacheMetrics); err != nil {
@@ -471,7 +477,18 @@ func (s *Solr) createHTTPClient() *http.Client {
}
func (s *Solr) gatherData(url string, v interface{}) error {
- r, err := s.client.Get(url)
+ req, reqErr := http.NewRequest(http.MethodGet, url, nil)
+ if reqErr != nil {
+ return reqErr
+ }
+
+ if s.Username != "" {
+ req.SetBasicAuth(s.Username, s.Password)
+ }
+
+ req.Header.Set("User-Agent", internal.ProductToken())
+
+ r, err := s.client.Do(req)
if err != nil {
return err
}
diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md
index e83aca304165a..7f7887769d916 100644
--- a/plugins/inputs/sqlserver/README.md
+++ b/plugins/inputs/sqlserver/README.md
@@ -1,12 +1,18 @@
# SQL Server Input Plugin
-
The `sqlserver` plugin provides metrics for your SQL Server instance. It
currently works with SQL Server 2008 SP3 and newer. Recorded metrics are
lightweight and use Dynamic Management Views supplied by SQL Server.
+### The SQL Server plugin supports the following editions/versions of SQL Server
+- SQL Server
+ - 2008 SP3 (with CU3)
+ - SQL Server 2008 R2 SP3 and newer versions
+- Azure SQL Database (Single)
+- Azure SQL Managed Instance
+
### Additional Setup:
-You have to create a login on every instance you want to monitor, with following script:
+You have to create a login on every SQL Server instance or Azure SQL Managed instance you want to monitor, with following script:
```sql
USE master;
GO
@@ -18,9 +24,21 @@ GRANT VIEW ANY DEFINITION TO [telegraf];
GO
```
+For Azure SQL Database, you require the View Database State permission and can create a user with a password directly in the database.
+```sql
+CREATE USER [telegraf] WITH PASSWORD = N'mystrongpassword';
+GO
+GRANT VIEW DATABASE STATE TO [telegraf];
+GO
+```
+
### Configuration:
```toml
+[agent]
+ ## Default data collection interval for all inputs, can be changed as per collection interval needs
+ interval = "10s"
+
# Read metrics from Microsoft SQL Server
[[inputs.sqlserver]]
## Specify instances to monitor with a list of connection strings.
@@ -28,39 +46,109 @@ GO
## By default, the host is localhost, listening on default port, TCP 1433.
## for Windows, the user is the currently running AD user (SSO).
## See https://github.com/denisenkom/go-mssqldb for detailed connection
- ## parameters.
+ ## parameters, in particular, tls connections can be created like so:
+ ## "encrypt=true;certificate=;hostNameInCertificate="
# servers = [
# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
# ]
+ ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
+ ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
+ ## Possible values for database_type are
+ ## "AzureSQLDB"
+ ## "SQLServer"
+ ## "AzureSQLManagedInstance"
+ # database_type = "AzureSQLDB"
+
## Optional parameter, setting this to 2 will use a new version
- ## of the collection queries that break compatibility with the original
- ## dashboards.
- query_version = 2
+ ## of the collection queries that break compatibility with the original dashboards.
+ ## Version 2 - is compatible from SQL Server 2008 Sp3 and later versions and also for SQL Azure DB
+ ## Version 2 is in the process of being deprecated, please consider using database_type.
+ # query_version = 2
## If you are using AzureDB, setting this to true will gather resource utilization metrics
# azuredb = false
- ## If you would like to exclude some of the metrics queries, list them here
- ## Possible choices:
+ ## Possible queries accross different versions of the collectors
+ ## Queries enabled by default for specific Database Type
+
+ ## database_type = AzureSQLDB by default collects the following queries
+ ## - AzureSQLDBWaitStats
+ ## - AzureSQLDBResourceStats
+ ## - AzureSQLDBResourceGovernance
+ ## - AzureSQLDBDatabaseIO
+ ## - AzureSQLDBServerProperties
+ ## - AzureSQLDBSQLOsWaitstats
+ ## - AzureSQLDBMemoryClerks
+ ## - AzureSQLDBPerformanceCounters
+ ## - AzureSQLDBRequests
+ ## - AzureSQLDBSchedulers
+
+ ## database_type = AzureSQLManagedInstance by default collects the following queries
+ ## - AzureSQLMIResourceStats
+ ## - AzureSQLMIResourceGovernance
+ ## - AzureSQLMIDatabaseIO
+ ## - AzureSQLMIServerProperties
+ ## - AzureSQLMIOsWaitstats
+ ## - AzureSQLMIMemoryClerks
+ ## - AzureSQLMIPerformanceCounters
+ ## - AzureSQLMIDBRequests
+ ## - AzureSQLMISchedulers
+
+ ## database_type = SQLServer by default collects the following queries
+ ## - SQLServerPerformanceCounters
+ ## - SQLServerWaitStatsCategorized
+ ## - SQLServerDatabaseIO
+ ## - SQLServerProperties
+ ## - SQLServerMemoryClerks
+ ## - SQLServerSchedulers
+ ## - SQLServerRequests
+ ## - SQLServerVolumeSpace
+ ## - SQLServerCpu
+
+ ## Version 2 by default collects the following queries
+ ## Version 2 is being deprecated, please consider using database_type.
## - PerformanceCounters
## - WaitStatsCategorized
## - DatabaseIO
- ## - DatabaseProperties
+ ## - ServerProperties
+ ## - MemoryClerk
+ ## - Schedulers
+ ## - SqlRequests
+ ## - VolumeSpace
+ ## - Cpu
+
+ ## Version 1 by default collects the following queries
+ ## Version 1 is deprecated, please consider using database_type.
+ ## - PerformanceCounters
+ ## - WaitStatsCategorized
## - CPUHistory
+ ## - DatabaseIO
## - DatabaseSize
## - DatabaseStats
+ ## - DatabaseProperties
## - MemoryClerk
## - VolumeSpace
- exclude_query = [ 'DatabaseIO' ]
+ ## - PerformanceMetrics
+
+
+
+ ## A list of queries to include. If not specified, all the above listed queries are used.
+ # include_query = []
+
+ ## A list of queries to explicitly ignore.
+ exclude_query = [ 'Schedulers' , 'SqlRequests' ]
+
+
+
```
### Metrics:
To provide backwards compatibility, this plugin support two versions of metrics queries.
-**Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries are written in such a way as to only gather SQL specific metrics (no disk space or overall CPU related metrics) and they only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software.
+**Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software.
-#### Version 1 (deprecated in 1.6):
+#### Version 1 (query_version=1): This is Deprecated in 1.6, all future development will be under configuration option database_type.
The original metrics queries provide:
- *Performance counters*: 1000+ metrics from `sys.dm_os_performance_counters`
- *Performance metrics*: special performance and ratio metrics
@@ -77,9 +165,8 @@ If you are using the original queries all stats have the following tags:
- `servername`: hostname:instance
- `type`: type of stats to easily filter measurements
-#### Version 2:
+#### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type.
The new (version 2) metrics provide:
-- *AzureDB*: AzureDB resource utilization from `sys.dm_db_resource_stats`
- *Database IO*: IO stats from `sys.dm_io_virtual_file_stats`
- *Memory Clerk*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name.
- *Performance Counters*: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included:
@@ -89,20 +176,84 @@ The new (version 2) metrics provide:
- *Memory*: PLE, Page reads/sec, Page writes/sec, + more
- *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more
- *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more
-- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version
+- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc.
- *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store.
+- *Schedulers* - This captures `sys.dm_os_schedulers`.
+- *SqlRequests* - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and
+ blocking sessions.
+- *VolumeSpace* - uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem.
+- *Cpu* - uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance).
+
+ In order to allow tracking on a per statement basis this query produces a
+ unique tag for each query. Depending on the database workload, this may
+ result in a high cardinality series. Reference the FAQ for tips on
+ [managing series cardinality][cardinality].
+
- *Azure Managed Instances*
- - Stats from `sys.server_resource_stats`:
- - cpu_count
- - server_memory
- - sku
- - engine_edition
- - hardware_type
- - total_storage_mb
- - available_storage_mb
- - uptime
-
-The following metrics can be used directly, with no delta calculations:
+ - Stats from `sys.server_resource_stats`
+ - Resource governance stats from `sys.dm_instance_resource_governance`
+- *Azure SQL Database* in addition to other stats
+ - Stats from `sys.dm_db_wait_stats`
+ - Resource governance stats from `sys.dm_user_db_resource_governance`
+ - Stats from `sys.dm_db_resource_stats`
+
+
+
+#### database_type = "AzureSQLDB
+These are metrics for Azure SQL Database (single database) and are very similar to version 2 but split out for maintenance reasons, better ability to test,differences in DMVs:
+- AzureSQLDBDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale.
+- AzureSQLDBMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`.
+= AzureSQLDBResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_user_db_resource_governance`
+- AzureSQLDBPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale.
+- AzureSQLDBServerProperties: Relevant Azure SQL relevent properties from such as Tier, #Vcores, Memory etc, storage, etc.
+- AzureSQLDBWaitstats: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected only as of the end of the a statement. and for a specific database only.
+- *AzureSQLOsWaitstats*: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide
+- *AzureSQLDBRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests`
+- *AzureSQLDBSchedulers* - This captures `sys.dm_os_schedulers` snapshots.
+
+
+#### database_type = "AzureSQLManagedInstance
+These are metrics for Azure SQL Managed instance, are very similar to version 2 but split out for maintenance reasons, better ability to test, differences in DMVs:
+- AzureSQLMIDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale.
+- AzureSQLMIMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`.
+- AzureSQLMIResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_instance_resource_governance`
+- AzureSQLMIPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale.
+- AzureSQLMIServerProperties: Relevant Azure SQL relevent properties such as Tier, #Vcores, Memory etc, storage, etc.
+- AzureSQLMIOsWaitstats: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide
+- AzureSQLMIRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests`
+- AzureSQLMISchedulers - This captures `sys.dm_os_schedulers` snapshots.
+
+#### database_type = "SQLServer
+- SQLServerDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats`
+- SQLServerMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name.
+- SQLServerPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included:
+ - *Activity*: Transactions/sec/database, Batch requests/sec, blocked processes, + more
+ - *Availability Groups*: Bytes sent to replica, Bytes received from replica, Log bytes received, Log send queue, transaction delay, + more
+ - *Log activity*: Log bytes flushed/sec, Log flushes/sec, Log Flush Wait Time
+ - *Memory*: PLE, Page reads/sec, Page writes/sec, + more
+ - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more
+ - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more
+- SQLServerProperties: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc.
+- SQLServerWaitStatsCategorized: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store.
+- SQLServerSchedulers - This captures `sys.dm_os_schedulers`.
+- SQLServerRequests - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and
+ blocking sessions.
+- SQLServerVolumeSpace - uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem.
+- SQLServerCpu - uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance).
+
+
+#### Output Measures
+The guiding principal is that all data collected from the same primary DMV ends up in the same measure irrespective of database_type.
+`sqlserver_database_io` - Used by AzureSQLDBDatabaseIO, AzureSQLMIDatabaseIO, SQLServerDatabaseIO, DatabaseIO given the data is from `sys.dm_io_virtual_file_stats`
+`sqlserver_waitstats` - Used by WaitStatsCategorized,AzureSQLDBOsWaitstats,AzureSQLMIOsWaitstats
+`sqlserver_server_properties` - Used by SQLServerProperties, AzureSQLDBServerProperties , AzureSQLMIServerProperties,ServerProperties
+`sqlserver_memory_clerks` - Used by SQLServerMemoryClerks, AzureSQLDBMemoryClerks, AzureSQLMIMemoryClerks,MemoryClerk
+`sqlserver_performance` - Used by SQLServerPerformanceCounters, AzureSQLDBPerformanceCounters, AzureSQLMIPerformanceCounters,PerformanceCounters
+`sys.dm_os_schedulers` - Used by SQLServerSchedulers,AzureSQLDBServerSchedulers, AzureSQLMIServerSchedulers
+
+
+
+The following Performance counter metrics can be used directly, with no delta calculations:
- SQLServer:Buffer Manager\Buffer cache hit ratio
- SQLServer:Buffer Manager\Page life expectancy
- SQLServer:Buffer Node\Page life expectancy
@@ -140,3 +291,6 @@ The following metrics can be used directly, with no delta calculations:
Version 2 queries have the following tags:
- `sql_instance`: Physical host and instance name (hostname:instance)
+- `database_name`: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct.
+
+[cardinality]: /docs/FAQ.md#user-content-q-how-can-i-manage-series-cardinality
diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go
new file mode 100644
index 0000000000000..048b20af26191
--- /dev/null
+++ b/plugins/inputs/sqlserver/azuresqlqueries.go
@@ -0,0 +1,998 @@
+package sqlserver
+
+import (
+ _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization
+)
+
+// Only executed if AzureDB flag is set
+const sqlAzureDBResourceStats string = `SET DEADLOCK_PRIORITY -10;
+IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB?
+BEGIN
+ SELECT TOP(1)
+ 'sqlserver_azure_db_resource_stats' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ DB_NAME() as [database_name],
+ cast(avg_cpu_percent as float) as avg_cpu_percent,
+ cast(avg_data_io_percent as float) as avg_data_io_percent,
+ cast(avg_log_write_percent as float) as avg_log_write_percent,
+ cast(avg_memory_usage_percent as float) as avg_memory_usage_percent,
+ cast(xtp_storage_percent as float) as xtp_storage_percent,
+ cast(max_worker_percent as float) as max_worker_percent,
+ cast(max_session_percent as float) as max_session_percent,
+ dtu_limit,
+ cast(avg_login_rate_percent as float) as avg_login_rate_percent ,
+ end_time,
+ cast(avg_instance_memory_percent as float) as avg_instance_memory_percent ,
+ cast(avg_instance_cpu_percent as float) as avg_instance_cpu_percent
+ FROM
+ sys.dm_db_resource_stats WITH (NOLOCK)
+ ORDER BY
+ end_time DESC
+END
+`
+
+// Resource Governamce is only relevant to Azure SQL DB into separate collector
+// This will only be collected for Azure SQL Database.
+const sqlAzureDBResourceGovernance string = `
+IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB?
+SELECT
+ 'sqlserver_db_resource_governance' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ DB_NAME() as [database_name],
+ slo_name,
+ dtu_limit,
+ max_cpu,
+ cap_cpu,
+ instance_cap_cpu,
+ max_db_memory,
+ max_db_max_size_in_mb,
+ db_file_growth_in_mb,
+ log_size_in_mb,
+ instance_max_worker_threads,
+ primary_group_max_workers,
+ instance_max_log_rate,
+ primary_min_log_rate,
+ primary_max_log_rate,
+ primary_group_min_io,
+ primary_group_max_io,
+ primary_group_min_cpu,
+ primary_group_max_cpu,
+ primary_pool_max_workers,
+ pool_max_io,
+ checkpoint_rate_mbps,
+ checkpoint_rate_io,
+ volume_local_iops,
+ volume_managed_xstore_iops,
+ volume_external_xstore_iops,
+ volume_type_local_iops,
+ volume_type_managed_xstore_iops,
+ volume_type_external_xstore_iops,
+ volume_pfs_iops,
+ volume_type_pfs_iops
+ FROM
+ sys.dm_user_db_resource_governance WITH (NOLOCK);
+`
+
+// DB level wait stats that are only relevant to Azure SQL DB into separate collector
+// This will only be collected for Azure SQL Database.
+const sqlAzureDBWaitStats string = `SET DEADLOCK_PRIORITY -10;
+IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB?
+ SELECT
+ 'sqlserver_azuredb_waitstats' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ DB_NAME() as [database_name'],
+ dbws.wait_type,
+ dbws.wait_time_ms,
+ dbws.wait_time_ms - signal_wait_time_ms AS [resource_wait_ms],
+ dbws.signal_wait_time_ms,
+ dbws.max_wait_time_ms,
+ dbws.waiting_tasks_count
+ FROM
+ sys.dm_db_wait_stats AS dbws WITH (NOLOCK)
+ WHERE
+ dbws.wait_type NOT IN (
+ N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP',
+ N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE',
+ N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE',
+ N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE',
+ N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE',
+ N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX',
+ N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT',
+ N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE',
+ N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE',
+ N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE',
+ N'PARALLEL_REDO_WORKER_WAIT_WORK',
+ N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS',
+ N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS',
+ N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST',
+ N'PREEMPTIVE_OS_DEVICEOPS',
+ N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER',
+ N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT',
+ N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE',
+ N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT',
+ N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP',
+ N'QDS_ASYNC_QUEUE',
+ N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH',
+ N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP',
+ N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY',
+ N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK',
+ N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP',
+ N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
+ N'SQLTRACE_WAIT_ENTRIES',
+ N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT',
+ N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE',
+ N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN',
+ N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT',
+ N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT')
+ AND waiting_tasks_count > 0
+ AND wait_time_ms > 100;
+`
+
+const sqlAzureDBDatabaseIO = `
+SET DEADLOCK_PRIORITY -10;
+IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB?
+SELECT
+ 'sqlserver_database_io' As [measurement]
+ ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ ,DB_NAME() as database_name
+ ,vfs.database_id -- /*needed as tempdb is different for each Azure SQL DB as grouping has to be by logical server + db_name + database_id*/
+ ,vfs.file_id
+ ,vfs.io_stall_read_ms AS read_latency_ms
+ ,vfs.num_of_reads AS reads
+ ,vfs.num_of_bytes_read AS read_bytes
+ ,vfs.io_stall_write_ms AS write_latency_ms
+ ,vfs.num_of_writes AS writes
+ ,vfs.num_of_bytes_written AS write_bytes
+ ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms]
+ ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms]
+ ,CASE
+ WHEN (vfs.database_id = 0) THEN 'RBPEX'
+ ELSE b.logical_filename
+ END as logical_filename
+ ,CASE
+ WHEN (vfs.database_id = 0) THEN 'RBPEX'
+ ELSE b.physical_filename
+ END as physical_filename
+ ,CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'DATA' END AS file_type
+ ,ISNULL(size,0)/128 AS current_size_mb
+ ,ISNULL(FILEPROPERTY(b.logical_filename,'SpaceUsed')/128,0) as space_used_mb
+ FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs
+ -- needed to get Tempdb file names on Azure SQL DB so you can join appropriately. Without this had a bug where join was only on file_id
+ LEFT OUTER join
+ (
+ SELECT DB_ID() as database_id, file_id, logical_filename=name COLLATE SQL_Latin1_General_CP1_CI_AS
+ , physical_filename = physical_name COLLATE SQL_Latin1_General_CP1_CI_AS, size from sys.database_files
+ where type <> 2
+ UNION ALL
+ SELECT 2 as database_id, file_id, logical_filename = name , physical_filename = physical_name, size
+ from tempdb.sys.database_files
+ ) b ON b.database_id = vfs.database_id and b.file_id = vfs.file_id
+ where vfs.database_id IN (DB_ID(),0,2)
+`
+
+const sqlAzureDBProperties = `
+DECLARE @EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+IF @EngineEdition = 5 -- Is this Azure SQL DB?
+SELECT 'sqlserver_server_properties' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ DB_NAME() as [database_name],
+ (SELECT count(*) FROM sys.dm_os_schedulers WHERE status = 'VISIBLE ONLINE') AS cpu_count,
+ (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory,
+ slo.edition as sku,
+ @EngineEdition AS engine_edition,
+ slo.service_objective AS hardware_type,
+ CASE
+ WHEN slo.edition = 'Hyperscale' then NULL
+ ELSE cast(DATABASEPROPERTYEX(DB_NAME(),'MaxSizeInBytes') as bigint)/(1024*1024)
+ END AS total_storage_mb,
+ CASE
+ WHEN slo.edition = 'Hyperscale' then NULL
+ ELSE
+ (cast(DATABASEPROPERTYEX(DB_NAME(),'MaxSizeInBytes') as bigint)/(1024*1024)-
+ (select SUM(size/128 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS int)/128) FROM sys.database_files )
+ )
+ END AS available_storage_mb,
+ (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime
+ FROM sys.databases d
+ -- sys.databases.database_id may not match current DB_ID on Azure SQL DB
+ CROSS JOIN sys.database_service_objectives slo
+ WHERE d.name = DB_NAME() AND slo.database_id = DB_ID();
+`
+
+const sqlAzureDBOsWaitStats = `
+SET DEADLOCK_PRIORITY -10;
+IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB?
+SELECT
+'sqlserver_waitstats' AS [measurement],
+REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+DB_NAME() as [database_name],
+ws.wait_type,
+wait_time_ms,
+wait_time_ms - signal_wait_time_ms AS [resource_wait_ms],
+signal_wait_time_ms,
+max_wait_time_ms,
+waiting_tasks_count,
+CASE
+ WHEN ws.wait_type LIKE 'SOS_SCHEDULER_YIELD' then 'CPU'
+ WHEN ws.wait_type = 'THREADPOOL' THEN 'Worker Thread'
+ WHEN ws.wait_type LIKE 'LCK[_]%' THEN 'Lock'
+ WHEN ws.wait_type LIKE 'LATCH[_]%' THEN 'Latch'
+ WHEN ws.wait_type LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch'
+ WHEN ws.wait_type LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO'
+ WHEN ws.wait_type LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation'
+ WHEN ws.wait_type LIKE 'CLR[_]%' or ws.wait_type like 'SQLCLR%' THEN 'SQL CLR'
+ WHEN ws.wait_type LIKE 'DBMIRROR_%' THEN 'Mirroring'
+ WHEN ws.wait_type LIKE 'DTC[_]%' or ws.wait_type LIKE 'DTCNEW%' or ws.wait_type LIKE 'TRAN_%'
+ or ws.wait_type LIKE 'XACT%' or ws.wait_type like 'MSQL_XACT%' THEN 'Transaction'
+ WHEN ws.wait_type LIKE 'SLEEP[_]%' or
+ ws.wait_type IN ('LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP'
+ , 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT'
+ , 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE'
+ , 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle'
+ WHEN ws.wait_type IN('ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION'
+ ,'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO'
+ WHEN ws.wait_type LIKE 'PREEMPTIVE_%' THEN 'Preemptive'
+ WHEN ws.wait_type LIKE 'BROKER[_]%' THEN 'Service Broker'
+ WHEN ws.wait_type IN ('WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND'
+ , 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO'
+ WHEN ws.wait_type LIKE 'LOG_RATE%' then 'Log Rate Governor'
+ WHEN ws.wait_type LIKE 'HADR_THROTTLE[_]%'
+ or ws.wait_type = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor'
+ WHEN ws.wait_type LIKE 'RBIO_RG%' or ws.wait_type like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor'
+ WHEN ws.wait_type LIKE 'RBIO[_]%' or ws.wait_type like 'WAIT_RBIO[_]%' then 'VLDB RBIO'
+ WHEN ws.wait_type IN('ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF'
+ ,'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO'
+ WHEN ws.wait_type IN ( 'CXPACKET', 'CXCONSUMER')
+ or ws.wait_type like 'HT%' or ws.wait_type like 'BMP%'
+ or ws.wait_type like 'BP%' THEN 'Parallelism'
+WHEN ws.wait_type IN('CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE'
+ ,'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT'
+ ,'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory'
+ WHEN ws.wait_type IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait'
+ WHEN ws.wait_type LIKE 'HADR[_]%' or ws.wait_type LIKE 'PWAIT_HADR%'
+ or ws.wait_type LIKE 'REPLICA[_]%' or ws.wait_type LIKE 'REPL_%'
+ or ws.wait_type LIKE 'SE_REPL[_]%'
+ or ws.wait_type LIKE 'FCB_REPLICA%' THEN 'Replication'
+ WHEN ws.wait_type LIKE 'SQLTRACE[_]%' or ws.wait_type
+ IN ('TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION'
+ , 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN'
+ , 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing'
+ WHEN ws.wait_type IN ('FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX',
+ 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK'
+ , 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR'
+ , 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search'
+ ELSE 'Other'
+END as wait_category
+FROM
+sys.dm_os_wait_stats AS ws WITH (NOLOCK)
+WHERE
+ws.wait_type NOT IN (
+ N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP',
+ N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE',
+ N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE',
+ N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_
+ _QUEUE',
+ N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE',
+ N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX',
+ N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT',
+ N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE',
+ N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE',
+ N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE',
+ N'PARALLEL_REDO_WORKER_WAIT_WORK',
+ N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS',
+ N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS',
+ N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST',
+ N'PREEMPTIVE_OS_DEVICEOPS',
+ N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER',
+ N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT',
+ N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE',
+ N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT',
+ N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP',
+ N'QDS_ASYNC_QUEUE',
+ N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH',
+ N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP',
+ N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY',
+ N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK',
+ N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP',
+ N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
+ N'SQLTRACE_WAIT_ENTRIES',
+ N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT',
+ N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE',
+ N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN',
+ N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT',
+ N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES'
+ , 'RBIO_COMM_RETRY')
+AND waiting_tasks_count > 10
+AND wait_time_ms > 100;
+`
+
+const sqlAzureDBMemoryClerks = `
+SET DEADLOCK_PRIORITY -10;
+IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB?
+SELECT
+ 'sqlserver_memory_clerks' AS [measurement]
+ ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance]
+ ,DB_NAME() AS [database_name]
+ ,mc.[type] AS [clerk_type]
+ ,SUM(mc.[pages_kb]) AS [size_kb]
+FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK)
+GROUP BY
+ mc.[type]
+HAVING
+ SUM(mc.[pages_kb]) >= 1024
+OPTION(RECOMPILE);
+`
+
+const sqlAzureDBPerformanceCounters = `
+IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB?
+BEGIN
+DECLARE @PCounters TABLE
+(
+ object_name nvarchar(128),
+ counter_name nvarchar(128),
+ instance_name nvarchar(128),
+ cntr_value bigint,
+ cntr_type INT ,
+ Primary Key(object_name, counter_name,instance_name)
+);
+
+WITH PerfCounters AS
+ (
+ SELECT DISTINCT
+ RTrim(spi.object_name) object_name,
+ RTrim(spi.counter_name) counter_name,
+ CASE WHEN (
+ RTRIM(spi.object_name) LIKE '%:Databases'
+ OR RTRIM(spi.object_name) LIKE '%:Database Replica'
+ OR RTRIM(spi.object_name) LIKE '%:Catalog Metadata'
+ OR RTRIM(spi.object_name) LIKE '%:Query Store'
+ OR RTRIM(spi.object_name) LIKE '%:Columnstore'
+ OR RTRIM(spi.object_name) LIKE '%:Advanced Analytics')
+ AND TRY_CONVERT(uniqueidentifier, spi.instance_name)
+ IS NOT NULL -- for cloud only
+ THEN ISNULL(d.name,RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value
+ WHEN RTRIM(object_name) LIKE '%:Availability Replica'
+ AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only
+ THEN ISNULL(d.name,RTRIM(spi.instance_name)) + RTRIM(SUBSTRING(spi.instance_name, 37, LEN(spi.instance_name)))
+ ELSE RTRIM(spi.instance_name)
+ END AS instance_name,
+ CAST(spi.cntr_value AS BIGINT) AS cntr_value,
+ spi.cntr_type
+ FROM sys.dm_os_performance_counters AS spi
+ LEFT JOIN sys.databases AS d
+ ON LEFT(spi.instance_name, 36) -- some instance_name values have an additional identifier appended after the GUID
+ =CASE WHEN -- in SQL DB standalone, physical_database_name for master is the GUID of the user database
+ d.name = 'master' AND TRY_CONVERT(uniqueidentifier, d.physical_database_name) IS NOT NULL
+ THEN d.name
+ ELSE d.physical_database_name
+ END
+ WHERE (
+ counter_name IN (
+ 'SQL Compilations/sec',
+ 'SQL Re-Compilations/sec',
+ 'User Connections',
+ 'Batch Requests/sec',
+ 'Logouts/sec',
+ 'Logins/sec',
+ 'Processes blocked',
+ 'Latch Waits/sec',
+ 'Full Scans/sec',
+ 'Index Searches/sec',
+ 'Page Splits/sec',
+ 'Page lookups/sec',
+ 'Page reads/sec',
+ 'Page writes/sec',
+ 'Readahead pages/sec',
+ 'Lazy writes/sec',
+ 'Checkpoint pages/sec',
+ 'Page life expectancy',
+ 'Log File(s) Size (KB)',
+ 'Log File(s) Used Size (KB)',
+ 'Data File(s) Size (KB)',
+ 'Transactions/sec',
+ 'Write Transactions/sec',
+ 'Active Temp Tables',
+ 'Temp Tables Creation Rate',
+ 'Temp Tables For Destruction',
+ 'Free Space in tempdb (KB)',
+ 'Version Store Size (KB)',
+ 'Memory Grants Pending',
+ 'Memory Grants Outstanding',
+ 'Free list stalls/sec',
+ 'Buffer cache hit ratio',
+ 'Buffer cache hit ratio base',
+ 'RBPEX cache hit ratio',
+ 'RBPEX cache hit ratio base',
+ 'Backup/Restore Throughput/sec',
+ 'Total Server Memory (KB)',
+ 'Target Server Memory (KB)',
+ 'Log Flushes/sec',
+ 'Log Flush Wait Time',
+ 'Memory broker clerk size',
+ 'Log Bytes Flushed/sec',
+ 'Bytes Sent to Replica/sec',
+ 'Log Send Queue',
+ 'Bytes Sent to Transport/sec',
+ 'Sends to Replica/sec',
+ 'Bytes Sent to Transport/sec',
+ 'Sends to Transport/sec',
+ 'Bytes Received from Replica/sec',
+ 'Receives from Replica/sec',
+ 'Flow Control Time (ms/sec)',
+ 'Flow Control/sec',
+ 'Resent Messages/sec',
+ 'Redone Bytes/sec',
+ 'XTP Memory Used (KB)',
+ 'Transaction Delay',
+ 'Log Bytes Received/sec',
+ 'Log Apply Pending Queue',
+ 'Redone Bytes/sec',
+ 'Recovery Queue',
+ 'Log Apply Ready Queue',
+ 'CPU usage %',
+ 'CPU usage % base',
+ 'Queued requests',
+ 'Requests completed/sec',
+ 'Blocked tasks',
+ 'Active memory grant amount (KB)',
+ 'Disk Read Bytes/sec',
+ 'Disk Read IO Throttled/sec',
+ 'Disk Read IO/sec',
+ 'Disk Write Bytes/sec',
+ 'Disk Write IO Throttled/sec',
+ 'Disk Write IO/sec',
+ 'Used memory (KB)',
+ 'Forwarded Records/sec',
+ 'Background Writer pages/sec',
+ 'Percent Log Used',
+ 'Log Send Queue KB',
+ 'Redo Queue KB',
+ 'Mirrored Write Transactions/sec',
+ 'Group Commit Time',
+ 'Group Commits/Sec'
+ )
+ ) OR (
+ object_name LIKE '%User Settable%'
+ OR object_name LIKE '%SQL Errors%'
+ ) OR (
+ object_name LIKE '%Batch Resp Statistics%'
+ ) OR (
+ instance_name IN ('_Total')
+ AND counter_name IN (
+ 'Lock Timeouts/sec',
+ 'Lock Timeouts (timeout > 0)/sec',
+ 'Number of Deadlocks/sec',
+ 'Lock Waits/sec',
+ 'Latch Waits/sec'
+ )
+ )
+ )
+INSERT INTO @PCounters select * from PerfCounters
+
+select
+ 'sqlserver_performance' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ DB_NAME() as [database_name],
+ pc.object_name AS [object],
+ pc.counter_name AS [counter],
+ CASE pc.instance_name
+ WHEN '_Total' THEN 'Total'
+ ELSE ISNULL(pc.instance_name,'')
+ END AS [instance],
+ CAST(CASE WHEN pc.cntr_type = 537003264 AND pc1.cntr_value > 0 THEN (pc.cntr_value * 1.0) / (pc1.cntr_value * 1.0) * 100 ELSE pc.cntr_value END AS float(10)) AS [value],
+ -- cast to string as TAG
+ cast(pc.cntr_type as varchar(25)) as [counter_type]
+from @PCounters pc
+ LEFT OUTER JOIN @PCounters AS pc1
+ ON (
+ pc.counter_name = REPLACE(pc1.counter_name,' base','')
+ OR pc.counter_name = REPLACE(pc1.counter_name,' base',' (ms)')
+ )
+ AND pc.object_name = pc1.object_name
+ AND pc.instance_name = pc1.instance_name
+ AND pc1.counter_name LIKE '%base'
+WHERE pc.counter_name NOT LIKE '% base'
+OPTION (RECOMPILE)
+END
+`
+
+const sqlAzureDBRequests string = `
+SET NOCOUNT ON;
+IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB?
+BEGIN
+ SELECT blocking_session_id into #blockingSessions FROM sys.dm_exec_requests WHERE blocking_session_id != 0
+ create index ix_blockingSessions_1 on #blockingSessions (blocking_session_id)
+ SELECT
+ 'sqlserver_requests' AS [measurement]
+ , REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ , DB_NAME() as [database_name]
+ , s.session_id
+ , ISNULL(r.request_id,0) as request_id
+ , DB_NAME(s.database_id) as session_db_name
+ , COALESCE(r.status,s.status) AS status
+ , COALESCE(r.cpu_time,s.cpu_time) AS cpu_time_ms
+ , COALESCE(r.total_elapsed_time,s.total_elapsed_time) AS total_elapsed_time_ms
+ , COALESCE(r.logical_reads,s.logical_reads) AS logical_reads
+ , COALESCE(r.writes,s.writes) AS writes
+ , r.command
+ , r.wait_time as wait_time_ms
+ , r.wait_type
+ , r.wait_resource
+ , r.blocking_session_id
+ , s.program_name
+ , s.host_name
+ , s.nt_user_name
+ , s.open_transaction_count AS open_transaction
+ , LEFT (CASE COALESCE(r.transaction_isolation_level, s.transaction_isolation_level)
+ WHEN 0 THEN '0-Read Committed'
+ WHEN 1 THEN '1-Read Uncommitted (NOLOCK)'
+ WHEN 2 THEN '2-Read Committed'
+ WHEN 3 THEN '3-Repeatable Read'
+ WHEN 4 THEN '4-Serializable'
+ WHEN 5 THEN '5-Snapshot'
+ ELSE CONVERT (varchar(30), r.transaction_isolation_level) + '-UNKNOWN'
+ END, 30) AS transaction_isolation_level
+ , r.granted_query_memory as granted_query_memory_pages
+ , r.percent_complete
+ , SUBSTRING(
+ qt.text,
+ r.statement_start_offset / 2 + 1,
+ (CASE WHEN r.statement_end_offset = -1
+ THEN DATALENGTH(qt.text)
+ ELSE r.statement_end_offset
+ END - r.statement_start_offset) / 2 + 1
+ ) AS statement_text
+ , qt.objectid
+ , QUOTENAME(OBJECT_SCHEMA_NAME(qt.objectid,qt.dbid)) + '.' + QUOTENAME(OBJECT_NAME(qt.objectid,qt.dbid)) as stmt_object_name
+ , DB_NAME(qt.dbid) stmt_db_name
+ , CONVERT(varchar(20),[query_hash],1) as [query_hash]
+ , CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash]
+ FROM sys.dm_exec_sessions AS s
+ LEFT OUTER JOIN sys.dm_exec_requests AS r
+ ON s.session_id = r.session_id
+ OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt
+ WHERE 1 = 1
+ AND (r.session_id IS NOT NULL AND (s.is_user_process = 1 OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping')))
+ OR (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions))
+ OPTION(MAXDOP 1)
+END
+`
+
+const sqlAzureMIProperties = `
+DECLARE @EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+IF @EngineEdition = 8 /*Managed Instance*/
+ SELECT TOP 1 'sqlserver_server_properties' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ virtual_core_count AS cpu_count,
+ (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory,
+ sku,
+ @EngineEdition AS engine_edition,
+ hardware_generation AS hardware_type,
+ cast(reserved_storage_mb as bigint) AS total_storage_mb,
+ cast((reserved_storage_mb - storage_space_used_mb) as bigint) AS available_storage_mb,
+ (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime,
+ SERVERPROPERTY('ProductVersion') AS sql_version,
+ db_online,
+ db_restoring,
+ db_recovering,
+ db_recoveryPending,
+ db_suspect
+ FROM sys.server_resource_stats
+ CROSS APPLY
+ (SELECT SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online,
+ SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring,
+ SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering,
+ SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending,
+ SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect,
+ SUM( CASE WHEN state = 6 or state = 10 THEN 1 ELSE 0 END ) AS db_offline
+ FROM sys.databases
+ ) AS dbs
+ ORDER BY start_time DESC;
+`
+
+const sqlAzureMIResourceStats = `
+IF SERVERPROPERTY('EngineEdition') = 8 /*Managed Instance*/
+ SELECT TOP(1)
+ 'sqlserver_azure_db_resource_stats' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ cast(avg_cpu_percent as float) as avg_cpu_percent
+ FROM
+ sys.server_resource_stats;
+`
+
+const sqlAzureMIResourceGovernance string = `
+IF SERVERPROPERTY('EngineEdition') = 8 -- Is this Azure SQL Managed Instance?
+ SELECT
+ 'sqlserver_instance_resource_governance' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ instance_cap_cpu,
+ instance_max_log_rate,
+ instance_max_worker_threads,
+ tempdb_log_file_number,
+ volume_local_iops,
+ volume_external_xstore_iops,
+ volume_managed_xstore_iops,
+ volume_type_local_iops as voltype_local_iops,
+ volume_type_managed_xstore_iops as voltype_man_xtore_iops,
+ volume_type_external_xstore_iops as voltype_ext_xtore_iops,
+ volume_external_xstore_iops as vol_ext_xtore_iops
+ from
+ sys.dm_instance_resource_governance;
+`
+
+const sqlAzureMIDatabaseIO = `
+SET DEADLOCK_PRIORITY -10;
+IF SERVERPROPERTY('EngineEdition') = 8 /*Managed Instance*/
+ SELECT
+ 'sqlserver_database_io' AS [measurement]
+ ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ ,COALESCE(mf.[physical_name],'RBPEX') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension
+ ,COALESCE(mf.[name],'RBPEX') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension
+ ,mf.[type_desc] AS [file_type]
+ ,vfs.[io_stall_read_ms] AS [read_latency_ms]
+ ,vfs.[num_of_reads] AS [reads]
+ ,vfs.[num_of_bytes_read] AS [read_bytes]
+ ,vfs.[io_stall_write_ms] AS [write_latency_ms]
+ ,vfs.[num_of_writes] AS [writes]
+ ,vfs.[num_of_bytes_written] AS [write_bytes]
+ ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms]
+ ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms]
+ FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs
+ LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK)
+ ON vfs.[database_id] = mf.[database_id] AND vfs.[file_id] = mf.[file_id]
+ where vfs.[database_id] < 32760
+`
+
+const sqlAzureMIMemoryClerks = `
+SET DEADLOCK_PRIORITY -10;
+IF SERVERPROPERTY('EngineEdition') = 8 /*Managed Instance*/
+SELECT
+ 'sqlserver_memory_clerks' AS [measurement]
+ ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance]
+ ,mc.[type] AS [clerk_type]
+ ,SUM(mc.[pages_kb]) AS [size_kb]
+FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK)
+GROUP BY
+ mc.[type]
+HAVING
+ SUM(mc.[pages_kb]) >= 1024
+OPTION(RECOMPILE);
+`
+
+const sqlAzureMIOsWaitStats = `
+SET DEADLOCK_PRIORITY -10;
+IF SERVERPROPERTY('EngineEdition') = 8 /*Managed Instance*/
+SELECT
+'sqlserver_waitstats' AS [measurement],
+REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ws.wait_type,
+wait_time_ms,
+wait_time_ms - signal_wait_time_ms AS [resource_wait_ms],
+signal_wait_time_ms,
+max_wait_time_ms,
+waiting_tasks_count,
+CASE
+ WHEN ws.wait_type LIKE 'SOS_SCHEDULER_YIELD' then 'CPU'
+ WHEN ws.wait_type = 'THREADPOOL' THEN 'Worker Thread'
+ WHEN ws.wait_type LIKE 'LCK[_]%' THEN 'Lock'
+ WHEN ws.wait_type LIKE 'LATCH[_]%' THEN 'Latch'
+ WHEN ws.wait_type LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch'
+ WHEN ws.wait_type LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO'
+ WHEN ws.wait_type LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation'
+ WHEN ws.wait_type LIKE 'CLR[_]%' or ws.wait_type like 'SQLCLR%' THEN 'SQL CLR'
+ WHEN ws.wait_type LIKE 'DBMIRROR_%' THEN 'Mirroring'
+ WHEN ws.wait_type LIKE 'DTC[_]%' or ws.wait_type LIKE 'DTCNEW%' or ws.wait_type LIKE 'TRAN_%'
+ or ws.wait_type LIKE 'XACT%' or ws.wait_type like 'MSQL_XACT%' THEN 'Transaction'
+ WHEN ws.wait_type LIKE 'SLEEP[_]%' or
+ ws.wait_type IN ('LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP'
+ , 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT'
+ , 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE'
+ , 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle'
+ WHEN ws.wait_type IN('ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION'
+ ,'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO'
+ WHEN ws.wait_type LIKE 'PREEMPTIVE_%' THEN 'Preemptive'
+ WHEN ws.wait_type LIKE 'BROKER[_]%' THEN 'Service Broker'
+ WHEN ws.wait_type IN ('WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND'
+ , 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO'
+ WHEN ws.wait_type LIKE 'LOG_RATE%' then 'Log Rate Governor'
+ WHEN ws.wait_type LIKE 'HADR_THROTTLE[_]%'
+ or ws.wait_type = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor'
+ WHEN ws.wait_type LIKE 'RBIO_RG%' or ws.wait_type like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor'
+ WHEN ws.wait_type LIKE 'RBIO[_]%' or ws.wait_type like 'WAIT_RBIO[_]%' then 'VLDB RBIO'
+ WHEN ws.wait_type IN('ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF'
+ ,'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO'
+ WHEN ws.wait_type IN ( 'CXPACKET', 'CXCONSUMER')
+ or ws.wait_type like 'HT%' or ws.wait_type like 'BMP%'
+ or ws.wait_type like 'BP%' THEN 'Parallelism'
+WHEN ws.wait_type IN('CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE'
+ ,'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT'
+ ,'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory'
+ WHEN ws.wait_type IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait'
+ WHEN ws.wait_type LIKE 'HADR[_]%' or ws.wait_type LIKE 'PWAIT_HADR%'
+ or ws.wait_type LIKE 'REPLICA[_]%' or ws.wait_type LIKE 'REPL_%'
+ or ws.wait_type LIKE 'SE_REPL[_]%'
+ or ws.wait_type LIKE 'FCB_REPLICA%' THEN 'Replication'
+ WHEN ws.wait_type LIKE 'SQLTRACE[_]%' or ws.wait_type
+ IN ('TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION'
+ , 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN'
+ , 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing'
+ WHEN ws.wait_type IN ('FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX',
+ 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK'
+ , 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR'
+ , 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search'
+ ELSE 'Other'
+END as wait_category
+FROM
+sys.dm_os_wait_stats AS ws WITH (NOLOCK)
+WHERE
+ws.wait_type NOT IN (
+ N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP',
+ N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE',
+ N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE',
+ N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_
+ _QUEUE',
+ N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE',
+ N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX',
+ N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT',
+ N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE',
+ N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE',
+ N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE',
+ N'PARALLEL_REDO_WORKER_WAIT_WORK',
+ N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS',
+ N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS',
+ N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST',
+ N'PREEMPTIVE_OS_DEVICEOPS',
+ N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER',
+ N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT',
+ N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE',
+ N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT',
+ N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP',
+ N'QDS_ASYNC_QUEUE',
+ N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH',
+ N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP',
+ N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY',
+ N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK',
+ N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP',
+ N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
+ N'SQLTRACE_WAIT_ENTRIES',
+ N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT',
+ N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE',
+ N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN',
+ N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT',
+ N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES'
+ , 'RBIO_COMM_RETRY')
+AND waiting_tasks_count > 10
+AND wait_time_ms > 100;
+`
+
+const sqlAzureMIPerformanceCounters = `
+SET DEADLOCK_PRIORITY -10;
+IF SERVERPROPERTY('EngineEdition') = 8 /*Managed Instance*/
+DECLARE @PCounters TABLE
+(
+ object_name nvarchar(128),
+ counter_name nvarchar(128),
+ instance_name nvarchar(128),
+ cntr_value bigint,
+ cntr_type INT ,
+ Primary Key(object_name, counter_name,instance_name)
+);
+
+WITH PerfCounters AS
+ (
+ SELECT DISTINCT
+ RTrim(spi.object_name) object_name,
+ RTrim(spi.counter_name) counter_name,
+ CASE WHEN (
+ RTRIM(spi.object_name) LIKE '%:Databases'
+ OR RTRIM(spi.object_name) LIKE '%:Database Replica'
+ OR RTRIM(spi.object_name) LIKE '%:Catalog Metadata'
+ OR RTRIM(spi.object_name) LIKE '%:Query Store'
+ OR RTRIM(spi.object_name) LIKE '%:Columnstore'
+ OR RTRIM(spi.object_name) LIKE '%:Advanced Analytics')
+ AND TRY_CONVERT(uniqueidentifier, spi.instance_name)
+ IS NOT NULL -- for cloud only
+ THEN ISNULL(d.name,RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value
+ WHEN RTRIM(object_name) LIKE '%:Availability Replica'
+ AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only
+ THEN ISNULL(d.name,RTRIM(spi.instance_name)) + RTRIM(SUBSTRING(spi.instance_name, 37, LEN(spi.instance_name)))
+ ELSE RTRIM(spi.instance_name)
+ END AS instance_name,
+ CAST(spi.cntr_value AS BIGINT) AS cntr_value,
+ spi.cntr_type
+ FROM sys.dm_os_performance_counters AS spi
+ LEFT JOIN sys.databases AS d
+ ON LEFT(spi.instance_name, 36) -- some instance_name values have an additional identifier appended after the GUID
+ =CASE WHEN -- in SQL DB standalone, physical_database_name for master is the GUID of the user database
+ d.name = 'master' AND TRY_CONVERT(uniqueidentifier, d.physical_database_name) IS NOT NULL
+ THEN d.name
+ ELSE d.physical_database_name
+ END
+ WHERE (
+ counter_name IN (
+ 'SQL Compilations/sec',
+ 'SQL Re-Compilations/sec',
+ 'User Connections',
+ 'Batch Requests/sec',
+ 'Logouts/sec',
+ 'Logins/sec',
+ 'Processes blocked',
+ 'Latch Waits/sec',
+ 'Full Scans/sec',
+ 'Index Searches/sec',
+ 'Page Splits/sec',
+ 'Page lookups/sec',
+ 'Page reads/sec',
+ 'Page writes/sec',
+ 'Readahead pages/sec',
+ 'Lazy writes/sec',
+ 'Checkpoint pages/sec',
+ 'Page life expectancy',
+ 'Log File(s) Size (KB)',
+ 'Log File(s) Used Size (KB)',
+ 'Data File(s) Size (KB)',
+ 'Transactions/sec',
+ 'Write Transactions/sec',
+ 'Active Temp Tables',
+ 'Temp Tables Creation Rate',
+ 'Temp Tables For Destruction',
+ 'Free Space in tempdb (KB)',
+ 'Version Store Size (KB)',
+ 'Memory Grants Pending',
+ 'Memory Grants Outstanding',
+ 'Free list stalls/sec',
+ 'Buffer cache hit ratio',
+ 'Buffer cache hit ratio base',
+ 'RBPEX cache hit ratio',
+ 'RBPEX cache hit ratio base',
+ 'Backup/Restore Throughput/sec',
+ 'Total Server Memory (KB)',
+ 'Target Server Memory (KB)',
+ 'Log Flushes/sec',
+ 'Log Flush Wait Time',
+ 'Memory broker clerk size',
+ 'Log Bytes Flushed/sec',
+ 'Bytes Sent to Replica/sec',
+ 'Log Send Queue',
+ 'Bytes Sent to Transport/sec',
+ 'Sends to Replica/sec',
+ 'Bytes Sent to Transport/sec',
+ 'Sends to Transport/sec',
+ 'Bytes Received from Replica/sec',
+ 'Receives from Replica/sec',
+ 'Flow Control Time (ms/sec)',
+ 'Flow Control/sec',
+ 'Resent Messages/sec',
+ 'Redone Bytes/sec',
+ 'XTP Memory Used (KB)',
+ 'Transaction Delay',
+ 'Log Bytes Received/sec',
+ 'Log Apply Pending Queue',
+ 'Redone Bytes/sec',
+ 'Recovery Queue',
+ 'Log Apply Ready Queue',
+ 'CPU usage %',
+ 'CPU usage % base',
+ 'Queued requests',
+ 'Requests completed/sec',
+ 'Blocked tasks',
+ 'Active memory grant amount (KB)',
+ 'Disk Read Bytes/sec',
+ 'Disk Read IO Throttled/sec',
+ 'Disk Read IO/sec',
+ 'Disk Write Bytes/sec',
+ 'Disk Write IO Throttled/sec',
+ 'Disk Write IO/sec',
+ 'Used memory (KB)',
+ 'Forwarded Records/sec',
+ 'Background Writer pages/sec',
+ 'Percent Log Used',
+ 'Log Send Queue KB',
+ 'Redo Queue KB',
+ 'Mirrored Write Transactions/sec',
+ 'Group Commit Time',
+ 'Group Commits/Sec'
+ )
+ ) OR (
+ object_name LIKE '%User Settable%'
+ OR object_name LIKE '%SQL Errors%'
+ ) OR (
+ object_name LIKE '%Batch Resp Statistics%'
+ ) OR (
+ instance_name IN ('_Total')
+ AND counter_name IN (
+ 'Lock Timeouts/sec',
+ 'Number of Deadlocks/sec',
+ 'Lock Waits/sec',
+ 'Latch Waits/sec'
+ )
+ )
+ )
+INSERT INTO @PCounters select * from PerfCounters
+
+select
+ 'sqlserver_performance' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ pc.object_name AS [object],
+ pc.counter_name AS [counter],
+ CASE pc.instance_name
+ WHEN '_Total' THEN 'Total'
+ ELSE ISNULL(pc.instance_name,'')
+ END AS [instance],
+ CAST(CASE WHEN pc.cntr_type = 537003264 AND pc1.cntr_value > 0 THEN (pc.cntr_value * 1.0) / (pc1.cntr_value * 1.0) * 100 ELSE pc.cntr_value END AS float(10)) AS [value],
+ -- cast to string as TAG
+ cast(pc.cntr_type as varchar(25)) as [counter_type]
+from @PCounters pc
+ LEFT OUTER JOIN @PCounters AS pc1
+ ON (
+ pc.counter_name = REPLACE(pc1.counter_name,' base','')
+ OR pc.counter_name = REPLACE(pc1.counter_name,' base',' (ms)')
+ )
+ AND pc.object_name = pc1.object_name
+ AND pc.instance_name = pc1.instance_name
+ AND pc1.counter_name LIKE '%base'
+WHERE pc.counter_name NOT LIKE '% base'
+OPTION (RECOMPILE)
+`
+
+const sqlAzureMIRequests string = `
+SET NOCOUNT ON;
+IF SERVERPROPERTY('EngineEdition') = 8 -- Is this Azure SQL DB?
+BEGIN
+ SELECT blocking_session_id into #blockingSessions FROM sys.dm_exec_requests WHERE blocking_session_id != 0
+ create index ix_blockingSessions_1 on #blockingSessions (blocking_session_id)
+ SELECT
+ 'sqlserver_requests' AS [measurement]
+ , REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ , DB_NAME() as [database_name]
+ , s.session_id
+ , ISNULL(r.request_id,0) as request_id
+ , DB_NAME(s.database_id) as session_db_name
+ , COALESCE(r.status,s.status) AS status
+ , COALESCE(r.cpu_time,s.cpu_time) AS cpu_time_ms
+ , COALESCE(r.total_elapsed_time,s.total_elapsed_time) AS total_elapsed_time_ms
+ , COALESCE(r.logical_reads,s.logical_reads) AS logical_reads
+ , COALESCE(r.writes,s.writes) AS writes
+ , r.command
+ , r.wait_time as wait_time_ms
+ , r.wait_type
+ , r.wait_resource
+ , r.blocking_session_id
+ , s.program_name
+ , s.host_name
+ , s.nt_user_name
+ , s.open_transaction_count AS open_transaction
+ , LEFT (CASE COALESCE(r.transaction_isolation_level, s.transaction_isolation_level)
+ WHEN 0 THEN '0-Read Committed'
+ WHEN 1 THEN '1-Read Uncommitted (NOLOCK)'
+ WHEN 2 THEN '2-Read Committed'
+ WHEN 3 THEN '3-Repeatable Read'
+ WHEN 4 THEN '4-Serializable'
+ WHEN 5 THEN '5-Snapshot'
+ ELSE CONVERT (varchar(30), r.transaction_isolation_level) + '-UNKNOWN'
+ END, 30) AS transaction_isolation_level
+ , r.granted_query_memory as granted_query_memory_pages
+ , r.percent_complete
+ , SUBSTRING(
+ qt.text,
+ r.statement_start_offset / 2 + 1,
+ (CASE WHEN r.statement_end_offset = -1
+ THEN DATALENGTH(qt.text)
+ ELSE r.statement_end_offset
+ END - r.statement_start_offset) / 2 + 1
+ ) AS statement_text
+ , qt.objectid
+ , QUOTENAME(OBJECT_SCHEMA_NAME(qt.objectid,qt.dbid)) + '.' + QUOTENAME(OBJECT_NAME(qt.objectid,qt.dbid)) as stmt_object_name
+ , DB_NAME(qt.dbid) stmt_db_name
+ , CONVERT(varchar(20),[query_hash],1) as [query_hash]
+ , CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash]
+ FROM sys.dm_exec_sessions AS s
+ LEFT OUTER JOIN sys.dm_exec_requests AS r
+ ON s.session_id = r.session_id
+ OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt
+ WHERE 1 = 1
+ AND (r.session_id IS NOT NULL AND (s.is_user_process = 1 OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping')))
+ OR (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions))
+ OPTION(MAXDOP 1)
+END
+`
diff --git a/plugins/inputs/sqlserver/sqlqueriesV1.go b/plugins/inputs/sqlserver/sqlqueriesV1.go
new file mode 100644
index 0000000000000..a0c8180690f5c
--- /dev/null
+++ b/plugins/inputs/sqlserver/sqlqueriesV1.go
@@ -0,0 +1,1408 @@
+package sqlserver
+
+import (
+ _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization
+)
+
+// Queries V1
+const sqlPerformanceMetrics string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET ARITHABORT ON;
+SET QUOTED_IDENTIFIER ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
+
+DECLARE @PCounters TABLE
+(
+ counter_name nvarchar(64),
+ cntr_value bigint,
+ Primary Key(counter_name)
+);
+
+INSERT @PCounters (counter_name, cntr_value)
+SELECT 'Point In Time Recovery', Value = CASE
+ WHEN 1 > 1.0 * COUNT(*) / NULLIF((SELECT COUNT(*) FROM sys.databases d WHERE database_id > 4), 0)
+ THEN 0 ELSE 1 END
+FROM sys.databases d
+WHERE database_id > 4
+ AND recovery_model IN (1)
+UNION ALL
+SELECT 'Page File Usage (%)', CAST(100 * (1 - available_page_file_kb * 1. / total_page_file_kb) as decimal(9,2)) as [PageFileUsage (%)]
+FROM sys.dm_os_sys_memory
+UNION ALL
+SELECT 'Connection memory per connection (bytes)', Ratio = CAST((cntr_value / (SELECT 1.0 * cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'User Connections')) * 1024 as int)
+FROM sys.dm_os_performance_counters
+WHERE counter_name = 'Connection Memory (KB)'
+UNION ALL
+SELECT 'Available physical memory (bytes)', available_physical_memory_kb * 1024
+FROM sys.dm_os_sys_memory
+UNION ALL
+SELECT 'Signal wait (%)', SignalWaitPercent = CAST(100.0 * SUM(signal_wait_time_ms) / SUM (wait_time_ms) AS NUMERIC(20,2))
+FROM sys.dm_os_wait_stats
+UNION ALL
+SELECT 'Sql compilation per batch request', SqlCompilationPercent = 100.0 * cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec')
+FROM sys.dm_os_performance_counters
+WHERE counter_name = 'SQL Compilations/sec'
+UNION ALL
+SELECT 'Sql recompilation per batch request', SqlReCompilationPercent = 100.0 *cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec')
+FROM sys.dm_os_performance_counters
+WHERE counter_name = 'SQL Re-Compilations/sec'
+UNION ALL
+SELECT 'Page lookup per batch request',PageLookupPercent = 100.0 * cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec')
+FROM sys.dm_os_performance_counters
+WHERE counter_name = 'Page lookups/sec'
+UNION ALL
+SELECT 'Page split per batch request',PageSplitPercent = 100.0 * cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec')
+FROM sys.dm_os_performance_counters
+WHERE counter_name = 'Page splits/sec'
+UNION ALL
+SELECT 'Average tasks', AverageTaskCount = (SELECT AVG(current_tasks_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 )
+UNION ALL
+SELECT 'Average runnable tasks', AverageRunnableTaskCount = (SELECT AVG(runnable_tasks_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 )
+UNION ALL
+SELECT 'Average pending disk IO', AveragePendingDiskIOCount = (SELECT AVG(pending_disk_io_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 )
+UNION ALL
+SELECT 'Buffer pool rate (bytes/sec)', BufferPoolRate = (1.0*cntr_value * 8 * 1024) /
+ (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE object_name like '%Buffer Manager%' AND counter_name = 'Page life expectancy')
+FROM sys.dm_os_performance_counters
+WHERE object_name like '%Buffer Manager%'
+AND counter_name = 'Database pages'
+UNION ALL
+SELECT 'Memory grant pending', MemoryGrantPending = cntr_value
+FROM sys.dm_os_performance_counters
+WHERE counter_name = 'Memory Grants Pending'
+UNION ALL
+SELECT 'Readahead per page read', Readahead = 100.0 *cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Page Reads/sec')
+FROM sys.dm_os_performance_counters
+WHERE counter_name = 'Readahead pages/sec'
+UNION ALL
+SELECT 'Total target memory ratio', TotalTargetMemoryRatio = 100.0 * cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Target Server Memory (KB)')
+FROM sys.dm_os_performance_counters
+WHERE counter_name = 'Total Server Memory (KB)'
+
+IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters;
+SELECT * INTO #PCounters FROM @PCounters
+
+DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
+DECLARE @ColumnName AS NVARCHAR(MAX)
+SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(counter_name)
+FROM (SELECT DISTINCT counter_name FROM @PCounters) AS bl
+
+SET @DynamicPivotQuery = N'
+SELECT measurement = ''Performance metrics'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Performance metrics''
+, ' + @ColumnName + ' FROM
+(
+SELECT counter_name, cntr_value
+FROM #PCounters
+) as V
+PIVOT(SUM(cntr_value) FOR counter_name IN (' + @ColumnName + ')) AS PVTTable
+'
+EXEC sp_executesql @DynamicPivotQuery;
+`
+
+const sqlMemoryClerk string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+DECLARE @sqlVers numeric(4,2)
+SELECT @sqlVers = LEFT(CAST(SERVERPROPERTY('productversion') as varchar), 4)
+
+IF OBJECT_ID('tempdb..#clerk') IS NOT NULL
+ DROP TABLE #clerk;
+
+CREATE TABLE #clerk (
+ ClerkCategory nvarchar(64) NOT NULL,
+ UsedPercent decimal(9,2),
+ UsedBytes bigint
+);
+
+DECLARE @DynamicClerkQuery AS NVARCHAR(MAX)
+
+IF @sqlVers < 11
+BEGIN
+ SET @DynamicClerkQuery = N'
+ INSERT #clerk (ClerkCategory, UsedPercent, UsedBytes)
+ SELECT ClerkCategory
+ , UsedPercent = SUM(UsedPercent)
+ , UsedBytes = SUM(UsedBytes)
+ FROM
+ (
+ SELECT ClerkCategory = CASE MC.[type]
+ WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool''
+ WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)''
+ WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)''
+ ELSE ''Other'' END
+ , SUM((single_pages_kb + multi_pages_kb) * 1024) AS UsedBytes
+ , Cast(100 * Sum((single_pages_kb + multi_pages_kb))*1.0/(Select Sum((single_pages_kb + multi_pages_kb)) From sys.dm_os_memory_clerks) as Decimal(7, 4)) UsedPercent
+ FROM sys.dm_os_memory_clerks MC
+ WHERE (single_pages_kb + multi_pages_kb) > 0
+ GROUP BY CASE MC.[type]
+ WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool''
+ WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)''
+ WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)''
+ ELSE ''Other'' END
+ ) as T
+ GROUP BY ClerkCategory;
+ '
+END
+ELSE
+BEGIN
+ SET @DynamicClerkQuery = N'
+ INSERT #clerk (ClerkCategory, UsedPercent, UsedBytes)
+ SELECT ClerkCategory
+ , UsedPercent = SUM(UsedPercent)
+ , UsedBytes = SUM(UsedBytes)
+ FROM
+ (
+ SELECT ClerkCategory = CASE MC.[type]
+ WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool''
+ WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)''
+ WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)''
+ ELSE ''Other'' END
+ , SUM(pages_kb * 1024) AS UsedBytes
+ , Cast(100 * Sum(pages_kb)*1.0/(Select Sum(pages_kb) From sys.dm_os_memory_clerks) as Decimal(7, 4)) UsedPercent
+ FROM sys.dm_os_memory_clerks MC
+ WHERE pages_kb > 0
+ GROUP BY CASE MC.[type]
+ WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool''
+ WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)''
+ WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)''
+ ELSE ''Other'' END
+ ) as T
+ GROUP BY ClerkCategory;
+ '
+END
+EXEC sp_executesql @DynamicClerkQuery;
+SELECT
+-- measurement
+measurement
+-- tags
+, servername= REPLACE(@@SERVERNAME, '\', ':')
+, type = 'Memory clerk'
+-- value
+, [Buffer pool]
+, [Cache (objects)]
+, [Cache (sql plans)]
+, [Other]
+FROM
+(
+SELECT measurement = 'Memory breakdown (%)'
+, [Buffer pool] = ISNULL(ROUND([Buffer Pool], 1), 0)
+, [Cache (objects)] = ISNULL(ROUND([Cache (objects)], 1), 0)
+, [Cache (sql plans)] = ISNULL(ROUND([Cache (sql plans)], 1), 0)
+, [Other] = ISNULL(ROUND([Other], 1), 0)
+FROM (SELECT ClerkCategory, UsedPercent FROM #clerk) as G1
+PIVOT
+(
+ SUM(UsedPercent)
+ FOR ClerkCategory IN ([Buffer Pool], [Cache (objects)], [Cache (sql plans)], [Other])
+) AS PivotTable
+
+UNION ALL
+
+SELECT measurement = 'Memory breakdown (bytes)'
+, [Buffer pool] = ISNULL(ROUND([Buffer Pool], 1), 0)
+, [Cache (objects)] = ISNULL(ROUND([Cache (objects)], 1), 0)
+, [Cache (sql plans)] = ISNULL(ROUND([Cache (sql plans)], 1), 0)
+, [Other] = ISNULL(ROUND([Other], 1), 0)
+FROM (SELECT ClerkCategory, UsedBytes FROM #clerk) as G2
+PIVOT
+(
+ SUM(UsedBytes)
+ FOR ClerkCategory IN ([Buffer Pool], [Cache (objects)], [Cache (sql plans)], [Other])
+) AS PivotTable
+) as T;
+`
+
+const sqlDatabaseSize string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
+
+IF OBJECT_ID('tempdb..#baseline') IS NOT NULL
+ DROP TABLE #baseline;
+SELECT
+ DB_NAME(mf.database_id) AS database_name ,
+ CAST(mf.size AS BIGINT) as database_size_8k_pages,
+ CAST(mf.max_size AS BIGINT) as database_max_size_8k_pages,
+ size_on_disk_bytes ,
+ type_desc as datafile_type,
+ GETDATE() AS baselineDate
+INTO #baseline
+FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs
+INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id
+ AND mf.file_id = divfs.file_id
+
+DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
+DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX)
+
+SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(database_name)
+FROM (SELECT DISTINCT database_name FROM #baseline) AS bl
+
+--Prepare the PIVOT query using the dynamic
+SET @DynamicPivotQuery = N'
+SELECT measurement = ''Log size (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, size_on_disk_bytes
+FROM #baseline
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(size_on_disk_bytes) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Rows size (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, size_on_disk_bytes
+FROM #baseline
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(size_on_disk_bytes) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Rows size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, database_size_8k_pages
+FROM #baseline
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(database_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Log size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, database_size_8k_pages
+FROM #baseline
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(database_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Rows max size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, database_max_size_8k_pages
+FROM #baseline
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Logs max size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, database_max_size_8k_pages
+FROM #baseline
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+'
+--PRINT @DynamicPivotQuery
+EXEC sp_executesql @DynamicPivotQuery;
+`
+
+const sqlDatabaseStats string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+IF OBJECT_ID('tempdb..#baseline') IS NOT NULL
+ DROP TABLE #baseline;
+
+SELECT
+[ReadLatency] =
+ CASE WHEN [num_of_reads] = 0
+ THEN 0 ELSE ([io_stall_read_ms] / [num_of_reads]) END,
+[WriteLatency] =
+ CASE WHEN [num_of_writes] = 0
+ THEN 0 ELSE ([io_stall_write_ms] / [num_of_writes]) END,
+[Latency] =
+ CASE WHEN ([num_of_reads] = 0 AND [num_of_writes] = 0)
+ THEN 0 ELSE ([io_stall] / ([num_of_reads] + [num_of_writes])) END,
+[AvgBytesPerRead] =
+ CASE WHEN [num_of_reads] = 0
+ THEN 0 ELSE ([num_of_bytes_read] / [num_of_reads]) END,
+[AvgBytesPerWrite] =
+ CASE WHEN [num_of_writes] = 0
+ THEN 0 ELSE ([num_of_bytes_written] / [num_of_writes]) END,
+[AvgBytesPerTransfer] =
+ CASE WHEN ([num_of_reads] = 0 AND [num_of_writes] = 0)
+ THEN 0 ELSE
+ (([num_of_bytes_read] + [num_of_bytes_written]) /
+ ([num_of_reads] + [num_of_writes])) END,
+DB_NAME ([vfs].[database_id]) AS DatabaseName,
+[mf].type_desc as datafile_type
+INTO #baseline
+FROM sys.dm_io_virtual_file_stats (NULL,NULL) AS [vfs]
+JOIN sys.master_files AS [mf] ON [vfs].[database_id] = [mf].[database_id]
+ AND [vfs].[file_id] = [mf].[file_id]
+
+
+
+DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
+DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX)
+
+SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(DatabaseName)
+FROM (SELECT DISTINCT DatabaseName FROM #baseline) AS bl
+
+--Prepare the PIVOT query using the dynamic
+SET @DynamicPivotQuery = N'
+SELECT measurement = ''Log read latency (ms)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
+, ' + @ColumnName + ' FROM
+(
+SELECT DatabaseName, ReadLatency
+FROM #baseline
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(MAX(ReadLatency) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Log write latency (ms)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
+, ' + @ColumnName + ' FROM
+(
+SELECT DatabaseName, WriteLatency
+FROM #baseline
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(MAX(WriteLatency) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Rows read latency (ms)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
+, ' + @ColumnName + ' FROM
+(
+SELECT DatabaseName, ReadLatency
+FROM #baseline
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(MAX(ReadLatency) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Rows write latency (ms)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
+, ' + @ColumnName + ' FROM
+(
+SELECT DatabaseName, WriteLatency
+FROM #baseline
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(MAX(WriteLatency) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Rows (average bytes/read)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
+, ' + @ColumnName + ' FROM
+(
+SELECT DatabaseName, AvgBytesPerRead
+FROM #baseline
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(AvgBytesPerRead) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Rows (average bytes/write)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
+, ' + @ColumnName + ' FROM
+(
+SELECT DatabaseName, AvgBytesPerWrite
+FROM #baseline
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(AvgBytesPerWrite) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Log (average bytes/read)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
+, ' + @ColumnName + ' FROM
+(
+SELECT DatabaseName, AvgBytesPerRead
+FROM #baseline
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(AvgBytesPerRead) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Log (average bytes/write)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
+, ' + @ColumnName + ' FROM
+(
+SELECT DatabaseName, AvgBytesPerWrite
+FROM #baseline
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(AvgBytesPerWrite) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+'
+--PRINT @DynamicPivotQuery
+EXEC sp_executesql @DynamicPivotQuery;
+`
+
+const sqlDatabaseIO string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+DECLARE @secondsBetween tinyint = 5;
+DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108);
+IF OBJECT_ID('tempdb..#baseline') IS NOT NULL
+ DROP TABLE #baseline;
+IF OBJECT_ID('tempdb..#baselinewritten') IS NOT NULL
+ DROP TABLE #baselinewritten;
+SELECT DB_NAME(mf.database_id) AS databaseName ,
+ mf.physical_name,
+ divfs.num_of_bytes_read,
+ divfs.num_of_bytes_written,
+ divfs.num_of_reads,
+ divfs.num_of_writes,
+ GETDATE() AS baselinedate
+INTO #baseline
+FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs
+INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id
+ AND mf.file_id = divfs.file_id
+WAITFOR DELAY @delayInterval;
+;WITH currentLine AS
+(
+ SELECT DB_NAME(mf.database_id) AS databaseName ,
+ type_desc,
+ mf.physical_name,
+ divfs.num_of_bytes_read,
+ divfs.num_of_bytes_written,
+ divfs.num_of_reads,
+ divfs.num_of_writes,
+ GETDATE() AS currentlinedate
+ FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs
+ INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id
+ AND mf.file_id = divfs.file_id
+)
+SELECT database_name
+, datafile_type
+, num_of_bytes_read_persec = SUM(num_of_bytes_read_persec)
+, num_of_bytes_written_persec = SUM(num_of_bytes_written_persec)
+, num_of_reads_persec = SUM(num_of_reads_persec)
+, num_of_writes_persec = SUM(num_of_writes_persec)
+INTO #baselinewritten
+FROM
+(
+SELECT
+ database_name = currentLine.databaseName
+, datafile_type = type_desc
+, num_of_bytes_read_persec = (currentLine.num_of_bytes_read - T1.num_of_bytes_read) / (DATEDIFF(SECOND,baselinedate,currentlinedate))
+, num_of_bytes_written_persec = (currentLine.num_of_bytes_written - T1.num_of_bytes_written) / (DATEDIFF(SECOND,baselinedate,currentlinedate))
+, num_of_reads_persec = (currentLine.num_of_reads - T1.num_of_reads) / (DATEDIFF(SECOND,baselinedate,currentlinedate))
+, num_of_writes_persec = (currentLine.num_of_writes - T1.num_of_writes) / (DATEDIFF(SECOND,baselinedate,currentlinedate))
+FROM currentLine
+INNER JOIN #baseline T1 ON T1.databaseName = currentLine.databaseName
+ AND T1.physical_name = currentLine.physical_name
+) as T
+GROUP BY database_name, datafile_type
+DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
+DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX)
+SELECT @ColumnName = ISNULL(@ColumnName + ',','') + QUOTENAME(database_name)
+ FROM (SELECT DISTINCT database_name FROM #baselinewritten) AS bl
+SELECT @ColumnName2 = ISNULL(@ColumnName2 + '+','') + QUOTENAME(database_name)
+ FROM (SELECT DISTINCT database_name FROM #baselinewritten) AS bl
+SET @DynamicPivotQuery = N'
+SELECT measurement = ''Log writes (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
+, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
+(
+SELECT database_name, num_of_bytes_written_persec
+FROM #baselinewritten
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(num_of_bytes_written_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+UNION ALL
+SELECT measurement = ''Rows writes (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
+, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
+(
+SELECT database_name, num_of_bytes_written_persec
+FROM #baselinewritten
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(num_of_bytes_written_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+UNION ALL
+SELECT measurement = ''Log reads (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
+, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
+(
+SELECT database_name, num_of_bytes_read_persec
+FROM #baselinewritten
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(num_of_bytes_read_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+UNION ALL
+SELECT measurement = ''Rows reads (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
+, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
+(
+SELECT database_name, num_of_bytes_read_persec
+FROM #baselinewritten
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(num_of_bytes_read_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+UNION ALL
+SELECT measurement = ''Log (writes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
+, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
+(
+SELECT database_name, num_of_writes_persec
+FROM #baselinewritten
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+UNION ALL
+SELECT measurement = ''Rows (writes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
+, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
+(
+SELECT database_name, num_of_writes_persec
+FROM #baselinewritten
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTabl
+UNION ALL
+SELECT measurement = ''Log (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
+, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
+(
+SELECT database_name, num_of_reads_persec
+FROM #baselinewritten
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+UNION ALL
+SELECT measurement = ''Rows (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
+, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
+(
+SELECT database_name, num_of_reads_persec
+FROM #baselinewritten
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+'
+EXEC sp_executesql @DynamicPivotQuery;
+`
+
+const sqlDatabaseProperties string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET ARITHABORT ON;
+SET QUOTED_IDENTIFIER ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
+
+IF OBJECT_ID('tempdb..#Databases') IS NOT NULL
+ DROP TABLE #Databases;
+CREATE TABLE #Databases
+(
+ Measurement nvarchar(64) NOT NULL,
+ DatabaseName nvarchar(128) NOT NULL,
+ Value tinyint NOT NULL
+ Primary Key(DatabaseName, Measurement)
+);
+
+INSERT #Databases ( Measurement, DatabaseName, Value)
+SELECT
+ Measurement = 'Recovery Model FULL'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.recovery_model = 1 THEN 1 ELSE 0 END
+FROM sys.databases d
+UNION ALL
+SELECT
+ Measurement = 'Recovery Model BULK_LOGGED'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.recovery_model = 2 THEN 1 ELSE 0 END
+FROM sys.databases d
+UNION ALL
+SELECT
+ Measurement = 'Recovery Model SIMPLE'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.recovery_model = 3 THEN 1 ELSE 0 END
+FROM sys.databases d
+
+UNION ALL
+SELECT
+ Measurement = 'State ONLINE'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.state = 0 THEN 1 ELSE 0 END
+FROM sys.databases d
+UNION ALL
+SELECT
+ Measurement = 'State RESTORING'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.state = 1 THEN 1 ELSE 0 END
+FROM sys.databases d
+UNION ALL
+SELECT
+ Measurement = 'State RECOVERING'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.state = 2 THEN 1 ELSE 0 END
+FROM sys.databases d
+UNION ALL
+SELECT
+ Measurement = 'State RECOVERY_PENDING'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.state = 3 THEN 1 ELSE 0 END
+FROM sys.databases d
+UNION ALL
+SELECT
+ Measurement = 'State SUSPECT'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.state = 4 THEN 1 ELSE 0 END
+FROM sys.databases d
+UNION ALL
+SELECT
+ Measurement = 'State EMERGENCY'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.state = 5 THEN 1 ELSE 0 END
+FROM sys.databases d
+UNION ALL
+SELECT
+ Measurement = 'State OFFLINE'
+, DatabaseName = d.Name
+, Value = CASE WHEN d.state = 6 THEN 1 ELSE 0 END
+FROM sys.databases d
+
+DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
+DECLARE @ColumnName AS NVARCHAR(MAX)
+SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(DatabaseName)
+FROM (SELECT DISTINCT DatabaseName FROM #Databases) AS bl
+
+SET @DynamicPivotQuery = N'
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''Recovery Model FULL''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''Recovery Model BULK_LOGGED''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''Recovery Model SIMPLE''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+
+UNION ALL
+
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''State ONLINE''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''State RESTORING''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''State RECOVERING''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''State RECOVERY_PENDING''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''State SUSPECT''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''State EMERGENCY''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
+, type = ''Database properties''
+, ' + @ColumnName + ', Total FROM
+(
+SELECT Measurement, DatabaseName, Value
+, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
+FROM #Databases d
+WHERE d.Measurement = ''State OFFLINE''
+) as V
+PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
+'
+EXEC sp_executesql @DynamicPivotQuery;
+`
+
+const sqlCPUHistory string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET ARITHABORT ON;
+SET QUOTED_IDENTIFIER ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+DECLARE @ms_ticks bigint;
+SET @ms_ticks = (Select ms_ticks From sys.dm_os_sys_info);
+DECLARE @maxEvents int = 1
+
+SELECT
+---- measurement
+ measurement = 'CPU (%)'
+---- tags
+, servername= REPLACE(@@SERVERNAME, '\', ':')
+, type = 'CPU usage'
+-- value
+, [SQL process] = ProcessUtilization
+, [External process]= 100 - SystemIdle - ProcessUtilization
+, [SystemIdle]
+FROM
+(
+SELECT TOP (@maxEvents)
+ EventTime = CAST(DateAdd(ms, -1 * (@ms_ticks - timestamp_ms), GetUTCDate()) as datetime)
+, ProcessUtilization = CAST(ProcessUtilization as int)
+, SystemIdle = CAST(SystemIdle as int)
+FROM (SELECT Record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') as SystemIdle,
+ Record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') as ProcessUtilization,
+ timestamp as timestamp_ms
+FROM (SELECT timestamp, convert(xml, record) As Record
+ FROM sys.dm_os_ring_buffers
+ WHERE ring_buffer_type = N'RING_BUFFER_SCHEDULER_MONITOR'
+ And record Like '%%') x) y
+ORDER BY timestamp_ms Desc
+) as T;
+`
+
+const sqlPerformanceCounters string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters
+CREATE TABLE #PCounters
+(
+ object_name nvarchar(128),
+ counter_name nvarchar(128),
+ instance_name nvarchar(128),
+ cntr_value bigint,
+ cntr_type INT,
+ Primary Key(object_name, counter_name, instance_name)
+);
+INSERT #PCounters
+SELECT DISTINCT RTrim(spi.object_name) object_name
+, RTrim(spi.counter_name) counter_name
+, RTrim(spi.instance_name) instance_name
+, spi.cntr_value
+, spi.cntr_type
+FROM sys.dm_os_performance_counters spi
+WHERE spi.object_name NOT LIKE '%Backup Device%'
+ AND NOT EXISTS (SELECT 1 FROM sys.databases WHERE Name = spi.instance_name);
+
+WAITFOR DELAY '00:00:01';
+
+IF OBJECT_ID('tempdb..#CCounters') IS NOT NULL DROP TABLE #CCounters
+CREATE TABLE #CCounters
+(
+ object_name nvarchar(128),
+ counter_name nvarchar(128),
+ instance_name nvarchar(128),
+ cntr_value bigint,
+ cntr_type INT,
+ Primary Key(object_name, counter_name, instance_name)
+);
+INSERT #CCounters
+SELECT DISTINCT RTrim(spi.object_name) object_name
+, RTrim(spi.counter_name) counter_name
+, RTrim(spi.instance_name) instance_name
+, spi.cntr_value
+, spi.cntr_type
+FROM sys.dm_os_performance_counters spi
+WHERE spi.object_name NOT LIKE '%Backup Device%'
+ AND NOT EXISTS (SELECT 1 FROM sys.databases WHERE Name = spi.instance_name);
+
+SELECT
+ measurement = cc.counter_name
+ + CASE WHEN LEN(cc.instance_name) > 0 THEN ' | ' + cc.instance_name ELSE '' END
+ + ' | '
+ + SUBSTRING( cc.object_name, CHARINDEX(':', cc.object_name) + 1, LEN( cc.object_name) - CHARINDEX(':', cc.object_name))
+-- tags
+, servername = REPLACE(@@SERVERNAME, '\', ':')
+, type = 'Performance counters'
+--, countertype = CASE cc.cntr_type
+-- When 65792 Then 'Count'
+-- When 537003264 Then 'Ratio'
+-- When 272696576 Then 'Per second'
+-- When 1073874176 Then 'Average'
+-- When 272696320 Then 'Average Per Second'
+-- When 1073939712 Then 'Base'
+-- END
+-- value
+, value = CAST(CASE cc.cntr_type
+ When 65792 Then cc.cntr_value -- Count
+ When 537003264 Then IsNull(Cast(cc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value, 0), 0) -- Ratio
+ When 272696576 Then cc.cntr_value - pc.cntr_value -- Per Second
+ When 1073874176 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg
+ When 272696320 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg/sec
+ When 1073939712 Then cc.cntr_value - pc.cntr_value -- Base
+ Else cc.cntr_value End as bigint)
+--, currentvalue= CAST(cc.cntr_value as bigint)
+FROM #CCounters cc
+INNER JOIN #PCounters pc On cc.object_name = pc.object_name
+ And cc.counter_name = pc.counter_name
+ And cc.instance_name = pc.instance_name
+ And cc.cntr_type = pc.cntr_type
+LEFT JOIN #CCounters cbc On cc.object_name = cbc.object_name
+ And (Case When cc.counter_name Like '%(ms)' Then Replace(cc.counter_name, ' (ms)',' Base')
+ When cc.object_name like '%FileTable' Then Replace(cc.counter_name, 'Avg ','') + ' base'
+ When cc.counter_name = 'Worktables From Cache Ratio' Then 'Worktables From Cache Base'
+ When cc.counter_name = 'Avg. Length of Batched Writes' Then 'Avg. Length of Batched Writes BS'
+ Else cc.counter_name + ' base'
+ End) = cbc.counter_name
+ And cc.instance_name = cbc.instance_name
+ And cc.cntr_type In (537003264, 1073874176)
+ And cbc.cntr_type = 1073939712
+LEFT JOIN #PCounters pbc On pc.object_name = pbc.object_name
+ And pc.instance_name = pbc.instance_name
+ And (Case When pc.counter_name Like '%(ms)' Then Replace(pc.counter_name, ' (ms)',' Base')
+ When pc.object_name like '%FileTable' Then Replace(pc.counter_name, 'Avg ','') + ' base'
+ When pc.counter_name = 'Worktables From Cache Ratio' Then 'Worktables From Cache Base'
+ When pc.counter_name = 'Avg. Length of Batched Writes' Then 'Avg. Length of Batched Writes BS'
+ Else pc.counter_name + ' base'
+ End) = pbc.counter_name
+ And pc.cntr_type In (537003264, 1073874176)
+
+IF OBJECT_ID('tempdb..#CCounters') IS NOT NULL DROP TABLE #CCounters;
+IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters;
+`
+
+const sqlWaitStatsCategorized string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
+DECLARE @secondsBetween tinyint = 5
+DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108);
+
+DECLARE @w1 TABLE
+(
+ WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
+ WaitTimeInMs bigint NOT NULL,
+ WaitTaskCount bigint NOT NULL,
+ CollectionDate datetime NOT NULL
+)
+DECLARE @w2 TABLE
+(
+ WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
+ WaitTimeInMs bigint NOT NULL,
+ WaitTaskCount bigint NOT NULL,
+ CollectionDate datetime NOT NULL
+)
+DECLARE @w3 TABLE
+(
+ WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
+)
+DECLARE @w4 TABLE
+(
+ WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
+ WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
+)
+DECLARE @w5 TABLE
+(
+ WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
+ WaitTimeInMs bigint NOT NULL,
+ WaitTaskCount bigint NOT NULL
+)
+
+INSERT @w3 (WaitType)
+VALUES (N'QDS_SHUTDOWN_QUEUE'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETION'),
+ (N'BROKER_EVENTHANDLER'), (N'BROKER_RECEIVE_WAITFOR'),
+ (N'BROKER_TASK_STOP'), (N'BROKER_TO_FLUSH'),
+ (N'BROKER_TRANSMITTER'), (N'CHECKPOINT_QUEUE'),
+ (N'CHKPT'), (N'CLR_AUTO_EVENT'),
+ (N'CLR_MANUAL_EVENT'), (N'CLR_SEMAPHORE'),
+ (N'DBMIRROR_DBM_EVENT'), (N'DBMIRROR_EVENTS_QUEUE'),
+ (N'DBMIRROR_WORKER_QUEUE'), (N'DBMIRRORING_CMD'),
+ (N'DIRTY_PAGE_POLL'), (N'DISPATCHER_QUEUE_SEMAPHORE'),
+ (N'EXECSYNC'), (N'FSAGENT'),
+ (N'FT_IFTS_SCHEDULER_IDLE_WAIT'), (N'FT_IFTSHC_MUTEX'),
+ (N'HADR_CLUSAPI_CALL'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETIO(N'),
+ (N'HADR_LOGCAPTURE_WAIT'), (N'HADR_NOTIFICATION_DEQUEUE'),
+ (N'HADR_TIMER_TASK'), (N'HADR_WORK_QUEUE'),
+ (N'KSOURCE_WAKEUP'), (N'LAZYWRITER_SLEEP'),
+ (N'LOGMGR_QUEUE'), (N'ONDEMAND_TASK_QUEUE'),
+ (N'PWAIT_ALL_COMPONENTS_INITIALIZED'),
+ (N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP'),
+ (N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP'),
+ (N'REQUEST_FOR_DEADLOCK_SEARCH'), (N'RESOURCE_QUEUE'),
+ (N'SERVER_IDLE_CHECK'), (N'SLEEP_BPOOL_FLUSH'),
+ (N'SLEEP_DBSTARTUP'), (N'SLEEP_DCOMSTARTUP'),
+ (N'SLEEP_MASTERDBREADY'), (N'SLEEP_MASTERMDREADY'),
+ (N'SLEEP_MASTERUPGRADED'), (N'SLEEP_MSDBSTARTUP'),
+ (N'SLEEP_SYSTEMTASK'), (N'SLEEP_TASK'),
+ (N'SLEEP_TEMPDBSTARTUP'), (N'SNI_HTTP_ACCEPT'),
+ (N'SP_SERVER_DIAGNOSTICS_SLEEP'), (N'SQLTRACE_BUFFER_FLUSH'),
+ (N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP'),
+ (N'SQLTRACE_WAIT_ENTRIES'), (N'WAIT_FOR_RESULTS'),
+ (N'WAITFOR'), (N'WAITFOR_TASKSHUTDOW(N'),
+ (N'WAIT_XTP_HOST_WAIT'), (N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG'),
+ (N'WAIT_XTP_CKPT_CLOSE'), (N'XE_DISPATCHER_JOI(N'),
+ (N'XE_DISPATCHER_WAIT'), (N'XE_TIMER_EVENT');
+
+INSERT @w4 (WaitType, WaitCategory) VALUES ('ABR', 'OTHER') ,
+('ASSEMBLY_LOAD' , 'OTHER') , ('ASYNC_DISKPOOL_LOCK' , 'I/O') , ('ASYNC_IO_COMPLETION' , 'I/O') ,
+('ASYNC_NETWORK_IO' , 'NETWORK') , ('AUDIT_GROUPCACHE_LOCK' , 'OTHER') , ('AUDIT_LOGINCACHE_LOCK' ,
+'OTHER') , ('AUDIT_ON_DEMAND_TARGET_LOCK' , 'OTHER') , ('AUDIT_XE_SESSION_MGR' , 'OTHER') , ('BACKUP' ,
+'BACKUP') , ('BACKUP_CLIENTLOCK ' , 'BACKUP') , ('BACKUP_OPERATOR' , 'BACKUP') , ('BACKUPBUFFER' ,
+'BACKUP') , ('BACKUPIO' , 'BACKUP') , ('BACKUPTHREAD' , 'BACKUP') , ('BAD_PAGE_PROCESS' , 'MEMORY') ,
+('BROKER_CONNECTION_RECEIVE_TASK' , 'SERVICE BROKER') , ('BROKER_ENDPOINT_STATE_MUTEX' , 'SERVICE BROKER')
+, ('BROKER_EVENTHANDLER' , 'SERVICE BROKER') , ('BROKER_INIT' , 'SERVICE BROKER') , ('BROKER_MASTERSTART'
+, 'SERVICE BROKER') , ('BROKER_RECEIVE_WAITFOR' , 'SERVICE BROKER') , ('BROKER_REGISTERALLENDPOINTS' ,
+'SERVICE BROKER') , ('BROKER_SERVICE' , 'SERVICE BROKER') , ('BROKER_SHUTDOWN' , 'SERVICE BROKER') ,
+('BROKER_TASK_STOP' , 'SERVICE BROKER') , ('BROKER_TO_FLUSH' , 'SERVICE BROKER') , ('BROKER_TRANSMITTER' ,
+'SERVICE BROKER') , ('BUILTIN_HASHKEY_MUTEX' , 'OTHER') , ('CHECK_PRINT_RECORD' , 'OTHER') ,
+('CHECKPOINT_QUEUE' , 'BUFFER') , ('CHKPT' , 'BUFFER') , ('CLEAR_DB' , 'OTHER') , ('CLR_AUTO_EVENT' ,
+'CLR') , ('CLR_CRST' , 'CLR') , ('CLR_JOIN' , 'CLR') , ('CLR_MANUAL_EVENT' , 'CLR') , ('CLR_MEMORY_SPY' ,
+'CLR') , ('CLR_MONITOR' , 'CLR') , ('CLR_RWLOCK_READER' , 'CLR') , ('CLR_RWLOCK_WRITER' , 'CLR') ,
+('CLR_SEMAPHORE' , 'CLR') , ('CLR_TASK_START' , 'CLR') , ('CLRHOST_STATE_ACCESS' , 'CLR') , ('CMEMTHREAD'
+, 'MEMORY') , ('COMMIT_TABLE' , 'OTHER') , ('CURSOR' , 'OTHER') , ('CURSOR_ASYNC' , 'OTHER') , ('CXPACKET'
+, 'OTHER') , ('CXROWSET_SYNC' , 'OTHER') , ('DAC_INIT' , 'OTHER') , ('DBMIRROR_DBM_EVENT ' , 'OTHER') ,
+('DBMIRROR_DBM_MUTEX ' , 'OTHER') , ('DBMIRROR_EVENTS_QUEUE' , 'OTHER') , ('DBMIRROR_SEND' , 'OTHER') ,
+('DBMIRROR_WORKER_QUEUE' , 'OTHER') , ('DBMIRRORING_CMD' , 'OTHER') , ('DBTABLE' , 'OTHER') ,
+('DEADLOCK_ENUM_MUTEX' , 'LOCK') , ('DEADLOCK_TASK_SEARCH' , 'LOCK') , ('DEBUG' , 'OTHER') ,
+('DISABLE_VERSIONING' , 'OTHER') , ('DISKIO_SUSPEND' , 'BACKUP') , ('DISPATCHER_QUEUE_SEMAPHORE' ,
+'OTHER') , ('DLL_LOADING_MUTEX' , 'XML') , ('DROPTEMP' , 'TEMPORARY OBJECTS') , ('DTC' , 'OTHER') ,
+('DTC_ABORT_REQUEST' , 'OTHER') , ('DTC_RESOLVE' , 'OTHER') , ('DTC_STATE' , 'DOTHERTC') ,
+('DTC_TMDOWN_REQUEST' , 'OTHER') , ('DTC_WAITFOR_OUTCOME' , 'OTHER') , ('DUMP_LOG_COORDINATOR' , 'OTHER')
+, ('DUMP_LOG_COORDINATOR_QUEUE' , 'OTHER') , ('DUMPTRIGGER' , 'OTHER') , ('EC' , 'OTHER') , ('EE_PMOLOCK'
+, 'MEMORY') , ('EE_SPECPROC_MAP_INIT' , 'OTHER') , ('ENABLE_VERSIONING' , 'OTHER') ,
+('ERROR_REPORTING_MANAGER' , 'OTHER') , ('EXCHANGE' , 'OTHER') , ('EXECSYNC' , 'OTHER') ,
+('EXECUTION_PIPE_EVENT_OTHER' , 'OTHER') , ('Failpoint' , 'OTHER') , ('FCB_REPLICA_READ' , 'OTHER') ,
+('FCB_REPLICA_WRITE' , 'OTHER') , ('FS_FC_RWLOCK' , 'OTHER') , ('FS_GARBAGE_COLLECTOR_SHUTDOWN' , 'OTHER')
+, ('FS_HEADER_RWLOCK' , 'OTHER') , ('FS_LOGTRUNC_RWLOCK' , 'OTHER') , ('FSA_FORCE_OWN_XACT' , 'OTHER') ,
+('FSAGENT' , 'OTHER') , ('FSTR_CONFIG_MUTEX' , 'OTHER') , ('FSTR_CONFIG_RWLOCK' , 'OTHER') ,
+('FT_COMPROWSET_RWLOCK' , 'OTHER') , ('FT_IFTS_RWLOCK' , 'OTHER') , ('FT_IFTS_SCHEDULER_IDLE_WAIT' ,
+'OTHER') , ('FT_IFTSHC_MUTEX' , 'OTHER') , ('FT_IFTSISM_MUTEX' , 'OTHER') , ('FT_MASTER_MERGE' , 'OTHER')
+, ('FT_METADATA_MUTEX' , 'OTHER') , ('FT_RESTART_CRAWL' , 'OTHER') , ('FT_RESUME_CRAWL' , 'OTHER') ,
+('FULLTEXT GATHERER' , 'OTHER') , ('GUARDIAN' , 'OTHER') , ('HTTP_ENDPOINT_COLLCREATE' , 'SERVICE BROKER')
+, ('HTTP_ENUMERATION' , 'SERVICE BROKER') , ('HTTP_START' , 'SERVICE BROKER') , ('IMP_IMPORT_MUTEX' ,
+'OTHER') , ('IMPPROV_IOWAIT' , 'I/O') , ('INDEX_USAGE_STATS_MUTEX' , 'OTHER') , ('OTHER_TESTING' ,
+'OTHER') , ('IO_AUDIT_MUTEX' , 'OTHER') , ('IO_COMPLETION' , 'I/O') , ('IO_RETRY' , 'I/O') ,
+('IOAFF_RANGE_QUEUE' , 'OTHER') , ('KSOURCE_WAKEUP' , 'SHUTDOWN') , ('KTM_ENLISTMENT' , 'OTHER') ,
+('KTM_RECOVERY_MANAGER' , 'OTHER') , ('KTM_RECOVERY_RESOLUTION' , 'OTHER') , ('LATCH_DT' , 'LATCH') ,
+('LATCH_EX' , 'LATCH') , ('LATCH_KP' , 'LATCH') , ('LATCH_NL' , 'LATCH') , ('LATCH_SH' , 'LATCH') ,
+('LATCH_UP' , 'LATCH') , ('LAZYWRITER_SLEEP' , 'BUFFER') , ('LCK_M_BU' , 'LOCK') , ('LCK_M_IS' , 'LOCK') ,
+('LCK_M_IU' , 'LOCK') , ('LCK_M_IX' , 'LOCK') , ('LCK_M_RIn_NL' , 'LOCK') , ('LCK_M_RIn_S' , 'LOCK') ,
+('LCK_M_RIn_U' , 'LOCK') , ('LCK_M_RIn_X' , 'LOCK') , ('LCK_M_RS_S' , 'LOCK') , ('LCK_M_RS_U' , 'LOCK') ,
+('LCK_M_RX_S' , 'LOCK') , ('LCK_M_RX_U' , 'LOCK') , ('LCK_M_RX_X' , 'LOCK') , ('LCK_M_S' , 'LOCK') ,
+('LCK_M_SCH_M' , 'LOCK') , ('LCK_M_SCH_S' , 'LOCK') , ('LCK_M_SIU' , 'LOCK') , ('LCK_M_SIX' , 'LOCK') ,
+('LCK_M_U' , 'LOCK') , ('LCK_M_UIX' , 'LOCK') , ('LCK_M_X' , 'LOCK') , ('LOGBUFFER' , 'OTHER') ,
+('LOGGENERATION' , 'OTHER') , ('LOGMGR' , 'OTHER') , ('LOGMGR_FLUSH' , 'OTHER') , ('LOGMGR_QUEUE' ,
+'OTHER') , ('LOGMGR_RESERVE_APPEND' , 'OTHER') , ('LOWFAIL_MEMMGR_QUEUE' , 'MEMORY') ,
+('METADATA_LAZYCACHE_RWLOCK' , 'OTHER') , ('MIRROR_SEND_MESSAGE' , 'OTHER') , ('MISCELLANEOUS' , 'IGNORE')
+, ('MSQL_DQ' , 'DISTRIBUTED QUERY') , ('MSQL_SYNC_PIPE' , 'OTHER') , ('MSQL_XACT_MGR_MUTEX' , 'OTHER') ,
+('MSQL_XACT_MUTEX' , 'OTHER') , ('MSQL_XP' , 'OTHER') , ('MSSEARCH' , 'OTHER') , ('NET_WAITFOR_PACKET' ,
+'NETWORK') , ('NODE_CACHE_MUTEX' , 'OTHER') , ('OTHER' , 'OTHER') , ('ONDEMAND_TASK_QUEUE' , 'OTHER') ,
+('PAGEIOLATCH_DT' , 'LATCH') , ('PAGEIOLATCH_EX' , 'LATCH') , ('PAGEIOLATCH_KP' , 'LATCH') ,
+('PAGEIOLATCH_NL' , 'LATCH') , ('PAGEIOLATCH_SH' , 'LATCH') , ('PAGEIOLATCH_UP' , 'LATCH') ,
+('PAGELATCH_DT' , 'LATCH') , ('PAGELATCH_EX' , 'LATCH') , ('PAGELATCH_KP' , 'LATCH') , ('PAGELATCH_NL' ,
+'LATCH') , ('PAGELATCH_SH' , 'LATCH') , ('PAGELATCH_UP' , 'LATCH') , ('PARALLEL_BACKUP_QUEUE' , 'BACKUP')
+, ('PERFORMANCE_COUNTERS_RWLOCK' , 'OTHER') , ('PREEMPTIVE_ABR' , 'OTHER') ,
+('PREEMPTIVE_AUDIT_ACCESS_EVENTLOG' , 'OTHER') , ('PREEMPTIVE_AUDIT_ACCESS_SECLOG' , 'OTHER') ,
+('PREEMPTIVE_CLOSEBACKUPMEDIA' , 'OTHER') , ('PREEMPTIVE_CLOSEBACKUPTAPE' , 'OTHER') ,
+('PREEMPTIVE_CLOSEBACKUPVDIDEVICE' , 'OTHER') , ('PREEMPTIVE_CLUSAPI_CLUSTERRESOURCECONTROL' , 'OTHER') ,
+('PREEMPTIVE_COM_COCREATEINSTANCE' , 'OTHER') , ('PREEMPTIVE_COM_COGETCLASSOBJECT' , 'OTHER') ,
+('PREEMPTIVE_COM_CREATEACCESSOR' , 'OTHER') , ('PREEMPTIVE_COM_DELETEROWS' , 'OTHER') ,
+('PREEMPTIVE_COM_GETCOMMANDTEXT' , 'OTHER') , ('PREEMPTIVE_COM_GETDATA' , 'OTHER') ,
+('PREEMPTIVE_COM_GETNEXTROWS' , 'OTHER') , ('PREEMPTIVE_COM_GETRESULT' , 'OTHER') ,
+('PREEMPTIVE_COM_GETROWSBYBOOKMARK' , 'OTHER') , ('PREEMPTIVE_COM_LBFLUSH' , 'OTHER') ,
+('PREEMPTIVE_COM_LBLOCKREGION' , 'OTHER') , ('PREEMPTIVE_COM_LBREADAT' , 'OTHER') ,
+('PREEMPTIVE_COM_LBSETSIZE' , 'OTHER') , ('PREEMPTIVE_COM_LBSTAT' , 'OTHER') ,
+('PREEMPTIVE_COM_LBUNLOCKREGION' , 'OTHER') , ('PREEMPTIVE_COM_LBWRITEAT' , 'OTHER') ,
+('PREEMPTIVE_COM_QUERYINTERFACE' , 'OTHER') , ('PREEMPTIVE_COM_RELEASE' , 'OTHER') ,
+('PREEMPTIVE_COM_RELEASEACCESSOR' , 'OTHER') , ('PREEMPTIVE_COM_RELEASEROWS' , 'OTHER') ,
+('PREEMPTIVE_COM_RELEASESESSION' , 'OTHER') , ('PREEMPTIVE_COM_RESTARTPOSITION' , 'OTHER') ,
+('PREEMPTIVE_COM_SEQSTRMREAD' , 'OTHER') , ('PREEMPTIVE_COM_SEQSTRMREADANDWRITE' , 'OTHER') ,
+('PREEMPTIVE_COM_SETDATAFAILURE' , 'OTHER') , ('PREEMPTIVE_COM_SETPARAMETERINFO' , 'OTHER') ,
+('PREEMPTIVE_COM_SETPARAMETERPROPERTIES' , 'OTHER') , ('PREEMPTIVE_COM_STRMLOCKREGION' , 'OTHER') ,
+('PREEMPTIVE_COM_STRMSEEKANDREAD' , 'OTHER') , ('PREEMPTIVE_COM_STRMSEEKANDWRITE' , 'OTHER') ,
+('PREEMPTIVE_COM_STRMSETSIZE' , 'OTHER') , ('PREEMPTIVE_COM_STRMSTAT' , 'OTHER') ,
+('PREEMPTIVE_COM_STRMUNLOCKREGION' , 'OTHER') , ('PREEMPTIVE_CONSOLEWRITE' , 'OTHER') ,
+('PREEMPTIVE_CREATEPARAM' , 'OTHER') , ('PREEMPTIVE_DEBUG' , 'OTHER') , ('PREEMPTIVE_DFSADDLINK' ,
+'OTHER') , ('PREEMPTIVE_DFSLINKEXISTCHECK' , 'OTHER') , ('PREEMPTIVE_DFSLINKHEALTHCHECK' , 'OTHER') ,
+('PREEMPTIVE_DFSREMOVELINK' , 'OTHER') , ('PREEMPTIVE_DFSREMOVEROOT' , 'OTHER') ,
+('PREEMPTIVE_DFSROOTFOLDERCHECK' , 'OTHER') , ('PREEMPTIVE_DFSROOTINIT' , 'OTHER') ,
+('PREEMPTIVE_DFSROOTSHARECHECK' , 'OTHER') , ('PREEMPTIVE_DTC_ABORT' , 'OTHER') ,
+('PREEMPTIVE_DTC_ABORTREQUESTDONE' , 'OTHER') , ('PREEMPTIVE_DTC_BEGINOTHER' , 'OTHER') ,
+('PREEMPTIVE_DTC_COMMITREQUESTDONE' , 'OTHER') , ('PREEMPTIVE_DTC_ENLIST' , 'OTHER') ,
+('PREEMPTIVE_DTC_PREPAREREQUESTDONE' , 'OTHER') , ('PREEMPTIVE_FILESIZEGET' , 'OTHER') ,
+('PREEMPTIVE_FSAOTHER_ABORTOTHER' , 'OTHER') , ('PREEMPTIVE_FSAOTHER_COMMITOTHER' , 'OTHER') ,
+('PREEMPTIVE_FSAOTHER_STARTOTHER' , 'OTHER') , ('PREEMPTIVE_FSRECOVER_UNCONDITIONALUNDO' , 'OTHER') ,
+('PREEMPTIVE_GETRMINFO' , 'OTHER') , ('PREEMPTIVE_LOCKMONITOR' , 'OTHER') , ('PREEMPTIVE_MSS_RELEASE' ,
+'OTHER') , ('PREEMPTIVE_ODBCOPS' , 'OTHER') , ('PREEMPTIVE_OLE_UNINIT' , 'OTHER') ,
+('PREEMPTIVE_OTHER_ABORTORCOMMITTRAN' , 'OTHER') , ('PREEMPTIVE_OTHER_ABORTTRAN' , 'OTHER') ,
+('PREEMPTIVE_OTHER_GETDATASOURCE' , 'OTHER') , ('PREEMPTIVE_OTHER_GETLITERALINFO' , 'OTHER') ,
+('PREEMPTIVE_OTHER_GETPROPERTIES' , 'OTHER') , ('PREEMPTIVE_OTHER_GETPROPERTYINFO' , 'OTHER') ,
+('PREEMPTIVE_OTHER_GETSCHEMALOCK' , 'OTHER') , ('PREEMPTIVE_OTHER_JOINOTHER' , 'OTHER') ,
+('PREEMPTIVE_OTHER_RELEASE' , 'OTHER') , ('PREEMPTIVE_OTHER_SETPROPERTIES' , 'OTHER') ,
+('PREEMPTIVE_OTHEROPS' , 'OTHER') , ('PREEMPTIVE_OS_ACCEPTSECURITYCONTEXT' , 'OTHER') ,
+('PREEMPTIVE_OS_ACQUIRECREDENTIALSHANDLE' , 'OTHER') , ('PREEMPTIVE_OS_AU,TICATIONOPS' , 'OTHER') ,
+('PREEMPTIVE_OS_AUTHORIZATIONOPS' , 'OTHER') , ('PREEMPTIVE_OS_AUTHZGETINFORMATIONFROMCONTEXT' , 'OTHER')
+, ('PREEMPTIVE_OS_AUTHZINITIALIZECONTEXTFROMSID' , 'OTHER') ,
+('PREEMPTIVE_OS_AUTHZINITIALIZERESOURCEMANAGER' , 'OTHER') , ('PREEMPTIVE_OS_BACKUPREAD' , 'OTHER') ,
+('PREEMPTIVE_OS_CLOSEHANDLE' , 'OTHER') , ('PREEMPTIVE_OS_CLUSTEROPS' , 'OTHER') , ('PREEMPTIVE_OS_COMOPS'
+, 'OTHER') , ('PREEMPTIVE_OS_COMPLETEAUTHTOKEN' , 'OTHER') , ('PREEMPTIVE_OS_COPYFILE' , 'OTHER') ,
+('PREEMPTIVE_OS_CREATEDIRECTORY' , 'OTHER') , ('PREEMPTIVE_OS_CREATEFILE' , 'OTHER') ,
+('PREEMPTIVE_OS_CRYPTACQUIRECONTEXT' , 'OTHER') , ('PREEMPTIVE_OS_CRYPTIMPORTKEY' , 'OTHER') ,
+('PREEMPTIVE_OS_CRYPTOPS' , 'OTHER') , ('PREEMPTIVE_OS_DECRYPTMESSAGE' , 'OTHER') ,
+('PREEMPTIVE_OS_DELETEFILE' , 'OTHER') , ('PREEMPTIVE_OS_DELETESECURITYCONTEXT' , 'OTHER') ,
+('PREEMPTIVE_OS_DEVICEIOCONTROL' , 'OTHER') , ('PREEMPTIVE_OS_DEVICEOPS' , 'OTHER') ,
+('PREEMPTIVE_OS_DIRSVC_NETWORKOPS' , 'OTHER') , ('PREEMPTIVE_OS_DISCONNECTNAMEDPIPE' , 'OTHER') ,
+('PREEMPTIVE_OS_DOMAINSERVICESOPS' , 'OTHER') , ('PREEMPTIVE_OS_DSGETDCNAME' , 'OTHER') ,
+('PREEMPTIVE_OS_DTCOPS' , 'OTHER') , ('PREEMPTIVE_OS_ENCRYPTMESSAGE' , 'OTHER') , ('PREEMPTIVE_OS_FILEOPS'
+, 'OTHER') , ('PREEMPTIVE_OS_FINDFILE' , 'OTHER') , ('PREEMPTIVE_OS_FLUSHFILEBUFFERS' , 'OTHER') ,
+('PREEMPTIVE_OS_FORMATMESSAGE' , 'OTHER') , ('PREEMPTIVE_OS_FREECREDENTIALSHANDLE' , 'OTHER') ,
+('PREEMPTIVE_OS_FREELIBRARY' , 'OTHER') , ('PREEMPTIVE_OS_GENERICOPS' , 'OTHER') ,
+('PREEMPTIVE_OS_GETADDRINFO' , 'OTHER') , ('PREEMPTIVE_OS_GETCOMPRESSEDFILESIZE' , 'OTHER') ,
+('PREEMPTIVE_OS_GETDISKFREESPACE' , 'OTHER') , ('PREEMPTIVE_OS_GETFILEATTRIBUTES' , 'OTHER') ,
+('PREEMPTIVE_OS_GETFILESIZE' , 'OTHER') , ('PREEMPTIVE_OS_GETLONGPATHNAME' , 'OTHER') ,
+('PREEMPTIVE_OS_GETPROCADDRESS' , 'OTHER') , ('PREEMPTIVE_OS_GETVOLUMENAMEFORVOLUMEMOUNTPOINT' , 'OTHER')
+, ('PREEMPTIVE_OS_GETVOLUMEPATHNAME' , 'OTHER') , ('PREEMPTIVE_OS_INITIALIZESECURITYCONTEXT' , 'OTHER') ,
+('PREEMPTIVE_OS_LIBRARYOPS' , 'OTHER') , ('PREEMPTIVE_OS_LOADLIBRARY' , 'OTHER') ,
+('PREEMPTIVE_OS_LOGONUSER' , 'OTHER') , ('PREEMPTIVE_OS_LOOKUPACCOUNTSID' , 'OTHER') ,
+('PREEMPTIVE_OS_MESSAGEQUEUEOPS' , 'OTHER') , ('PREEMPTIVE_OS_MOVEFILE' , 'OTHER') ,
+('PREEMPTIVE_OS_NETGROUPGETUSERS' , 'OTHER') , ('PREEMPTIVE_OS_NETLOCALGROUPGETMEMBERS' , 'OTHER') ,
+('PREEMPTIVE_OS_NETUSERGETGROUPS' , 'OTHER') , ('PREEMPTIVE_OS_NETUSERGETLOCALGROUPS' , 'OTHER') ,
+('PREEMPTIVE_OS_NETUSERMODALSGET' , 'OTHER') , ('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICY' , 'OTHER') ,
+('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICYFREE' , 'OTHER') , ('PREEMPTIVE_OS_OPENDIRECTORY' , 'OTHER') ,
+('PREEMPTIVE_OS_PIPEOPS' , 'OTHER') , ('PREEMPTIVE_OS_PROCESSOPS' , 'OTHER') ,
+('PREEMPTIVE_OS_QUERYREGISTRY' , 'OTHER') , ('PREEMPTIVE_OS_QUERYSECURITYCONTEXTTOKEN' , 'OTHER') ,
+('PREEMPTIVE_OS_REMOVEDIRECTORY' , 'OTHER') , ('PREEMPTIVE_OS_REPORTEVENT' , 'OTHER') ,
+('PREEMPTIVE_OS_REVERTTOSELF' , 'OTHER') , ('PREEMPTIVE_OS_RSFXDEVICEOPS' , 'OTHER') ,
+('PREEMPTIVE_OS_SECURITYOPS' , 'OTHER') , ('PREEMPTIVE_OS_SERVICEOPS' , 'OTHER') ,
+('PREEMPTIVE_OS_SETENDOFFILE' , 'OTHER') , ('PREEMPTIVE_OS_SETFILEPOINTER' , 'OTHER') ,
+('PREEMPTIVE_OS_SETFILEVALIDDATA' , 'OTHER') , ('PREEMPTIVE_OS_SETNAMEDSECURITYINFO' , 'OTHER') ,
+('PREEMPTIVE_OS_SQLCLROPS' , 'OTHER') , ('PREEMPTIVE_OS_SQMLAUNCH' , 'OTHER') ,
+('PREEMPTIVE_OS_VERIFYSIGNATURE' , 'OTHER') , ('PREEMPTIVE_OS_VSSOPS' , 'OTHER') ,
+('PREEMPTIVE_OS_WAITFORSINGLEOBJECT' , 'OTHER') , ('PREEMPTIVE_OS_WINSOCKOPS' , 'OTHER') ,
+('PREEMPTIVE_OS_WRITEFILE' , 'OTHER') , ('PREEMPTIVE_OS_WRITEFILEGATHER' , 'OTHER') ,
+('PREEMPTIVE_OS_WSASETLASTERROR' , 'OTHER') , ('PREEMPTIVE_REENLIST' , 'OTHER') , ('PREEMPTIVE_RESIZELOG'
+, 'OTHER') , ('PREEMPTIVE_ROLLFORWARDREDO' , 'OTHER') , ('PREEMPTIVE_ROLLFORWARDUNDO' , 'OTHER') ,
+('PREEMPTIVE_SB_STOPENDPOINT' , 'OTHER') , ('PREEMPTIVE_SERVER_STARTUP' , 'OTHER') ,
+('PREEMPTIVE_SETRMINFO' , 'OTHER') , ('PREEMPTIVE_SHAREDMEM_GETDATA' , 'OTHER') , ('PREEMPTIVE_SNIOPEN' ,
+'OTHER') , ('PREEMPTIVE_SOSHOST' , 'OTHER') , ('PREEMPTIVE_SOSTESTING' , 'OTHER') , ('PREEMPTIVE_STARTRM'
+, 'OTHER') , ('PREEMPTIVE_STREAMFCB_CHECKPOINT' , 'OTHER') , ('PREEMPTIVE_STREAMFCB_RECOVER' , 'OTHER') ,
+('PREEMPTIVE_STRESSDRIVER' , 'OTHER') , ('PREEMPTIVE_TESTING' , 'OTHER') , ('PREEMPTIVE_TRANSIMPORT' ,
+'OTHER') , ('PREEMPTIVE_UNMARSHALPROPAGATIONTOKEN' , 'OTHER') , ('PREEMPTIVE_VSS_CREATESNAPSHOT' ,
+'OTHER') , ('PREEMPTIVE_VSS_CREATEVOLUMESNAPSHOT' , 'OTHER') , ('PREEMPTIVE_XE_CALLBACKEXECUTE' , 'OTHER')
+, ('PREEMPTIVE_XE_DISPATCHER' , 'OTHER') , ('PREEMPTIVE_XE_ENGINEINIT' , 'OTHER') ,
+('PREEMPTIVE_XE_GETTARGETSTATE' , 'OTHER') , ('PREEMPTIVE_XE_SESSIONCOMMIT' , 'OTHER') ,
+('PREEMPTIVE_XE_TARGETFINALIZE' , 'OTHER') , ('PREEMPTIVE_XE_TARGETINIT' , 'OTHER') ,
+('PREEMPTIVE_XE_TIMERRUN' , 'OTHER') , ('PREEMPTIVE_XETESTING' , 'OTHER') , ('PREEMPTIVE_XXX' , 'OTHER') ,
+('PRINT_ROLLBACK_PROGRESS' , 'OTHER') , ('QNMANAGER_ACQUIRE' , 'OTHER') , ('QPJOB_KILL' , 'OTHER') ,
+('QPJOB_WAITFOR_ABORT' , 'OTHER') , ('QRY_MEM_GRANT_INFO_MUTEX' , 'OTHER') , ('QUERY_ERRHDL_SERVICE_DONE'
+, 'OTHER') , ('QUERY_EXECUTION_INDEX_SORT_EVENT_OPEN' , 'OTHER') , ('QUERY_NOTIFICATION_MGR_MUTEX' ,
+'OTHER') , ('QUERY_NOTIFICATION_SUBSCRIPTION_MUTEX' , 'OTHER') , ('QUERY_NOTIFICATION_TABLE_MGR_MUTEX' ,
+'OTHER') , ('QUERY_NOTIFICATION_UNITTEST_MUTEX' , 'OTHER') , ('QUERY_OPTIMIZER_PRINT_MUTEX' , 'OTHER') ,
+('QUERY_TRACEOUT' , 'OTHER') , ('QUERY_WAIT_ERRHDL_SERVICE' , 'OTHER') , ('RECOVER_CHANGEDB' , 'OTHER') ,
+('REPL_CACHE_ACCESS' , 'REPLICATION') , ('REPL_HISTORYCACHE_ACCESS' , 'OTHER') , ('REPL_SCHEMA_ACCESS' ,
+'OTHER') , ('REPL_TRANHASHTABLE_ACCESS' , 'OTHER') , ('REPLICA_WRITES' , 'OTHER') ,
+('REQUEST_DISPENSER_PAUSE' , 'BACKUP') , ('REQUEST_FOR_DEADLOCK_SEARCH' , 'LOCK') , ('RESMGR_THROTTLED' ,
+'OTHER') , ('RESOURCE_QUERY_SEMAPHORE_COMPILE' , 'QUERY') , ('RESOURCE_QUEUE' , 'OTHER') ,
+('RESOURCE_SEMAPHORE' , 'OTHER') , ('RESOURCE_SEMAPHORE_MUTEX' , 'MEMORY') ,
+('RESOURCE_SEMAPHORE_QUERY_COMPILE' , 'MEMORY') , ('RESOURCE_SEMAPHORE_SMALL_QUERY' , 'MEMORY') ,
+('RG_RECONFIG' , 'OTHER') , ('SEC_DROP_TEMP_KEY' , 'SECURITY') , ('SECURITY_MUTEX' , 'OTHER') ,
+('SEQUENTIAL_GUID' , 'OTHER') , ('SERVER_IDLE_CHECK' , 'OTHER') , ('SHUTDOWN' , 'OTHER') ,
+('SLEEP_BPOOL_FLUSH' , 'OTHER') , ('SLEEP_DBSTARTUP' , 'OTHER') , ('SLEEP_DCOMSTARTUP' , 'OTHER') ,
+('SLEEP_MSDBSTARTUP' , 'OTHER') , ('SLEEP_SYSTEMTASK' , 'OTHER') , ('SLEEP_TASK' , 'OTHER') ,
+('SLEEP_TEMPDBSTARTUP' , 'OTHER') , ('SNI_CRITICAL_SECTION' , 'OTHER') , ('SNI_HTTP_ACCEPT' , 'OTHER') ,
+('SNI_HTTP_WAITFOR_0_DISCON' , 'OTHER') , ('SNI_LISTENER_ACCESS' , 'OTHER') , ('SNI_TASK_COMPLETION' ,
+'OTHER') , ('SOAP_READ' , 'OTHER') , ('SOAP_WRITE' , 'OTHER') , ('SOS_CALLBACK_REMOVAL' , 'OTHER') ,
+('SOS_DISPATCHER_MUTEX' , 'OTHER') , ('SOS_LOCALALLOCATORLIST' , 'OTHER') , ('SOS_MEMORY_USAGE_ADJUSTMENT'
+, 'OTHER') , ('SOS_OBJECT_STORE_DESTROY_MUTEX' , 'OTHER') , ('SOS_PROCESS_AFFINITY_MUTEX' , 'OTHER') ,
+('SOS_RESERVEDMEMBLOCKLIST' , 'OTHER') , ('SOS_SCHEDULER_YIELD' , 'SQLOS') , ('SOS_SMALL_PAGE_ALLOC' ,
+'OTHER') , ('SOS_STACKSTORE_INIT_MUTEX' , 'OTHER') , ('SOS_SYNC_TASK_ENQUEUE_EVENT' , 'OTHER') ,
+('SOS_VIRTUALMEMORY_LOW' , 'OTHER') , ('SOSHOST_EVENT' , 'CLR') , ('SOSHOST_OTHER' , 'CLR') ,
+('SOSHOST_MUTEX' , 'CLR') , ('SOSHOST_ROWLOCK' , 'CLR') , ('SOSHOST_RWLOCK' , 'CLR') ,
+('SOSHOST_SEMAPHORE' , 'CLR') , ('SOSHOST_SLEEP' , 'CLR') , ('SOSHOST_TRACELOCK' , 'CLR') ,
+('SOSHOST_WAITFORDONE' , 'CLR') , ('SQLCLR_APPDOMAIN' , 'CLR') , ('SQLCLR_ASSEMBLY' , 'CLR') ,
+('SQLCLR_DEADLOCK_DETECTION' , 'CLR') , ('SQLCLR_QUANTUM_PUNISHMENT' , 'CLR') , ('SQLSORT_NORMMUTEX' ,
+'OTHER') , ('SQLSORT_SORTMUTEX' , 'OTHER') , ('SQLTRACE_BUFFER_FLUSH ' , 'TRACE') , ('SQLTRACE_LOCK' ,
+'OTHER') , ('SQLTRACE_SHUTDOWN' , 'OTHER') , ('SQLTRACE_WAIT_ENTRIES' , 'OTHER') , ('SRVPROC_SHUTDOWN' ,
+'OTHER') , ('TEMPOBJ' , 'OTHER') , ('THREADPOOL' , 'SQLOS') , ('TIMEPRIV_TIMEPERIOD' , 'OTHER') ,
+('TRACE_EVTNOTIF' , 'OTHER') , ('TRACEWRITE' , 'OTHER') , ('TRAN_MARKLATCH_DT' , 'TRAN_MARKLATCH') ,
+('TRAN_MARKLATCH_EX' , 'TRAN_MARKLATCH') , ('TRAN_MARKLATCH_KP' , 'TRAN_MARKLATCH') , ('TRAN_MARKLATCH_NL'
+, 'TRAN_MARKLATCH') , ('TRAN_MARKLATCH_SH' , 'TRAN_MARKLATCH') , ('TRAN_MARKLATCH_UP' , 'TRAN_MARKLATCH')
+, ('OTHER_MUTEX' , 'OTHER') , ('UTIL_PAGE_ALLOC' , 'OTHER') , ('VIA_ACCEPT' , 'OTHER') ,
+('VIEW_DEFINITION_MUTEX' , 'OTHER') , ('WAIT_FOR_RESULTS' , 'OTHER') , ('WAITFOR' , 'WAITFOR') ,
+('WAITFOR_TASKSHUTDOWN' , 'OTHER') , ('WAITSTAT_MUTEX' , 'OTHER') , ('WCC' , 'OTHER') , ('WORKTBL_DROP' ,
+'OTHER') , ('WRITE_COMPLETION' , 'OTHER') , ('WRITELOG' , 'I/O') , ('XACT_OWN_OTHER' , 'OTHER') ,
+('XACT_RECLAIM_SESSION' , 'OTHER') , ('XACTLOCKINFO' , 'OTHER') , ('XACTWORKSPACE_MUTEX' , 'OTHER') ,
+('XE_BUFFERMGR_ALLPROCESSED_EVENT' , 'XEVENT') , ('XE_BUFFERMGR_FREEBUF_EVENT' , 'XEVENT') ,
+('XE_DISPATCHER_CONFIG_SESSION_LIST' , 'XEVENT') , ('XE_DISPATCHER_JOIN' , 'XEVENT') ,
+('XE_DISPATCHER_WAIT' , 'XEVENT') , ('XE_MODULEMGR_SYNC' , 'XEVENT') , ('XE_OLS_LOCK' , 'XEVENT') ,
+('XE_PACKAGE_LOCK_BACKOFF' , 'XEVENT') , ('XE_SERVICES_EVENTMANUAL' , 'XEVENT') , ('XE_SERVICES_MUTEX' ,
+'XEVENT') , ('XE_SERVICES_RWLOCK' , 'XEVENT') , ('XE_SESSION_CREATE_SYNC' , 'XEVENT') ,
+('XE_SESSION_FLUSH' , 'XEVENT') , ('XE_SESSION_SYNC' , 'XEVENT') , ('XE_STM_CREATE' , 'XEVENT') ,
+('XE_TIMER_EVENT' , 'XEVENT') , ('XE_TIMER_MUTEX' , 'XEVENT')
+, ('XE_TIMER_TASK_DONE' , 'XEVENT');
+
+
+INSERT @w1 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
+SELECT
+ WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
+, WaitTimeInMs = SUM(wait_time_ms)
+, WaitTaskCount = SUM(waiting_tasks_count)
+, CollectionDate = GETDATE()
+FROM sys.dm_os_wait_stats
+WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
+(
+ SELECT WaitType FROM @w3
+)
+AND [waiting_tasks_count] > 0
+GROUP BY wait_type
+
+WAITFOR DELAY @delayInterval;
+
+INSERT @w2 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
+SELECT
+ WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
+, WaitTimeInMs = SUM(wait_time_ms)
+, WaitTaskCount = SUM(waiting_tasks_count)
+, CollectionDate = GETDATE()
+FROM sys.dm_os_wait_stats
+WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
+(
+ SELECT WaitType FROM @w3
+)
+AND [waiting_tasks_count] > 0
+GROUP BY wait_type;
+
+
+INSERT @w5 (WaitCategory, WaitTimeInMs, WaitTaskCount)
+SELECT WaitCategory
+, WaitTimeInMs = SUM(WaitTimeInMs)
+, WaitTaskCount = SUM(WaitTaskCount)
+FROM
+(
+SELECT
+ WaitCategory = ISNULL(T4.WaitCategory, 'OTHER')
+, WaitTimeInMs = (T2.WaitTimeInMs - T1.WaitTimeInMs)
+, WaitTaskCount = (T2.WaitTaskCount - T1.WaitTaskCount)
+--, WaitTimeInMsPerSec = ((T2.WaitTimeInMs - T1.WaitTimeInMs) / CAST(DATEDIFF(SECOND, T1.CollectionDate, T2.CollectionDate) as float))
+FROM @w1 T1
+INNER JOIN @w2 T2 ON T2.WaitType = T1.WaitType
+LEFT JOIN @w4 T4 ON T4.WaitType = T1.WaitType
+WHERE T2.WaitTaskCount - T1.WaitTaskCount > 0
+) as G
+GROUP BY G.WaitCategory;
+
+
+
+SELECT
+---- measurement
+ measurement = 'Wait time (ms)'
+---- tags
+, servername= REPLACE(@@SERVERNAME, '\', ':')
+, type = 'Wait stats'
+---- values
+, [I/O] = SUM([I/O])
+, [Latch] = SUM([LATCH])
+, [Lock] = SUM([LOCK])
+, [Network] = SUM([NETWORK])
+, [Service broker] = SUM([SERVICE BROKER])
+, [Memory] = SUM([MEMORY])
+, [Buffer] = SUM([BUFFER])
+, [CLR] = SUM([CLR])
+, [SQLOS] = SUM([SQLOS])
+, [XEvent] = SUM([XEVENT])
+, [Other] = SUM([OTHER])
+, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
+FROM
+(
+SELECT
+ [I/O] = ISNULL([I/O] , 0)
+, [MEMORY] = ISNULL([MEMORY] , 0)
+, [BUFFER] = ISNULL([BUFFER] , 0)
+, [LATCH] = ISNULL([LATCH] , 0)
+, [LOCK] = ISNULL([LOCK] , 0)
+, [NETWORK] = ISNULL([NETWORK] , 0)
+, [SERVICE BROKER] = ISNULL([SERVICE BROKER] , 0)
+, [CLR] = ISNULL([CLR] , 0)
+, [XEVENT] = ISNULL([XEVENT] , 0)
+, [SQLOS] = ISNULL([SQLOS] , 0)
+, [OTHER] = ISNULL([OTHER] , 0)
+FROM @w5 as P
+PIVOT
+(
+ SUM(WaitTimeInMs)
+ FOR WaitCategory IN ([I/O], [LATCH], [LOCK], [NETWORK], [SERVICE BROKER], [MEMORY], [BUFFER], [CLR], [XEVENT], [SQLOS], [OTHER])
+) AS PivotTable
+) as T
+
+UNION ALL
+
+SELECT
+---- measurement
+ measurement = 'Wait tasks'
+---- tags
+, server_name= REPLACE(@@SERVERNAME, '\', ':')
+, type = 'Wait stats'
+---- values
+, [I/O] = SUM([I/O])
+, [Latch] = SUM([LATCH])
+, [Lock] = SUM([LOCK])
+, [Network] = SUM([NETWORK])
+, [Service broker] = SUM([SERVICE BROKER])
+, [Memory] = SUM([MEMORY])
+, [Buffer] = SUM([BUFFER])
+, [CLR] = SUM([CLR])
+, [SQLOS] = SUM([SQLOS])
+, [XEvent] = SUM([XEVENT])
+, [Other] = SUM([OTHER])
+, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
+FROM
+(
+SELECT
+ [I/O] = ISNULL([I/O] , 0)
+, [MEMORY] = ISNULL([MEMORY] , 0)
+, [BUFFER] = ISNULL([BUFFER] , 0)
+, [LATCH] = ISNULL([LATCH] , 0)
+, [LOCK] = ISNULL([LOCK] , 0)
+, [NETWORK] = ISNULL([NETWORK] , 0)
+, [SERVICE BROKER] = ISNULL([SERVICE BROKER] , 0)
+, [CLR] = ISNULL([CLR] , 0)
+, [XEVENT] = ISNULL([XEVENT] , 0)
+, [SQLOS] = ISNULL([SQLOS] , 0)
+, [OTHER] = ISNULL([OTHER] , 0)
+FROM @w5 as P
+PIVOT
+(
+ SUM(WaitTaskCount)
+ FOR WaitCategory IN ([I/O], [LATCH], [LOCK], [NETWORK], [SERVICE BROKER], [MEMORY], [BUFFER], [CLR], [XEVENT], [SQLOS], [OTHER])
+) AS PivotTable
+) as T;
+`
+
+const sqlVolumeSpace string = `SET DEADLOCK_PRIORITY -10;
+SET NOCOUNT ON;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+IF OBJECT_ID('tempdb..#volumestats') IS NOT NULL
+ DROP TABLE #volumestats;
+SELECT DISTINCT
+ volume = REPLACE(vs.volume_mount_point, '\', '')
+ + CASE WHEN LEN(vs.logical_volume_name) > 0
+ THEN ' (' + vs.logical_volume_name + ')'
+ ELSE '' END
+, total_bytes = vs.total_bytes
+, available_bytes = vs.available_bytes
+, used_bytes = vs.total_bytes - vs.available_bytes
+, used_percent = 100 * CAST(ROUND((vs.total_bytes - vs.available_bytes) * 1. / vs.total_bytes, 2) as decimal(5,2))
+INTO #volumestats
+FROM sys.master_files AS f
+CROSS APPLY sys.dm_os_volume_stats(f.database_id, f.file_id) vs
+
+DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
+DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX)
+
+SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(volume)
+FROM (SELECT DISTINCT volume FROM #volumestats) AS bl
+
+--Prepare the PIVOT query using the dynamic
+SET @DynamicPivotQuery = N'
+SELECT measurement = ''Volume total space (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''OS Volume space''
+, ' + @ColumnName + ' FROM
+(
+SELECT volume, total_bytes
+FROM #volumestats
+) as V
+PIVOT(SUM(total_bytes) FOR volume IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Volume available space (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''OS Volume space''
+, ' + @ColumnName + ' FROM
+(
+SELECT volume, available_bytes
+FROM #volumestats
+) as V
+PIVOT(SUM(available_bytes) FOR volume IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Volume used space (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''OS Volume space''
+, ' + @ColumnName + ' FROM
+(
+SELECT volume, used_bytes
+FROM #volumestats
+) as V
+PIVOT(SUM(used_bytes) FOR volume IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Volume used space (%)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''OS Volume space''
+, ' + @ColumnName + ' FROM
+(
+SELECT volume, used_percent
+FROM #volumestats
+) as V
+PIVOT(SUM(used_percent) FOR volume IN (' + @ColumnName + ')) AS PVTTable'
+
+EXEC sp_executesql @DynamicPivotQuery;
+`
diff --git a/plugins/inputs/sqlserver/sqlqueriesV2.go b/plugins/inputs/sqlserver/sqlqueriesV2.go
new file mode 100644
index 0000000000000..5cc8d4953689a
--- /dev/null
+++ b/plugins/inputs/sqlserver/sqlqueriesV2.go
@@ -0,0 +1,1364 @@
+package sqlserver
+
+import (
+ _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization
+)
+
+// Queries - V2
+// Thanks Bob Ward (http://aka.ms/bobwardms)
+// and the folks at Stack Overflow (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs)
+// for putting most of the memory clerk definitions online!
+/*
+The SQL scripts use a series of IF and CASE statemens to choose the correct query based on edition and version of SQL Server, below the meaning of the numbers:
+EngineEdition:
+1 = Personal or Desktop Engine (Not available in SQL Server 2005 (9.x) and later versions.)
+2 = Standard (This is returned for Standard, Web, and Business Intelligence.)
+3 = Enterprise (This is returned for Evaluation, Developer, and Enterprise editions.)
+4 = Express (This is returned for Express, Express with Tools, and Express with Advanced Services)
+5 = SQL Database
+6 = Microsoft Azure Synapse Analytics (formerly SQL Data Warehouse)
+8 = Managed Instance
+
+ProductVersion:
+see https://sqlserverbuilds.blogspot.com/ for all the details about the version number of SQL Server
+*/
+
+const sqlMemoryClerkV2 = `
+SET DEADLOCK_PRIORITY -10;
+DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+ ,@Columns AS nvarchar(max) = ''
+
+IF @MajorMinorVersion >= 1100
+ SET @Columns += N'mc.[pages_kb]';
+ELSE
+ SET @Columns += N'mc.[single_pages_kb] + mc.[multi_pages_kb]';
+
+SET @SqlStatement = N'
+SELECT
+ ''sqlserver_memory_clerks'' AS [measurement]
+ ,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance]
+ ,DB_NAME() AS [database_name]
+ ,ISNULL(clerk_names.[name],mc.[type]) AS [clerk_type]
+ ,SUM(' + @Columns + N') AS [size_kb]
+FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK)
+LEFT OUTER JOIN ( VALUES
+ (''CACHESTORE_BROKERDSH'',''Service Broker Dialog Security Header Cache'')
+ ,(''CACHESTORE_BROKERKEK'',''Service Broker Key Exchange Key Cache'')
+ ,(''CACHESTORE_BROKERREADONLY'',''Service Broker (Read-Only)'')
+ ,(''CACHESTORE_BROKERRSB'',''Service Broker Null Remote Service Binding Cache'')
+ ,(''CACHESTORE_BROKERTBLACS'',''Broker dormant rowsets'')
+ ,(''CACHESTORE_BROKERTO'',''Service Broker Transmission Object Cache'')
+ ,(''CACHESTORE_BROKERUSERCERTLOOKUP'',''Service Broker user certificates lookup result cache'')
+ ,(''CACHESTORE_CLRPROC'',''CLR Procedure Cache'')
+ ,(''CACHESTORE_CLRUDTINFO'',''CLR UDT Info'')
+ ,(''CACHESTORE_COLUMNSTOREOBJECTPOOL'',''Column Store Object Pool'')
+ ,(''CACHESTORE_CONVPRI'',''Conversation Priority Cache'')
+ ,(''CACHESTORE_EVENTS'',''Event Notification Cache'')
+ ,(''CACHESTORE_FULLTEXTSTOPLIST'',''Full Text Stoplist Cache'')
+ ,(''CACHESTORE_NOTIF'',''Notification Store'')
+ ,(''CACHESTORE_OBJCP'',''Object Plans'')
+ ,(''CACHESTORE_PHDR'',''Bound Trees'')
+ ,(''CACHESTORE_SEARCHPROPERTYLIST'',''Search Property List Cache'')
+ ,(''CACHESTORE_SEHOBTCOLUMNATTRIBUTE'',''SE Shared Column Metadata Cache'')
+ ,(''CACHESTORE_SQLCP'',''SQL Plans'')
+ ,(''CACHESTORE_STACKFRAMES'',''SOS_StackFramesStore'')
+ ,(''CACHESTORE_SYSTEMROWSET'',''System Rowset Store'')
+ ,(''CACHESTORE_TEMPTABLES'',''Temporary Tables & Table Variables'')
+ ,(''CACHESTORE_VIEWDEFINITIONS'',''View Definition Cache'')
+ ,(''CACHESTORE_XML_SELECTIVE_DG'',''XML DB Cache (Selective)'')
+ ,(''CACHESTORE_XMLDBATTRIBUTE'',''XML DB Cache (Attribute)'')
+ ,(''CACHESTORE_XMLDBELEMENT'',''XML DB Cache (Element)'')
+ ,(''CACHESTORE_XMLDBTYPE'',''XML DB Cache (Type)'')
+ ,(''CACHESTORE_XPROC'',''Extended Stored Procedures'')
+ ,(''MEMORYCLERK_FILETABLE'',''Memory Clerk (File Table)'')
+ ,(''MEMORYCLERK_FSCHUNKER'',''Memory Clerk (FS Chunker)'')
+ ,(''MEMORYCLERK_FULLTEXT'',''Full Text'')
+ ,(''MEMORYCLERK_FULLTEXT_SHMEM'',''Full-text IG'')
+ ,(''MEMORYCLERK_HADR'',''HADR'')
+ ,(''MEMORYCLERK_HOST'',''Host'')
+ ,(''MEMORYCLERK_LANGSVC'',''Language Service'')
+ ,(''MEMORYCLERK_LWC'',''Light Weight Cache'')
+ ,(''MEMORYCLERK_QSRANGEPREFETCH'',''QS Range Prefetch'')
+ ,(''MEMORYCLERK_SERIALIZATION'',''Serialization'')
+ ,(''MEMORYCLERK_SNI'',''SNI'')
+ ,(''MEMORYCLERK_SOSMEMMANAGER'',''SOS Memory Manager'')
+ ,(''MEMORYCLERK_SOSNODE'',''SOS Node'')
+ ,(''MEMORYCLERK_SOSOS'',''SOS Memory Clerk'')
+ ,(''MEMORYCLERK_SQLBUFFERPOOL'',''Buffer Pool'')
+ ,(''MEMORYCLERK_SQLCLR'',''CLR'')
+ ,(''MEMORYCLERK_SQLCLRASSEMBLY'',''CLR Assembly'')
+ ,(''MEMORYCLERK_SQLCONNECTIONPOOL'',''Connection Pool'')
+ ,(''MEMORYCLERK_SQLGENERAL'',''General'')
+ ,(''MEMORYCLERK_SQLHTTP'',''HTTP'')
+ ,(''MEMORYCLERK_SQLLOGPOOL'',''Log Pool'')
+ ,(''MEMORYCLERK_SQLOPTIMIZER'',''SQL Optimizer'')
+ ,(''MEMORYCLERK_SQLQERESERVATIONS'',''SQL Reservations'')
+ ,(''MEMORYCLERK_SQLQUERYCOMPILE'',''SQL Query Compile'')
+ ,(''MEMORYCLERK_SQLQUERYEXEC'',''SQL Query Exec'')
+ ,(''MEMORYCLERK_SQLQUERYPLAN'',''SQL Query Plan'')
+ ,(''MEMORYCLERK_SQLSERVICEBROKER'',''SQL Service Broker'')
+ ,(''MEMORYCLERK_SQLSERVICEBROKERTRANSPORT'',''Unified Communication Stack'')
+ ,(''MEMORYCLERK_SQLSOAP'',''SQL SOAP'')
+ ,(''MEMORYCLERK_SQLSOAPSESSIONSTORE'',''SQL SOAP (Session Store)'')
+ ,(''MEMORYCLERK_SQLSTORENG'',''SQL Storage Engine'')
+ ,(''MEMORYCLERK_SQLUTILITIES'',''SQL Utilities'')
+ ,(''MEMORYCLERK_SQLXML'',''SQL XML'')
+ ,(''MEMORYCLERK_SQLXP'',''SQL XP'')
+ ,(''MEMORYCLERK_TRACE_EVTNOTIF'',''Trace Event Notification'')
+ ,(''MEMORYCLERK_XE'',''XE Engine'')
+ ,(''MEMORYCLERK_XE_BUFFER'',''XE Buffer'')
+ ,(''MEMORYCLERK_XTP'',''In-Memory OLTP'')
+ ,(''OBJECTSTORE_LBSS'',''Lbss Cache (Object Store)'')
+ ,(''OBJECTSTORE_LOCK_MANAGER'',''Lock Manager (Object Store)'')
+ ,(''OBJECTSTORE_SECAUDIT_EVENT_BUFFER'',''Audit Event Buffer (Object Store)'')
+ ,(''OBJECTSTORE_SERVICE_BROKER'',''Service Broker (Object Store)'')
+ ,(''OBJECTSTORE_SNI_PACKET'',''SNI Packet (Object Store)'')
+ ,(''OBJECTSTORE_XACT_CACHE'',''Transactions Cache (Object Store)'')
+ ,(''USERSTORE_DBMETADATA'',''DB Metadata (User Store)'')
+ ,(''USERSTORE_OBJPERM'',''Object Permissions (User Store)'')
+ ,(''USERSTORE_SCHEMAMGR'',''Schema Manager (User Store)'')
+ ,(''USERSTORE_SXC'',''SXC (User Store)'')
+ ,(''USERSTORE_TOKENPERM'',''Token Permissions (User Store)'')
+ ,(''USERSTORE_QDSSTMT'',''QDS Statement Buffer (Pre-persist)'')
+ ,(''CACHESTORE_QDSRUNTIMESTATS'',''QDS Runtime Stats (Pre-persist)'')
+ ,(''CACHESTORE_QDSCONTEXTSETTINGS'',''QDS Unique Context Settings'')
+ ,(''MEMORYCLERK_QUERYDISKSTORE'',''QDS General'')
+ ,(''MEMORYCLERK_QUERYDISKSTORE_HASHMAP'',''QDS Query/Plan Hash Table'')
+) AS clerk_names([system_name],[name])
+ ON mc.[type] = clerk_names.[system_name]
+GROUP BY
+ ISNULL(clerk_names.[name], mc.[type])
+HAVING
+ SUM(' + @Columns + N') >= 1024
+OPTION(RECOMPILE);
+'
+
+EXEC(@SqlStatement)
+`
+
+// Conditional check based on Azure SQL DB OR On-prem SQL Server
+// EngineEdition=5 is Azure SQL DB
+const sqlDatabaseIOV2 = `
+SET DEADLOCK_PRIORITY -10;
+DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+
+IF @EngineEdition = 5
+BEGIN
+ SET @SqlStatement = '
+ SELECT
+ ''sqlserver_database_io'' As [measurement]
+ ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
+ ,DB_NAME() as database_name
+ ,vfs.database_id -- /*needed as tempdb is different for each Azure SQL DB as grouping has to be by logical server + db_name + database_id*/
+ ,vfs.file_id
+ ,vfs.io_stall_read_ms AS read_latency_ms
+ ,vfs.num_of_reads AS reads
+ ,vfs.num_of_bytes_read AS read_bytes
+ ,vfs.io_stall_write_ms AS write_latency_ms
+ ,vfs.num_of_writes AS writes
+ ,vfs.num_of_bytes_written AS write_bytes
+ ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms]
+ ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms]
+ ,CASE
+ WHEN (vfs.database_id = 0) THEN ''RBPEX''
+ ELSE b.logical_filename
+ END as logical_filename
+ ,CASE
+ WHEN (vfs.database_id = 0) THEN ''RBPEX''
+ ELSE b.physical_filename
+ END as physical_filename
+ ,CASE WHEN vfs.file_id = 2 THEN ''LOG'' ELSE ''DATA'' END AS file_type
+ ,ISNULL(size,0)/128 AS current_size_mb
+ ,ISNULL(FILEPROPERTY(b.logical_filename,''SpaceUsed'')/128,0) as space_used_mb
+ FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs
+ -- needed to get Tempdb file names on Azure SQL DB so you can join appropriately. Without this had a bug where join was only on file_id
+ LEFT OUTER join
+ (
+ SELECT DB_ID() as database_id, file_id, logical_filename=name COLLATE SQL_Latin1_General_CP1_CI_AS
+ , physical_filename = physical_name COLLATE SQL_Latin1_General_CP1_CI_AS, size from sys.database_files
+ where type <> 2
+ UNION ALL
+ SELECT 2 as database_id, file_id, logical_filename = name , physical_filename = physical_name, size
+ from tempdb.sys.database_files
+ ) b ON b.database_id = vfs.database_id and b.file_id = vfs.file_id
+ where vfs.database_id IN (DB_ID(),0,2)
+ '
+ EXEC sp_executesql @SqlStatement
+
+END
+ELSE IF @EngineEdition IN (2,3,4) /*Standard,Enterprise,Express*/
+BEGIN
+
+ DECLARE @MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int) * 100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+
+ DECLARE @Columns as nvarchar(max) = ''
+ DECLARE @Tables as nvarchar(max) = ''
+
+ IF @MajorMinorVersion >= 1050 BEGIN
+ /*in [volume_mount_point] any trailing "\" char will be removed by telegraf */
+ SET @Columns += N',[volume_mount_point]'
+ SET @Tables += N'CROSS APPLY sys.dm_os_volume_stats(vfs.[database_id], vfs.[file_id]) AS vs'
+ END
+
+ IF @MajorMinorVersion > 1100 BEGIN
+ SET @Columns += N'
+,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms]
+,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms]'
+ END
+
+ SET @SqlStatement = N'
+ SELECT
+ ''sqlserver_database_io'' AS [measurement]
+ ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
+ ,DB_NAME(vfs.[database_id]) AS [database_name]
+ ,COALESCE(mf.[physical_name],''RBPEX'') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension
+ ,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension
+ ,mf.[type_desc] AS [file_type]
+ ,vfs.[io_stall_read_ms] AS [read_latency_ms]
+ ,vfs.[num_of_reads] AS [reads]
+ ,vfs.[num_of_bytes_read] AS [read_bytes]
+ ,vfs.[io_stall_write_ms] AS [write_latency_ms]
+ ,vfs.[num_of_writes] AS [writes]
+ ,vfs.[num_of_bytes_written] AS [write_bytes]'
+ + @Columns + N'
+ FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs
+ INNER JOIN sys.master_files AS mf WITH (NOLOCK)
+ ON vfs.[database_id] = mf.[database_id] AND vfs.[file_id] = mf.[file_id]
+ '
+ + @Tables;
+
+ EXEC sp_executesql @SqlStatement
+
+END
+`
+
+// Conditional check based on Azure SQL DB, Azure SQL Managed instance OR On-prem SQL Server
+// EngineEdition=5 is Azure SQL DB, EngineEdition=8 is Managed instance
+
+const sqlServerPropertiesV2 = `
+SET DEADLOCK_PRIORITY -10;
+DECLARE
+ @SqlStatement AS nvarchar(max) = ''
+ ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+
+IF @EngineEdition = 8 /*Managed Instance*/
+ SET @SqlStatement = 'SELECT TOP 1 ''sqlserver_server_properties'' AS [measurement],
+ REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance],
+ DB_NAME() as [database_name],
+ virtual_core_count AS cpu_count,
+ (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory,
+ sku,
+ @EngineEdition AS engine_edition,
+ hardware_generation AS hardware_type,
+ reserved_storage_mb AS total_storage_mb,
+ (reserved_storage_mb - storage_space_used_mb) AS available_storage_mb,
+ (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime,
+ SERVERPROPERTY(''ProductVersion'') AS sql_version,
+ db_online,
+ db_restoring,
+ db_recovering,
+ db_recoveryPending,
+ db_suspect
+ FROM sys.server_resource_stats
+ CROSS APPLY
+ (SELECT SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online,
+ SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring,
+ SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering,
+ SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending,
+ SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect,
+ SUM( CASE WHEN state = 6 or state = 10 THEN 1 ELSE 0 END ) AS db_offline
+ FROM sys.databases
+ ) AS dbs
+ ORDER BY start_time DESC';
+
+IF @EngineEdition = 5 /*Azure SQL DB*/
+ SET @SqlStatement = 'SELECT ''sqlserver_server_properties'' AS [measurement],
+ REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance],
+ DB_NAME() as [database_name],
+ (SELECT count(*) FROM sys.dm_os_schedulers WHERE status = ''VISIBLE ONLINE'') AS cpu_count,
+ (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory,
+ slo.edition as sku,
+ @EngineEdition AS engine_edition,
+ slo.service_objective AS hardware_type,
+ CASE
+ WHEN slo.edition = ''Hyperscale'' then NULL
+ ELSE cast(DATABASEPROPERTYEX(DB_NAME(),''MaxSizeInBytes'') as bigint)/(1024*1024)
+ END AS total_storage_mb,
+ CASE
+ WHEN slo.edition = ''Hyperscale'' then NULL
+ ELSE
+ (cast(DATABASEPROPERTYEX(DB_NAME(),''MaxSizeInBytes'') as bigint)/(1024*1024)-
+ (select SUM(size/128 - CAST(FILEPROPERTY(name, ''SpaceUsed'') AS int)/128) FROM sys.database_files )
+ )
+ END AS available_storage_mb,
+ (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime
+ FROM sys.databases d
+ -- sys.databases.database_id may not match current DB_ID on Azure SQL DB
+ CROSS JOIN sys.database_service_objectives slo
+ WHERE d.name = DB_NAME() AND slo.database_id = DB_ID()';
+
+ELSE IF @EngineEdition IN (2,3,4) /*Standard,Enterprise,Express*/
+BEGIN
+
+ DECLARE @MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+ DECLARE @Columns AS nvarchar(MAX) = ''
+
+ IF @MajorMinorVersion >= 1050
+ SET @Columns = N',CASE [virtual_machine_type_desc]
+ WHEN ''NONE'' THEN ''PHYSICAL Machine''
+ ELSE [virtual_machine_type_desc]
+ END AS [hardware_type]';
+ ELSE /*data not available*/
+ SET @Columns = N','''' AS [hardware_type]';
+
+ SET @SqlStatement = 'SELECT ''sqlserver_server_properties'' AS [measurement],
+ REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance],
+ DB_NAME() as [database_name],
+ [cpu_count]
+ ,(SELECT [total_physical_memory_kb] FROM sys.[dm_os_sys_memory]) AS [server_memory]
+ ,CAST(SERVERPROPERTY(''Edition'') AS NVARCHAR) AS [sku]
+ ,@EngineEdition AS [engine_edition]
+ ,DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) AS [uptime]
+ ' + @Columns + ',
+ SERVERPROPERTY(''ProductVersion'') AS sql_version,
+ db_online,
+ db_restoring,
+ db_recovering,
+ db_recoveryPending,
+ db_suspect,
+ db_offline
+ FROM sys.[dm_os_sys_info]
+ CROSS APPLY
+ ( SELECT SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online,
+ SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring,
+ SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering,
+ SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending,
+ SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect,
+ SUM( CASE WHEN state = 6 or state = 10 THEN 1 ELSE 0 END ) AS db_offline
+ FROM sys.databases
+ ) AS dbs';
+
+ END
+ EXEC sp_executesql @SqlStatement , N'@EngineEdition smallint', @EngineEdition = @EngineEdition;
+
+`
+
+//Recommend disabling this by default, but is useful to detect single CPU spikes/bottlenecks
+const sqlServerSchedulersV2 string = `
+SET DEADLOCK_PRIORITY - 10;
+
+DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+ ,@Columns AS nvarchar(MAX) = ''
+
+ IF @MajorMinorVersion >= 1300 BEGIN
+ SET @Columns += N',s.[total_cpu_usage_ms]
+ ,s.[total_scheduler_delay_ms]'
+ END
+
+SET @SqlStatement = N'
+SELECT
+ ''sqlserver_schedulers'' AS [measurement]
+ ,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance]
+ ,DB_NAME() AS [database_name]
+ ,cast(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id]
+ ,cast(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id]
+ ,s.[is_online]
+ ,s.[is_idle]
+ ,s.[preemptive_switches_count]
+ ,s.[context_switches_count]
+ ,s.[current_tasks_count]
+ ,s.[runnable_tasks_count]
+ ,s.[current_workers_count]
+ ,s.[active_workers_count]
+ ,s.[work_queue_count]
+ ,s.[pending_disk_io_count]
+ ,s.[load_factor]
+ ,s.[yield_count]
+ ' + @Columns + N'
+FROM sys.dm_os_schedulers AS s'
+
+EXEC sp_executesql @SqlStatement
+`
+
+const sqlPerformanceCountersV2 string = `
+SET DEADLOCK_PRIORITY -10;
+
+DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+ ,@Columns AS nvarchar(MAX) = ''
+ ,@PivotColumns AS nvarchar(MAX) = ''
+
+DECLARE @PCounters TABLE
+(
+ object_name nvarchar(128),
+ counter_name nvarchar(128),
+ instance_name nvarchar(128),
+ cntr_value bigint,
+ cntr_type INT,
+ Primary Key(object_name, counter_name, instance_name)
+);
+
+SET @SqlStatement = N'SELECT DISTINCT
+ RTrim(spi.object_name) object_name,
+ RTrim(spi.counter_name) counter_name,'
+ +
+ CASE
+ WHEN @EngineEdition IN (5,8) --- needed to get actual DB Name for SQL DB/ Managed instance
+ THEN N'CASE WHEN (
+ RTRIM(spi.object_name) LIKE ''%:Databases''
+ OR RTRIM(spi.object_name) LIKE ''%:Database Replica''
+ OR RTRIM(spi.object_name) LIKE ''%:Catalog Metadata''
+ OR RTRIM(spi.object_name) LIKE ''%:Query Store''
+ OR RTRIM(spi.object_name) LIKE ''%:Columnstore''
+ OR RTRIM(spi.object_name) LIKE ''%:Advanced Analytics'')
+ AND TRY_CONVERT(uniqueidentifier, spi.instance_name)
+ IS NOT NULL -- for cloud only
+ THEN ISNULL(d.name,RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value
+ WHEN RTRIM(object_name) LIKE ''%:Availability Replica''
+ AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only
+ THEN ISNULL(d.name,RTRIM(spi.instance_name)) + RTRIM(SUBSTRING(spi.instance_name, 37, LEN(spi.instance_name)))
+ ELSE RTRIM(spi.instance_name)
+ END AS instance_name,'
+ ELSE 'RTRIM(spi.instance_name) as instance_name, '
+ END
+ +
+ 'CAST(spi.cntr_value AS BIGINT) AS cntr_value,
+ spi.cntr_type
+ FROM sys.dm_os_performance_counters AS spi '
++
+CASE
+ WHEN @EngineEdition IN (5,8) --- Join is ONLY for managed instance and SQL DB, not for on-prem
+ THEN CAST(N'LEFT JOIN sys.databases AS d
+ ON LEFT(spi.instance_name, 36) -- some instance_name values have an additional identifier appended after the GUID
+ = CASE WHEN -- in SQL DB standalone, physical_database_name for master is the GUID of the user database
+ d.name = ''master'' AND TRY_CONVERT(uniqueidentifier, d.physical_database_name) IS NOT NULL
+ THEN d.name
+ ELSE d.physical_database_name
+ END ' as NVARCHAR(MAX))
+ ELSE N''
+END
+
+SET @SqlStatement = @SqlStatement + CAST(N' WHERE (
+ counter_name IN (
+ ''SQL Compilations/sec'',
+ ''SQL Re-Compilations/sec'',
+ ''User Connections'',
+ ''Batch Requests/sec'',
+ ''Logouts/sec'',
+ ''Logins/sec'',
+ ''Processes blocked'',
+ ''Latch Waits/sec'',
+ ''Full Scans/sec'',
+ ''Index Searches/sec'',
+ ''Page Splits/sec'',
+ ''Page lookups/sec'',
+ ''Page reads/sec'',
+ ''Page writes/sec'',
+ ''Readahead pages/sec'',
+ ''Lazy writes/sec'',
+ ''Checkpoint pages/sec'',
+ ''Page life expectancy'',
+ ''Log File(s) Size (KB)'',
+ ''Log File(s) Used Size (KB)'',
+ ''Data File(s) Size (KB)'',
+ ''Transactions/sec'',
+ ''Write Transactions/sec'',
+ ''Active Temp Tables'',
+ ''Temp Tables Creation Rate'',
+ ''Temp Tables For Destruction'',
+ ''Free Space in tempdb (KB)'',
+ ''Version Store Size (KB)'',
+ ''Memory Grants Pending'',
+ ''Memory Grants Outstanding'',
+ ''Free list stalls/sec'',
+ ''Buffer cache hit ratio'',
+ ''Buffer cache hit ratio base'',
+ ''Backup/Restore Throughput/sec'',
+ ''Total Server Memory (KB)'',
+ ''Target Server Memory (KB)'',
+ ''Log Flushes/sec'',
+ ''Log Flush Wait Time'',
+ ''Memory broker clerk size'',
+ ''Log Bytes Flushed/sec'',
+ ''Bytes Sent to Replica/sec'',
+ ''Log Send Queue'',
+ ''Bytes Sent to Transport/sec'',
+ ''Sends to Replica/sec'',
+ ''Bytes Sent to Transport/sec'',
+ ''Sends to Transport/sec'',
+ ''Bytes Received from Replica/sec'',
+ ''Receives from Replica/sec'',
+ ''Flow Control Time (ms/sec)'',
+ ''Flow Control/sec'',
+ ''Resent Messages/sec'',
+ ''Redone Bytes/sec'',
+ ''XTP Memory Used (KB)'',
+ ''Transaction Delay'',
+ ''Log Bytes Received/sec'',
+ ''Log Apply Pending Queue'',
+ ''Redone Bytes/sec'',
+ ''Recovery Queue'',
+ ''Log Apply Ready Queue'',
+ ''CPU usage %'',
+ ''CPU usage % base'',
+ ''Queued requests'',
+ ''Requests completed/sec'',
+ ''Blocked tasks'',
+ ''Active memory grant amount (KB)'',
+ ''Disk Read Bytes/sec'',
+ ''Disk Read IO Throttled/sec'',
+ ''Disk Read IO/sec'',
+ ''Disk Write Bytes/sec'',
+ ''Disk Write IO Throttled/sec'',
+ ''Disk Write IO/sec'',
+ ''Used memory (KB)'',
+ ''Forwarded Records/sec'',
+ ''Background Writer pages/sec'',
+ ''Percent Log Used'',
+ ''Log Send Queue KB'',
+ ''Redo Queue KB'',
+ ''Mirrored Write Transactions/sec'',
+ ''Group Commit Time'',
+ ''Group Commits/Sec''
+ )
+ ) OR (
+ object_name LIKE ''%User Settable%''
+ OR object_name LIKE ''%SQL Errors%''
+ ) OR (
+ object_name LIKE ''%Batch Resp Statistics%''
+ ) OR (
+ instance_name IN (''_Total'')
+ AND counter_name IN (
+ ''Lock Timeouts/sec'',
+ ''Lock Timeouts (timeout > 0)/sec'',
+ ''Number of Deadlocks/sec'',
+ ''Lock Waits/sec'',
+ ''Latch Waits/sec''
+ )
+ )
+' as NVARCHAR(MAX))
+INSERT INTO @PCounters
+EXEC (@SqlStatement)
+
+IF @MajorMinorVersion >= 1300 BEGIN
+ SET @Columns += N',rgwg.[total_cpu_usage_preemptive_ms] AS [Preemptive CPU Usage (time)]'
+ SET @PivotColumns += N',[Preemptive CPU Usage (time)]'
+END
+
+SET @SqlStatement = N'
+SELECT
+ ''SQLServer:Workload Group Stats'' AS [object]
+ ,[counter]
+ ,[instance]
+ ,CAST(vs.[value] AS BIGINT) AS [value]
+ ,1
+FROM
+(
+ SELECT
+ rgwg.name AS instance
+ ,rgwg.total_request_count AS [Request Count]
+ ,rgwg.total_queued_request_count AS [Queued Request Count]
+ ,rgwg.total_cpu_limit_violation_count AS [CPU Limit Violation Count]
+ ,rgwg.total_cpu_usage_ms AS [CPU Usage (time)]
+ ,rgwg.total_lock_wait_count AS [Lock Wait Count]
+ ,rgwg.total_lock_wait_time_ms AS [Lock Wait Time]
+ ,rgwg.total_reduced_memgrant_count AS [Reduced Memory Grant Count]
+ ' + @Columns + N'
+ FROM sys.[dm_resource_governor_workload_groups] AS rgwg
+ INNER JOIN sys.[dm_resource_governor_resource_pools] AS rgrp
+ ON rgwg.[pool_id] = rgrp.[pool_id]
+) AS rg
+UNPIVOT (
+ value FOR counter IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], [Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] ' + @PivotColumns + N')
+) AS vs'
+
+INSERT INTO @PCounters
+EXEC( @SqlStatement )
+
+SELECT 'sqlserver_performance' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ DB_NAME() as [database_name],
+ pc.object_name AS [object],
+ pc.counter_name AS [counter],
+ CASE pc.instance_name WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.instance_name,'') END AS [instance],
+ CAST(CASE WHEN pc.cntr_type = 537003264 AND pc1.cntr_value > 0 THEN (pc.cntr_value * 1.0) / (pc1.cntr_value * 1.0) * 100 ELSE pc.cntr_value END AS float(10)) AS [value],
+ -- cast to string as TAG
+ cast(pc.cntr_type as varchar(25)) as [counter_type]
+FROM @PCounters AS pc
+ LEFT OUTER JOIN @PCounters AS pc1
+ ON (
+ pc.counter_name = REPLACE(pc1.counter_name,' base','')
+ OR pc.counter_name = REPLACE(pc1.counter_name,' base',' (ms)')
+ )
+ AND pc.object_name = pc1.object_name
+ AND pc.instance_name = pc1.instance_name
+ AND pc1.counter_name LIKE '%base'
+WHERE pc.counter_name NOT LIKE '% base'
+OPTION(RECOMPILE);
+`
+
+// Conditional check based on Azure SQL DB v/s the rest aka (Azure SQL Managed instance OR On-prem SQL Server)
+// EngineEdition=5 is Azure SQL DB
+const sqlWaitStatsCategorizedV2 string = `
+SET DEADLOCK_PRIORITY -10;
+
+IF SERVERPROPERTY('EngineEdition') != 5
+SELECT
+ 'sqlserver_waitstats' AS [measurement],
+REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+DB_NAME() as [database_name],
+ws.wait_type,
+wait_time_ms,
+wait_time_ms - signal_wait_time_ms AS [resource_wait_ms],
+signal_wait_time_ms,
+max_wait_time_ms,
+waiting_tasks_count,
+ISNULL(wc.wait_category,'OTHER') AS [wait_category]
+FROM
+sys.dm_os_wait_stats AS ws WITH (NOLOCK)
+LEFT OUTER JOIN ( VALUES
+('ASYNC_IO_COMPLETION','Other Disk IO'),
+('ASYNC_NETWORK_IO','Network IO'),
+('BACKUPIO','Other Disk IO'),
+('BROKER_CONNECTION_RECEIVE_TASK','Service Broker'),
+('BROKER_DISPATCHER','Service Broker'),
+('BROKER_ENDPOINT_STATE_MUTEX','Service Broker'),
+('BROKER_EVENTHANDLER','Service Broker'),
+('BROKER_FORWARDER','Service Broker'),
+('BROKER_INIT','Service Broker'),
+('BROKER_MASTERSTART','Service Broker'),
+('BROKER_RECEIVE_WAITFOR','User Wait'),
+('BROKER_REGISTERALLENDPOINTS','Service Broker'),
+('BROKER_SERVICE','Service Broker'),
+('BROKER_SHUTDOWN','Service Broker'),
+('BROKER_START','Service Broker'),
+('BROKER_TASK_SHUTDOWN','Service Broker'),
+('BROKER_TASK_STOP','Service Broker'),
+('BROKER_TASK_SUBMIT','Service Broker'),
+('BROKER_TO_FLUSH','Service Broker'),
+('BROKER_TRANSMISSION_OBJECT','Service Broker'),
+('BROKER_TRANSMISSION_TABLE','Service Broker'),
+('BROKER_TRANSMISSION_WORK','Service Broker'),
+('BROKER_TRANSMITTER','Service Broker'),
+('CHECKPOINT_QUEUE','Idle'),
+('CHKPT','Tran Log IO'),
+('CLR_AUTO_EVENT','SQL CLR'),
+('CLR_CRST','SQL CLR'),
+('CLR_JOIN','SQL CLR'),
+('CLR_MANUAL_EVENT','SQL CLR'),
+('CLR_MEMORY_SPY','SQL CLR'),
+('CLR_MONITOR','SQL CLR'),
+('CLR_RWLOCK_READER','SQL CLR'),
+('CLR_RWLOCK_WRITER','SQL CLR'),
+('CLR_SEMAPHORE','SQL CLR'),
+('CLR_TASK_START','SQL CLR'),
+('CLRHOST_STATE_ACCESS','SQL CLR'),
+('CMEMPARTITIONED','Memory'),
+('CMEMTHREAD','Memory'),
+('CXPACKET','Parallelism'),
+('CXCONSUMER','Parallelism'),
+('DBMIRROR_DBM_EVENT','Mirroring'),
+('DBMIRROR_DBM_MUTEX','Mirroring'),
+('DBMIRROR_EVENTS_QUEUE','Mirroring'),
+('DBMIRROR_SEND','Mirroring'),
+('DBMIRROR_WORKER_QUEUE','Mirroring'),
+('DBMIRRORING_CMD','Mirroring'),
+('DTC','Transaction'),
+('DTC_ABORT_REQUEST','Transaction'),
+('DTC_RESOLVE','Transaction'),
+('DTC_STATE','Transaction'),
+('DTC_TMDOWN_REQUEST','Transaction'),
+('DTC_WAITFOR_OUTCOME','Transaction'),
+('DTCNEW_ENLIST','Transaction'),
+('DTCNEW_PREPARE','Transaction'),
+('DTCNEW_RECOVERY','Transaction'),
+('DTCNEW_TM','Transaction'),
+('DTCNEW_TRANSACTION_ENLISTMENT','Transaction'),
+('DTCPNTSYNC','Transaction'),
+('EE_PMOLOCK','Memory'),
+('EXCHANGE','Parallelism'),
+('EXTERNAL_SCRIPT_NETWORK_IOF','Network IO'),
+('FCB_REPLICA_READ','Replication'),
+('FCB_REPLICA_WRITE','Replication'),
+('FT_COMPROWSET_RWLOCK','Full Text Search'),
+('FT_IFTS_RWLOCK','Full Text Search'),
+('FT_IFTS_SCHEDULER_IDLE_WAIT','Idle'),
+('FT_IFTSHC_MUTEX','Full Text Search'),
+('FT_IFTSISM_MUTEX','Full Text Search'),
+('FT_MASTER_MERGE','Full Text Search'),
+('FT_MASTER_MERGE_COORDINATOR','Full Text Search'),
+('FT_METADATA_MUTEX','Full Text Search'),
+('FT_PROPERTYLIST_CACHE','Full Text Search'),
+('FT_RESTART_CRAWL','Full Text Search'),
+('FULLTEXT GATHERER','Full Text Search'),
+('HADR_AG_MUTEX','Replication'),
+('HADR_AR_CRITICAL_SECTION_ENTRY','Replication'),
+('HADR_AR_MANAGER_MUTEX','Replication'),
+('HADR_AR_UNLOAD_COMPLETED','Replication'),
+('HADR_ARCONTROLLER_NOTIFICATIONS_SUBSCRIBER_LIST','Replication'),
+('HADR_BACKUP_BULK_LOCK','Replication'),
+('HADR_BACKUP_QUEUE','Replication'),
+('HADR_CLUSAPI_CALL','Replication'),
+('HADR_COMPRESSED_CACHE_SYNC','Replication'),
+('HADR_CONNECTIVITY_INFO','Replication'),
+('HADR_DATABASE_FLOW_CONTROL','Replication'),
+('HADR_DATABASE_VERSIONING_STATE','Replication'),
+('HADR_DATABASE_WAIT_FOR_RECOVERY','Replication'),
+('HADR_DATABASE_WAIT_FOR_RESTART','Replication'),
+('HADR_DATABASE_WAIT_FOR_TRANSITION_TO_VERSIONING','Replication'),
+('HADR_DB_COMMAND','Replication'),
+('HADR_DB_OP_COMPLETION_SYNC','Replication'),
+('HADR_DB_OP_START_SYNC','Replication'),
+('HADR_DBR_SUBSCRIBER','Replication'),
+('HADR_DBR_SUBSCRIBER_FILTER_LIST','Replication'),
+('HADR_DBSEEDING','Replication'),
+('HADR_DBSEEDING_LIST','Replication'),
+('HADR_DBSTATECHANGE_SYNC','Replication'),
+('HADR_FABRIC_CALLBACK','Replication'),
+('HADR_FILESTREAM_BLOCK_FLUSH','Replication'),
+('HADR_FILESTREAM_FILE_CLOSE','Replication'),
+('HADR_FILESTREAM_FILE_REQUEST','Replication'),
+('HADR_FILESTREAM_IOMGR','Replication'),
+('HADR_FILESTREAM_IOMGR_IOCOMPLETION','Replication'),
+('HADR_FILESTREAM_MANAGER','Replication'),
+('HADR_FILESTREAM_PREPROC','Replication'),
+('HADR_GROUP_COMMIT','Replication'),
+('HADR_LOGCAPTURE_SYNC','Replication'),
+('HADR_LOGCAPTURE_WAIT','Replication'),
+('HADR_LOGPROGRESS_SYNC','Replication'),
+('HADR_NOTIFICATION_DEQUEUE','Replication'),
+('HADR_NOTIFICATION_WORKER_EXCLUSIVE_ACCESS','Replication'),
+('HADR_NOTIFICATION_WORKER_STARTUP_SYNC','Replication'),
+('HADR_NOTIFICATION_WORKER_TERMINATION_SYNC','Replication'),
+('HADR_PARTNER_SYNC','Replication'),
+('HADR_READ_ALL_NETWORKS','Replication'),
+('HADR_RECOVERY_WAIT_FOR_CONNECTION','Replication'),
+('HADR_RECOVERY_WAIT_FOR_UNDO','Replication'),
+('HADR_REPLICAINFO_SYNC','Replication'),
+('HADR_SEEDING_CANCELLATION','Replication'),
+('HADR_SEEDING_FILE_LIST','Replication'),
+('HADR_SEEDING_LIMIT_BACKUPS','Replication'),
+('HADR_SEEDING_SYNC_COMPLETION','Replication'),
+('HADR_SEEDING_TIMEOUT_TASK','Replication'),
+('HADR_SEEDING_WAIT_FOR_COMPLETION','Replication'),
+('HADR_SYNC_COMMIT','Replication'),
+('HADR_SYNCHRONIZING_THROTTLE','Replication'),
+('HADR_TDS_LISTENER_SYNC','Replication'),
+('HADR_TDS_LISTENER_SYNC_PROCESSING','Replication'),
+('HADR_THROTTLE_LOG_RATE_GOVERNOR','Log Rate Governor'),
+('HADR_TIMER_TASK','Replication'),
+('HADR_TRANSPORT_DBRLIST','Replication'),
+('HADR_TRANSPORT_FLOW_CONTROL','Replication'),
+('HADR_TRANSPORT_SESSION','Replication'),
+('HADR_WORK_POOL','Replication'),
+('HADR_WORK_QUEUE','Replication'),
+('HADR_XRF_STACK_ACCESS','Replication'),
+('INSTANCE_LOG_RATE_GOVERNOR','Log Rate Governor'),
+('IO_COMPLETION','Other Disk IO'),
+('IO_QUEUE_LIMIT','Other Disk IO'),
+('IO_RETRY','Other Disk IO'),
+('LATCH_DT','Latch'),
+('LATCH_EX','Latch'),
+('LATCH_KP','Latch'),
+('LATCH_NL','Latch'),
+('LATCH_SH','Latch'),
+('LATCH_UP','Latch'),
+('LAZYWRITER_SLEEP','Idle'),
+('LCK_M_BU','Lock'),
+('LCK_M_BU_ABORT_BLOCKERS','Lock'),
+('LCK_M_BU_LOW_PRIORITY','Lock'),
+('LCK_M_IS','Lock'),
+('LCK_M_IS_ABORT_BLOCKERS','Lock'),
+('LCK_M_IS_LOW_PRIORITY','Lock'),
+('LCK_M_IU','Lock'),
+('LCK_M_IU_ABORT_BLOCKERS','Lock'),
+('LCK_M_IU_LOW_PRIORITY','Lock'),
+('LCK_M_IX','Lock'),
+('LCK_M_IX_ABORT_BLOCKERS','Lock'),
+('LCK_M_IX_LOW_PRIORITY','Lock'),
+('LCK_M_RIn_NL','Lock'),
+('LCK_M_RIn_NL_ABORT_BLOCKERS','Lock'),
+('LCK_M_RIn_NL_LOW_PRIORITY','Lock'),
+('LCK_M_RIn_S','Lock'),
+('LCK_M_RIn_S_ABORT_BLOCKERS','Lock'),
+('LCK_M_RIn_S_LOW_PRIORITY','Lock'),
+('LCK_M_RIn_U','Lock'),
+('LCK_M_RIn_U_ABORT_BLOCKERS','Lock'),
+('LCK_M_RIn_U_LOW_PRIORITY','Lock'),
+('LCK_M_RIn_X','Lock'),
+('LCK_M_RIn_X_ABORT_BLOCKERS','Lock'),
+('LCK_M_RIn_X_LOW_PRIORITY','Lock'),
+('LCK_M_RS_S','Lock'),
+('LCK_M_RS_S_ABORT_BLOCKERS','Lock'),
+('LCK_M_RS_S_LOW_PRIORITY','Lock'),
+('LCK_M_RS_U','Lock'),
+('LCK_M_RS_U_ABORT_BLOCKERS','Lock'),
+('LCK_M_RS_U_LOW_PRIORITY','Lock'),
+('LCK_M_RX_S','Lock'),
+('LCK_M_RX_S_ABORT_BLOCKERS','Lock'),
+('LCK_M_RX_S_LOW_PRIORITY','Lock'),
+('LCK_M_RX_U','Lock'),
+('LCK_M_RX_U_ABORT_BLOCKERS','Lock'),
+('LCK_M_RX_U_LOW_PRIORITY','Lock'),
+('LCK_M_RX_X','Lock'),
+('LCK_M_RX_X_ABORT_BLOCKERS','Lock'),
+('LCK_M_RX_X_LOW_PRIORITY','Lock'),
+('LCK_M_S','Lock'),
+('LCK_M_S_ABORT_BLOCKERS','Lock'),
+('LCK_M_S_LOW_PRIORITY','Lock'),
+('LCK_M_SCH_M','Lock'),
+('LCK_M_SCH_M_ABORT_BLOCKERS','Lock'),
+('LCK_M_SCH_M_LOW_PRIORITY','Lock'),
+('LCK_M_SCH_S','Lock'),
+('LCK_M_SCH_S_ABORT_BLOCKERS','Lock'),
+('LCK_M_SCH_S_LOW_PRIORITY','Lock'),
+('LCK_M_SIU','Lock'),
+('LCK_M_SIU_ABORT_BLOCKERS','Lock'),
+('LCK_M_SIU_LOW_PRIORITY','Lock'),
+('LCK_M_SIX','Lock'),
+('LCK_M_SIX_ABORT_BLOCKERS','Lock'),
+('LCK_M_SIX_LOW_PRIORITY','Lock'),
+('LCK_M_U','Lock'),
+('LCK_M_U_ABORT_BLOCKERS','Lock'),
+('LCK_M_U_LOW_PRIORITY','Lock'),
+('LCK_M_UIX','Lock'),
+('LCK_M_UIX_ABORT_BLOCKERS','Lock'),
+('LCK_M_UIX_LOW_PRIORITY','Lock'),
+('LCK_M_X','Lock'),
+('LCK_M_X_ABORT_BLOCKERS','Lock'),
+('LCK_M_X_LOW_PRIORITY','Lock'),
+('LOGBUFFER','Tran Log IO'),
+('LOGMGR','Tran Log IO'),
+('LOGMGR_FLUSH','Tran Log IO'),
+('LOGMGR_PMM_LOG','Tran Log IO'),
+('LOGMGR_QUEUE','Idle'),
+('LOGMGR_RESERVE_APPEND','Tran Log IO'),
+('MEMORY_ALLOCATION_EXT','Memory'),
+('MEMORY_GRANT_UPDATE','Memory'),
+('MSQL_XACT_MGR_MUTEX','Transaction'),
+('MSQL_XACT_MUTEX','Transaction'),
+('MSSEARCH','Full Text Search'),
+('NET_WAITFOR_PACKET','Network IO'),
+('ONDEMAND_TASK_QUEUE','Idle'),
+('PAGEIOLATCH_DT','Buffer IO'),
+('PAGEIOLATCH_EX','Buffer IO'),
+('PAGEIOLATCH_KP','Buffer IO'),
+('PAGEIOLATCH_NL','Buffer IO'),
+('PAGEIOLATCH_SH','Buffer IO'),
+('PAGEIOLATCH_UP','Buffer IO'),
+('PAGELATCH_DT','Buffer Latch'),
+('PAGELATCH_EX','Buffer Latch'),
+('PAGELATCH_KP','Buffer Latch'),
+('PAGELATCH_NL','Buffer Latch'),
+('PAGELATCH_SH','Buffer Latch'),
+('PAGELATCH_UP','Buffer Latch'),
+('POOL_LOG_RATE_GOVERNOR','Log Rate Governor'),
+('PREEMPTIVE_ABR','Preemptive'),
+('PREEMPTIVE_CLOSEBACKUPMEDIA','Preemptive'),
+('PREEMPTIVE_CLOSEBACKUPTAPE','Preemptive'),
+('PREEMPTIVE_CLOSEBACKUPVDIDEVICE','Preemptive'),
+('PREEMPTIVE_CLUSAPI_CLUSTERRESOURCECONTROL','Preemptive'),
+('PREEMPTIVE_COM_COCREATEINSTANCE','Preemptive'),
+('PREEMPTIVE_COM_COGETCLASSOBJECT','Preemptive'),
+('PREEMPTIVE_COM_CREATEACCESSOR','Preemptive'),
+('PREEMPTIVE_COM_DELETEROWS','Preemptive'),
+('PREEMPTIVE_COM_GETCOMMANDTEXT','Preemptive'),
+('PREEMPTIVE_COM_GETDATA','Preemptive'),
+('PREEMPTIVE_COM_GETNEXTROWS','Preemptive'),
+('PREEMPTIVE_COM_GETRESULT','Preemptive'),
+('PREEMPTIVE_COM_GETROWSBYBOOKMARK','Preemptive'),
+('PREEMPTIVE_COM_LBFLUSH','Preemptive'),
+('PREEMPTIVE_COM_LBLOCKREGION','Preemptive'),
+('PREEMPTIVE_COM_LBREADAT','Preemptive'),
+('PREEMPTIVE_COM_LBSETSIZE','Preemptive'),
+('PREEMPTIVE_COM_LBSTAT','Preemptive'),
+('PREEMPTIVE_COM_LBUNLOCKREGION','Preemptive'),
+('PREEMPTIVE_COM_LBWRITEAT','Preemptive'),
+('PREEMPTIVE_COM_QUERYINTERFACE','Preemptive'),
+('PREEMPTIVE_COM_RELEASE','Preemptive'),
+('PREEMPTIVE_COM_RELEASEACCESSOR','Preemptive'),
+('PREEMPTIVE_COM_RELEASEROWS','Preemptive'),
+('PREEMPTIVE_COM_RELEASESESSION','Preemptive'),
+('PREEMPTIVE_COM_RESTARTPOSITION','Preemptive'),
+('PREEMPTIVE_COM_SEQSTRMREAD','Preemptive'),
+('PREEMPTIVE_COM_SEQSTRMREADANDWRITE','Preemptive'),
+('PREEMPTIVE_COM_SETDATAFAILURE','Preemptive'),
+('PREEMPTIVE_COM_SETPARAMETERINFO','Preemptive'),
+('PREEMPTIVE_COM_SETPARAMETERPROPERTIES','Preemptive'),
+('PREEMPTIVE_COM_STRMLOCKREGION','Preemptive'),
+('PREEMPTIVE_COM_STRMSEEKANDREAD','Preemptive'),
+('PREEMPTIVE_COM_STRMSEEKANDWRITE','Preemptive'),
+('PREEMPTIVE_COM_STRMSETSIZE','Preemptive'),
+('PREEMPTIVE_COM_STRMSTAT','Preemptive'),
+('PREEMPTIVE_COM_STRMUNLOCKREGION','Preemptive'),
+('PREEMPTIVE_CONSOLEWRITE','Preemptive'),
+('PREEMPTIVE_CREATEPARAM','Preemptive'),
+('PREEMPTIVE_DEBUG','Preemptive'),
+('PREEMPTIVE_DFSADDLINK','Preemptive'),
+('PREEMPTIVE_DFSLINKEXISTCHECK','Preemptive'),
+('PREEMPTIVE_DFSLINKHEALTHCHECK','Preemptive'),
+('PREEMPTIVE_DFSREMOVELINK','Preemptive'),
+('PREEMPTIVE_DFSREMOVEROOT','Preemptive'),
+('PREEMPTIVE_DFSROOTFOLDERCHECK','Preemptive'),
+('PREEMPTIVE_DFSROOTINIT','Preemptive'),
+('PREEMPTIVE_DFSROOTSHARECHECK','Preemptive'),
+('PREEMPTIVE_DTC_ABORT','Preemptive'),
+('PREEMPTIVE_DTC_ABORTREQUESTDONE','Preemptive'),
+('PREEMPTIVE_DTC_BEGINTRANSACTION','Preemptive'),
+('PREEMPTIVE_DTC_COMMITREQUESTDONE','Preemptive'),
+('PREEMPTIVE_DTC_ENLIST','Preemptive'),
+('PREEMPTIVE_DTC_PREPAREREQUESTDONE','Preemptive'),
+('PREEMPTIVE_FILESIZEGET','Preemptive'),
+('PREEMPTIVE_FSAOLEDB_ABORTTRANSACTION','Preemptive'),
+('PREEMPTIVE_FSAOLEDB_COMMITTRANSACTION','Preemptive'),
+('PREEMPTIVE_FSAOLEDB_STARTTRANSACTION','Preemptive'),
+('PREEMPTIVE_FSRECOVER_UNCONDITIONALUNDO','Preemptive'),
+('PREEMPTIVE_GETRMINFO','Preemptive'),
+('PREEMPTIVE_HADR_LEASE_MECHANISM','Preemptive'),
+('PREEMPTIVE_HTTP_EVENT_WAIT','Preemptive'),
+('PREEMPTIVE_HTTP_REQUEST','Preemptive'),
+('PREEMPTIVE_LOCKMONITOR','Preemptive'),
+('PREEMPTIVE_MSS_RELEASE','Preemptive'),
+('PREEMPTIVE_ODBCOPS','Preemptive'),
+('PREEMPTIVE_OLE_UNINIT','Preemptive'),
+('PREEMPTIVE_OLEDB_ABORTORCOMMITTRAN','Preemptive'),
+('PREEMPTIVE_OLEDB_ABORTTRAN','Preemptive'),
+('PREEMPTIVE_OLEDB_GETDATASOURCE','Preemptive'),
+('PREEMPTIVE_OLEDB_GETLITERALINFO','Preemptive'),
+('PREEMPTIVE_OLEDB_GETPROPERTIES','Preemptive'),
+('PREEMPTIVE_OLEDB_GETPROPERTYINFO','Preemptive'),
+('PREEMPTIVE_OLEDB_GETSCHEMALOCK','Preemptive'),
+('PREEMPTIVE_OLEDB_JOINTRANSACTION','Preemptive'),
+('PREEMPTIVE_OLEDB_RELEASE','Preemptive'),
+('PREEMPTIVE_OLEDB_SETPROPERTIES','Preemptive'),
+('PREEMPTIVE_OLEDBOPS','Preemptive'),
+('PREEMPTIVE_OS_ACCEPTSECURITYCONTEXT','Preemptive'),
+('PREEMPTIVE_OS_ACQUIRECREDENTIALSHANDLE','Preemptive'),
+('PREEMPTIVE_OS_AUTHENTICATIONOPS','Preemptive'),
+('PREEMPTIVE_OS_AUTHORIZATIONOPS','Preemptive'),
+('PREEMPTIVE_OS_AUTHZGETINFORMATIONFROMCONTEXT','Preemptive'),
+('PREEMPTIVE_OS_AUTHZINITIALIZECONTEXTFROMSID','Preemptive'),
+('PREEMPTIVE_OS_AUTHZINITIALIZERESOURCEMANAGER','Preemptive'),
+('PREEMPTIVE_OS_BACKUPREAD','Preemptive'),
+('PREEMPTIVE_OS_CLOSEHANDLE','Preemptive'),
+('PREEMPTIVE_OS_CLUSTEROPS','Preemptive'),
+('PREEMPTIVE_OS_COMOPS','Preemptive'),
+('PREEMPTIVE_OS_COMPLETEAUTHTOKEN','Preemptive'),
+('PREEMPTIVE_OS_COPYFILE','Preemptive'),
+('PREEMPTIVE_OS_CREATEDIRECTORY','Preemptive'),
+('PREEMPTIVE_OS_CREATEFILE','Preemptive'),
+('PREEMPTIVE_OS_CRYPTACQUIRECONTEXT','Preemptive'),
+('PREEMPTIVE_OS_CRYPTIMPORTKEY','Preemptive'),
+('PREEMPTIVE_OS_CRYPTOPS','Preemptive'),
+('PREEMPTIVE_OS_DECRYPTMESSAGE','Preemptive'),
+('PREEMPTIVE_OS_DELETEFILE','Preemptive'),
+('PREEMPTIVE_OS_DELETESECURITYCONTEXT','Preemptive'),
+('PREEMPTIVE_OS_DEVICEIOCONTROL','Preemptive'),
+('PREEMPTIVE_OS_DEVICEOPS','Preemptive'),
+('PREEMPTIVE_OS_DIRSVC_NETWORKOPS','Preemptive'),
+('PREEMPTIVE_OS_DISCONNECTNAMEDPIPE','Preemptive'),
+('PREEMPTIVE_OS_DOMAINSERVICESOPS','Preemptive'),
+('PREEMPTIVE_OS_DSGETDCNAME','Preemptive'),
+('PREEMPTIVE_OS_DTCOPS','Preemptive'),
+('PREEMPTIVE_OS_ENCRYPTMESSAGE','Preemptive'),
+('PREEMPTIVE_OS_FILEOPS','Preemptive'),
+('PREEMPTIVE_OS_FINDFILE','Preemptive'),
+('PREEMPTIVE_OS_FLUSHFILEBUFFERS','Preemptive'),
+('PREEMPTIVE_OS_FORMATMESSAGE','Preemptive'),
+('PREEMPTIVE_OS_FREECREDENTIALSHANDLE','Preemptive'),
+('PREEMPTIVE_OS_FREELIBRARY','Preemptive'),
+('PREEMPTIVE_OS_GENERICOPS','Preemptive'),
+('PREEMPTIVE_OS_GETADDRINFO','Preemptive'),
+('PREEMPTIVE_OS_GETCOMPRESSEDFILESIZE','Preemptive'),
+('PREEMPTIVE_OS_GETDISKFREESPACE','Preemptive'),
+('PREEMPTIVE_OS_GETFILEATTRIBUTES','Preemptive'),
+('PREEMPTIVE_OS_GETFILESIZE','Preemptive'),
+('PREEMPTIVE_OS_GETFINALFILEPATHBYHANDLE','Preemptive'),
+('PREEMPTIVE_OS_GETLONGPATHNAME','Preemptive'),
+('PREEMPTIVE_OS_GETPROCADDRESS','Preemptive'),
+('PREEMPTIVE_OS_GETVOLUMENAMEFORVOLUMEMOUNTPOINT','Preemptive'),
+('PREEMPTIVE_OS_GETVOLUMEPATHNAME','Preemptive'),
+('PREEMPTIVE_OS_INITIALIZESECURITYCONTEXT','Preemptive'),
+('PREEMPTIVE_OS_LIBRARYOPS','Preemptive'),
+('PREEMPTIVE_OS_LOADLIBRARY','Preemptive'),
+('PREEMPTIVE_OS_LOGONUSER','Preemptive'),
+('PREEMPTIVE_OS_LOOKUPACCOUNTSID','Preemptive'),
+('PREEMPTIVE_OS_MESSAGEQUEUEOPS','Preemptive'),
+('PREEMPTIVE_OS_MOVEFILE','Preemptive'),
+('PREEMPTIVE_OS_NETGROUPGETUSERS','Preemptive'),
+('PREEMPTIVE_OS_NETLOCALGROUPGETMEMBERS','Preemptive'),
+('PREEMPTIVE_OS_NETUSERGETGROUPS','Preemptive'),
+('PREEMPTIVE_OS_NETUSERGETLOCALGROUPS','Preemptive'),
+('PREEMPTIVE_OS_NETUSERMODALSGET','Preemptive'),
+('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICY','Preemptive'),
+('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICYFREE','Preemptive'),
+('PREEMPTIVE_OS_OPENDIRECTORY','Preemptive'),
+('PREEMPTIVE_OS_PDH_WMI_INIT','Preemptive'),
+('PREEMPTIVE_OS_PIPEOPS','Preemptive'),
+('PREEMPTIVE_OS_PROCESSOPS','Preemptive'),
+('PREEMPTIVE_OS_QUERYCONTEXTATTRIBUTES','Preemptive'),
+('PREEMPTIVE_OS_QUERYREGISTRY','Preemptive'),
+('PREEMPTIVE_OS_QUERYSECURITYCONTEXTTOKEN','Preemptive'),
+('PREEMPTIVE_OS_REMOVEDIRECTORY','Preemptive'),
+('PREEMPTIVE_OS_REPORTEVENT','Preemptive'),
+('PREEMPTIVE_OS_REVERTTOSELF','Preemptive'),
+('PREEMPTIVE_OS_RSFXDEVICEOPS','Preemptive'),
+('PREEMPTIVE_OS_SECURITYOPS','Preemptive'),
+('PREEMPTIVE_OS_SERVICEOPS','Preemptive'),
+('PREEMPTIVE_OS_SETENDOFFILE','Preemptive'),
+('PREEMPTIVE_OS_SETFILEPOINTER','Preemptive'),
+('PREEMPTIVE_OS_SETFILEVALIDDATA','Preemptive'),
+('PREEMPTIVE_OS_SETNAMEDSECURITYINFO','Preemptive'),
+('PREEMPTIVE_OS_SQLCLROPS','Preemptive'),
+('PREEMPTIVE_OS_SQMLAUNCH','Preemptive'),
+('PREEMPTIVE_OS_VERIFYSIGNATURE','Preemptive'),
+('PREEMPTIVE_OS_VERIFYTRUST','Preemptive'),
+('PREEMPTIVE_OS_VSSOPS','Preemptive'),
+('PREEMPTIVE_OS_WAITFORSINGLEOBJECT','Preemptive'),
+('PREEMPTIVE_OS_WINSOCKOPS','Preemptive'),
+('PREEMPTIVE_OS_WRITEFILE','Preemptive'),
+('PREEMPTIVE_OS_WRITEFILEGATHER','Preemptive'),
+('PREEMPTIVE_OS_WSASETLASTERROR','Preemptive'),
+('PREEMPTIVE_REENLIST','Preemptive'),
+('PREEMPTIVE_RESIZELOG','Preemptive'),
+('PREEMPTIVE_ROLLFORWARDREDO','Preemptive'),
+('PREEMPTIVE_ROLLFORWARDUNDO','Preemptive'),
+('PREEMPTIVE_SB_STOPENDPOINT','Preemptive'),
+('PREEMPTIVE_SERVER_STARTUP','Preemptive'),
+('PREEMPTIVE_SETRMINFO','Preemptive'),
+('PREEMPTIVE_SHAREDMEM_GETDATA','Preemptive'),
+('PREEMPTIVE_SNIOPEN','Preemptive'),
+('PREEMPTIVE_SOSHOST','Preemptive'),
+('PREEMPTIVE_SOSTESTING','Preemptive'),
+('PREEMPTIVE_SP_SERVER_DIAGNOSTICS','Preemptive'),
+('PREEMPTIVE_STARTRM','Preemptive'),
+('PREEMPTIVE_STREAMFCB_CHECKPOINT','Preemptive'),
+('PREEMPTIVE_STREAMFCB_RECOVER','Preemptive'),
+('PREEMPTIVE_STRESSDRIVER','Preemptive'),
+('PREEMPTIVE_TESTING','Preemptive'),
+('PREEMPTIVE_TRANSIMPORT','Preemptive'),
+('PREEMPTIVE_UNMARSHALPROPAGATIONTOKEN','Preemptive'),
+('PREEMPTIVE_VSS_CREATESNAPSHOT','Preemptive'),
+('PREEMPTIVE_VSS_CREATEVOLUMESNAPSHOT','Preemptive'),
+('PREEMPTIVE_XE_CALLBACKEXECUTE','Preemptive'),
+('PREEMPTIVE_XE_CX_FILE_OPEN','Preemptive'),
+('PREEMPTIVE_XE_CX_HTTP_CALL','Preemptive'),
+('PREEMPTIVE_XE_DISPATCHER','Preemptive'),
+('PREEMPTIVE_XE_ENGINEINIT','Preemptive'),
+('PREEMPTIVE_XE_GETTARGETSTATE','Preemptive'),
+('PREEMPTIVE_XE_SESSIONCOMMIT','Preemptive'),
+('PREEMPTIVE_XE_TARGETFINALIZE','Preemptive'),
+('PREEMPTIVE_XE_TARGETINIT','Preemptive'),
+('PREEMPTIVE_XE_TIMERRUN','Preemptive'),
+('PREEMPTIVE_XETESTING','Preemptive'),
+('PWAIT_HADR_ACTION_COMPLETED','Replication'),
+('PWAIT_HADR_CHANGE_NOTIFIER_TERMINATION_SYNC','Replication'),
+('PWAIT_HADR_CLUSTER_INTEGRATION','Replication'),
+('PWAIT_HADR_FAILOVER_COMPLETED','Replication'),
+('PWAIT_HADR_JOIN','Replication'),
+('PWAIT_HADR_OFFLINE_COMPLETED','Replication'),
+('PWAIT_HADR_ONLINE_COMPLETED','Replication'),
+('PWAIT_HADR_POST_ONLINE_COMPLETED','Replication'),
+('PWAIT_HADR_SERVER_READY_CONNECTIONS','Replication'),
+('PWAIT_HADR_WORKITEM_COMPLETED','Replication'),
+('PWAIT_HADRSIM','Replication'),
+('PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC','Full Text Search'),
+('QUERY_TRACEOUT','Tracing'),
+('REPL_CACHE_ACCESS','Replication'),
+('REPL_HISTORYCACHE_ACCESS','Replication'),
+('REPL_SCHEMA_ACCESS','Replication'),
+('REPL_TRANFSINFO_ACCESS','Replication'),
+('REPL_TRANHASHTABLE_ACCESS','Replication'),
+('REPL_TRANTEXTINFO_ACCESS','Replication'),
+('REPLICA_WRITES','Replication'),
+('REQUEST_FOR_DEADLOCK_SEARCH','Idle'),
+('RESERVED_MEMORY_ALLOCATION_EXT','Memory'),
+('RESOURCE_SEMAPHORE','Memory'),
+('RESOURCE_SEMAPHORE_QUERY_COMPILE','Compilation'),
+('SLEEP_BPOOL_FLUSH','Idle'),
+('SLEEP_BUFFERPOOL_HELPLW','Idle'),
+('SLEEP_DBSTARTUP','Idle'),
+('SLEEP_DCOMSTARTUP','Idle'),
+('SLEEP_MASTERDBREADY','Idle'),
+('SLEEP_MASTERMDREADY','Idle'),
+('SLEEP_MASTERUPGRADED','Idle'),
+('SLEEP_MEMORYPOOL_ALLOCATEPAGES','Idle'),
+('SLEEP_MSDBSTARTUP','Idle'),
+('SLEEP_RETRY_VIRTUALALLOC','Idle'),
+('SLEEP_SYSTEMTASK','Idle'),
+('SLEEP_TASK','Idle'),
+('SLEEP_TEMPDBSTARTUP','Idle'),
+('SLEEP_WORKSPACE_ALLOCATEPAGE','Idle'),
+('SOS_SCHEDULER_YIELD','CPU'),
+('SQLCLR_APPDOMAIN','SQL CLR'),
+('SQLCLR_ASSEMBLY','SQL CLR'),
+('SQLCLR_DEADLOCK_DETECTION','SQL CLR'),
+('SQLCLR_QUANTUM_PUNISHMENT','SQL CLR'),
+('SQLTRACE_BUFFER_FLUSH','Idle'),
+('SQLTRACE_FILE_BUFFER','Tracing'),
+('SQLTRACE_FILE_READ_IO_COMPLETION','Tracing'),
+('SQLTRACE_FILE_WRITE_IO_COMPLETION','Tracing'),
+('SQLTRACE_INCREMENTAL_FLUSH_SLEEP','Idle'),
+('SQLTRACE_PENDING_BUFFER_WRITERS','Tracing'),
+('SQLTRACE_SHUTDOWN','Tracing'),
+('SQLTRACE_WAIT_ENTRIES','Idle'),
+('THREADPOOL','Worker Thread'),
+('TRACE_EVTNOTIF','Tracing'),
+('TRACEWRITE','Tracing'),
+('TRAN_MARKLATCH_DT','Transaction'),
+('TRAN_MARKLATCH_EX','Transaction'),
+('TRAN_MARKLATCH_KP','Transaction'),
+('TRAN_MARKLATCH_NL','Transaction'),
+('TRAN_MARKLATCH_SH','Transaction'),
+('TRAN_MARKLATCH_UP','Transaction'),
+('TRANSACTION_MUTEX','Transaction'),
+('WAIT_FOR_RESULTS','User Wait'),
+('WAITFOR','User Wait'),
+('WRITE_COMPLETION','Other Disk IO'),
+('WRITELOG','Tran Log IO'),
+('XACT_OWN_TRANSACTION','Transaction'),
+('XACT_RECLAIM_SESSION','Transaction'),
+('XACTLOCKINFO','Transaction'),
+('XACTWORKSPACE_MUTEX','Transaction'),
+('XE_DISPATCHER_WAIT','Idle'),
+('XE_TIMER_EVENT','Idle')) AS wc(wait_type, wait_category)
+ ON ws.wait_type = wc.wait_type
+WHERE
+ws.wait_type NOT IN (
+ N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP',
+ N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE',
+ N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE',
+ N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE',
+ N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE',
+ N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX',
+ N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT',
+ N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE',
+ N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE',
+ N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE',
+ N'PARALLEL_REDO_WORKER_WAIT_WORK',
+ N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS',
+ N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS',
+ N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST',
+ N'PREEMPTIVE_OS_DEVICEOPS',
+ N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER',
+ N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT',
+ N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE',
+ N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT',
+ N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP',
+ N'QDS_ASYNC_QUEUE',
+ N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH',
+ N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP',
+ N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY',
+ N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK',
+ N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP',
+ N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
+ N'SQLTRACE_WAIT_ENTRIES',
+ N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT',
+ N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE',
+ N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN',
+ N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT',
+ N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT')
+AND waiting_tasks_count > 0
+AND wait_time_ms > 100;
+
+ELSE
+ SELECT
+ 'sqlserver_azuredb_waitstats' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ DB_NAME() as [database_name'],
+ dbws.wait_type,
+ dbws.wait_time_ms,
+ dbws.wait_time_ms - signal_wait_time_ms AS [resource_wait_ms],
+ dbws.signal_wait_time_ms,
+ dbws.max_wait_time_ms,
+ dbws.waiting_tasks_count
+ FROM
+ sys.dm_db_wait_stats AS dbws WITH (NOLOCK)
+ WHERE
+ dbws.wait_type NOT IN (
+ N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP',
+ N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE',
+ N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE',
+ N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE',
+ N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE',
+ N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX',
+ N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT',
+ N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE',
+ N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE',
+ N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE',
+ N'PARALLEL_REDO_WORKER_WAIT_WORK',
+ N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS',
+ N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS',
+ N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST',
+ N'PREEMPTIVE_OS_DEVICEOPS',
+ N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER',
+ N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT',
+ N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE',
+ N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT',
+ N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP',
+ N'QDS_ASYNC_QUEUE',
+ N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH',
+ N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP',
+ N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY',
+ N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK',
+ N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP',
+ N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
+ N'SQLTRACE_WAIT_ENTRIES',
+ N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT',
+ N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE',
+ N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN',
+ N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT',
+ N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT')
+ AND waiting_tasks_count > 0
+ AND wait_time_ms > 100;
+`
+
+const sqlServerRequestsV2 string = `
+SET NOCOUNT ON;
+DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int) * 100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+
+-- 2008R2 and before doesn't have open_transaction_count in sys.dm_exec_sessions
+DECLARE @Columns as nvarchar(max) = ''
+DECLARE @DatabaseColumn as nvarchar(max) = ''
+IF @MajorMinorVersion >= 1200
+ BEGIN
+ SET @Columns = ',s.open_transaction_count as open_transaction '
+ SET @DatabaseColumn = ' , DB_NAME(s.database_id) as session_db_name '
+ END
+ELSE
+ BEGIN
+ SET @Columns = ',r.open_transaction_count as open_transaction '
+ SET @DatabaseColumn = ' , DB_NAME(r.database_id) as session_db_name '
+ END
+
+SET @SqlStatement = N'
+SELECT blocking_session_id into #blockingSessions FROM sys.dm_exec_requests WHERE blocking_session_id != 0
+create index ix_blockingSessions_1 on #blockingSessions (blocking_session_id)
+SELECT
+''sqlserver_requests'' AS [measurement]
+, REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
+, DB_NAME() as [database_name]
+, s.session_id
+, ISNULL(r.request_id,0) as request_id '
++ @DatabaseColumn +
+N' , COALESCE(r.status,s.status) AS status
+, COALESCE(r.cpu_time,s.cpu_time) AS cpu_time_ms
+, COALESCE(r.total_elapsed_time,s.total_elapsed_time) AS total_elapsed_time_ms
+, COALESCE(r.logical_reads,s.logical_reads) AS logical_reads
+, COALESCE(r.writes,s.writes) AS writes
+, r.command
+, r.wait_time as wait_time_ms
+, r.wait_type
+, r.wait_resource
+, r.blocking_session_id
+, s.program_name
+, s.host_name
+, s.nt_user_name '
++ @Columns +
+N', LEFT (CASE COALESCE(r.transaction_isolation_level, s.transaction_isolation_level)
+ WHEN 0 THEN ''0-Read Committed''
+ WHEN 1 THEN ''1-Read Uncommitted (NOLOCK)''
+ WHEN 2 THEN ''2-Read Committed''
+ WHEN 3 THEN ''3-Repeatable Read''
+ WHEN 4 THEN ''4-Serializable''
+ WHEN 5 THEN ''5-Snapshot''
+ ELSE CONVERT (varchar(30), r.transaction_isolation_level) + ''-UNKNOWN''
+ END, 30) AS transaction_isolation_level
+, r.granted_query_memory as granted_query_memory_pages
+, r.percent_complete
+, SUBSTRING(
+ qt.text,
+ r.statement_start_offset / 2 + 1,
+ (CASE WHEN r.statement_end_offset = -1
+ THEN DATALENGTH(qt.text)
+ ELSE r.statement_end_offset
+ END - r.statement_start_offset) / 2 + 1
+ ) AS statement_text
+, qt.objectid
+, QUOTENAME(OBJECT_SCHEMA_NAME(qt.objectid,qt.dbid)) + ''.'' + QUOTENAME(OBJECT_NAME(qt.objectid,qt.dbid)) as stmt_object_name
+, DB_NAME(qt.dbid) stmt_db_name
+, CONVERT(varchar(20),[query_hash],1) as [query_hash]
+, CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash]
+FROM sys.dm_exec_sessions AS s
+LEFT OUTER JOIN sys.dm_exec_requests AS r
+ ON s.session_id = r.session_id
+OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt
+WHERE 1 = 1
+ AND (r.session_id IS NOT NULL AND (s.is_user_process = 1
+ OR r.status COLLATE Latin1_General_BIN NOT IN (''background'', ''sleeping'')))
+ OR (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions))
+OPTION(MAXDOP 1)'
+
+EXEC sp_executesql @SqlStatement
+`
+
+const sqlServerVolumeSpaceV2 string = `
+/* Only for on-prem version of SQL Server
+Gets data about disk space, only for volumes used by SQL Server (data available form sql 2008R2 and later)
+*/
+DECLARE
+ @EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+
+IF @EngineEdition IN (2,3,4) AND @MajorMinorVersion >= 1050
+ BEGIN
+ SELECT DISTINCT
+ 'sqlserver_volume_space' AS [measurement]
+ ,SERVERPROPERTY('machinename') AS [server_name]
+ ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ /*in [volume_mount_point] any trailing "\" char will be removed by telegraf */
+ ,[volume_mount_point]
+ ,vs.[total_bytes] AS [total_space_bytes]
+ ,vs.[available_bytes] AS [available_space_bytes]
+ ,vs.[total_bytes] - vs.[available_bytes] AS [used_space_bytes]
+ FROM
+ sys.master_files as mf
+ CROSS APPLY sys.dm_os_volume_stats(mf.database_id, mf.file_id) as vs
+ END
+`
+
+const sqlServerCpuV2 string = `
+/*The ring buffer has a new value every minute*/
+IF SERVERPROPERTY('EngineEdition') IN (2,3,4) /*Standard,Enterpris,Express*/
+BEGIN
+SELECT
+ 'sqlserver_cpu' AS [measurement]
+ ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ ,[SQLProcessUtilization] AS [sqlserver_process_cpu]
+ ,[SystemIdle] AS [system_idle_cpu]
+ ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu]
+FROM (
+ SELECT TOP 1
+ [record_id]
+ /*,dateadd(ms, (y.[timestamp] - (SELECT CAST([ms_ticks] AS BIGINT) FROM sys.dm_os_sys_info)), GETDATE()) AS [EventTime] --use for check/debug purpose*/
+ ,[SQLProcessUtilization]
+ ,[SystemIdle]
+ FROM (
+ SELECT record.value('(./Record/@id)[1]', 'int') AS [record_id]
+ ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle]
+ ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization]
+ ,[TIMESTAMP]
+ FROM (
+ SELECT [TIMESTAMP]
+ ,convert(XML, [record]) AS [record]
+ FROM sys.dm_os_ring_buffers
+ WHERE [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR'
+ AND [record] LIKE '%%'
+ ) AS x
+ ) AS y
+ ORDER BY record_id DESC
+) as z
+
+END
+`
diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go
index dc57c87a4faa3..942c152258cb3 100644
--- a/plugins/inputs/sqlserver/sqlserver.go
+++ b/plugins/inputs/sqlserver/sqlserver.go
@@ -2,24 +2,32 @@ package sqlserver
import (
"database/sql"
+ "fmt"
+ "log"
"sync"
"time"
_ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/plugins/inputs"
)
// SQLServer struct
type SQLServer struct {
- Servers []string `toml:"servers"`
- QueryVersion int `toml:"query_version"`
- AzureDB bool `toml:"azuredb"`
- ExcludeQuery []string `toml:"exclude_query"`
+ Servers []string `toml:"servers"`
+ QueryVersion int `toml:"query_version"`
+ AzureDB bool `toml:"azuredb"`
+ DatabaseType string `toml:"database_type"`
+ IncludeQuery []string `toml:"include_query"`
+ ExcludeQuery []string `toml:"exclude_query"`
+ queries MapQuery
+ isInitialized bool
}
// Query struct
type Query struct {
+ ScriptName string
Script string
ResultByRow bool
OrderedColumns []string
@@ -28,45 +36,72 @@ type Query struct {
// MapQuery type
type MapQuery map[string]Query
-var queries MapQuery
-
-// Initialized flag
-var isInitialized = false
-
-var defaultServer = "Server=.;app name=telegraf;log=1;"
-
-var sampleConfig = `
- ## Specify instances to monitor with a list of connection strings.
- ## All connection parameters are optional.
- ## By default, the host is localhost, listening on default port, TCP 1433.
- ## for Windows, the user is the currently running AD user (SSO).
- ## See https://github.com/denisenkom/go-mssqldb for detailed connection
- ## parameters.
- # servers = [
- # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
- # ]
-
- ## Optional parameter, setting this to 2 will use a new version
- ## of the collection queries that break compatibility with the original
- ## dashboards.
- query_version = 2
-
- ## If you are using AzureDB, setting this to true will gather resource utilization metrics
- # azuredb = false
-
- ## If you would like to exclude some of the metrics queries, list them here
- ## Possible choices:
- ## - PerformanceCounters
- ## - WaitStatsCategorized
- ## - DatabaseIO
- ## - DatabaseProperties
- ## - CPUHistory
- ## - DatabaseSize
- ## - DatabaseStats
- ## - MemoryClerk
- ## - VolumeSpace
- ## - PerformanceMetrics
- # exclude_query = [ 'DatabaseIO' ]
+const defaultServer = "Server=.;app name=telegraf;log=1;"
+
+const sampleConfig = `
+## Specify instances to monitor with a list of connection strings.
+## All connection parameters are optional.
+## By default, the host is localhost, listening on default port, TCP 1433.
+## for Windows, the user is the currently running AD user (SSO).
+## See https://github.com/denisenkom/go-mssqldb for detailed connection
+## parameters, in particular, tls connections can be created like so:
+## "encrypt=true;certificate=;hostNameInCertificate="
+# servers = [
+# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
+# ]
+
+## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
+## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
+## Possible values for database_type are
+## "AzureSQLDB"
+## "SQLServer"
+## "AzureSQLManagedInstance"
+# database_type = "AzureSQLDB"
+
+
+## Optional parameter, setting this to 2 will use a new version
+## of the collection queries that break compatibility with the original
+## dashboards.
+## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
+query_version = 2
+
+## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# azuredb = false
+
+## Possible queries
+## Version 2:
+## - PerformanceCounters
+## - WaitStatsCategorized
+## - DatabaseIO
+## - ServerProperties
+## - MemoryClerk
+## - Schedulers
+## - SqlRequests
+## - VolumeSpace
+## - Cpu
+
+## Version 1:
+## - PerformanceCounters
+## - WaitStatsCategorized
+## - CPUHistory
+## - DatabaseIO
+## - DatabaseSize
+## - DatabaseStats
+## - DatabaseProperties
+## - MemoryClerk
+## - VolumeSpace
+## - PerformanceMetrics
+
+
+## Queries enabled by default for specific Database Type
+## database_type = AzureSQLDB
+ ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO
+
+## A list of queries to include. If not specified, all the above listed queries are used.
+# include_query = []
+
+## A list of queries to explicitly ignore.
+exclude_query = [ 'Schedulers' , 'SqlRequests']
`
// SampleConfig return the sample configuration
@@ -83,46 +118,109 @@ type scanner interface {
Scan(dest ...interface{}) error
}
-func initQueries(s *SQLServer) {
- queries = make(MapQuery)
-
- // If this is an AzureDB instance, grab some extra metrics
- if s.AzureDB {
- queries["AzureDB"] = Query{Script: sqlAzureDB, ResultByRow: false}
+func initQueries(s *SQLServer) error {
+ s.queries = make(MapQuery)
+ queries := s.queries
+ log.Printf("I! [inputs.sqlserver] Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB)
+
+ // New config option database_type
+ // To prevent query definition conflicts
+ // Constant defintiions for type "AzureSQLDB" start with sqlAzureDB
+ // Constant defintiions for type "AzureSQLManagedInstance" start with sqlAzureMI
+ // Constant defintiions for type "SQLServer" start with sqlServer
+ if s.DatabaseType == "AzureSQLDB" {
+ queries["AzureSQLDBResourceStats"] = Query{ScriptName: "AzureSQLDBResourceStats", Script: sqlAzureDBResourceStats, ResultByRow: false}
+ queries["AzureSQLDBResourceGovernance"] = Query{ScriptName: "AzureSQLDBResourceGovernance", Script: sqlAzureDBResourceGovernance, ResultByRow: false}
+ queries["AzureSQLDBWaitStats"] = Query{ScriptName: "AzureSQLDBWaitStats", Script: sqlAzureDBWaitStats, ResultByRow: false}
+ queries["AzureSQLDBDatabaseIO"] = Query{ScriptName: "AzureSQLDBDatabaseIO", Script: sqlAzureDBDatabaseIO, ResultByRow: false}
+ queries["AzureSQLDBServerProperties"] = Query{ScriptName: "AzureSQLDBServerProperties", Script: sqlAzureDBProperties, ResultByRow: false}
+ queries["AzureSQLDBOsWaitstats"] = Query{ScriptName: "AzureSQLOsWaitstats", Script: sqlAzureDBOsWaitStats, ResultByRow: false}
+ queries["AzureSQLDBMemoryClerks"] = Query{ScriptName: "AzureSQLDBMemoryClerks", Script: sqlAzureDBMemoryClerks, ResultByRow: false}
+ queries["AzureSQLDBPerformanceCounters"] = Query{ScriptName: "AzureSQLDBPerformanceCounters", Script: sqlAzureDBPerformanceCounters, ResultByRow: false}
+ queries["AzureSQLDBRequests"] = Query{ScriptName: "AzureSQLDBRequests", Script: sqlAzureDBRequests, ResultByRow: false}
+ queries["AzureSQLDBSchedulers"] = Query{ScriptName: "AzureSQLDBSchedulers", Script: sqlServerSchedulers, ResultByRow: false}
+ } else if s.DatabaseType == "AzureSQLManagedInstance" {
+ queries["AzureSQLMIResourceStats"] = Query{ScriptName: "AzureSQLMIResourceStats", Script: sqlAzureMIResourceStats, ResultByRow: false}
+ queries["AzureSQLMIResourceGovernance"] = Query{ScriptName: "AzureSQLMIResourceGovernance", Script: sqlAzureMIResourceGovernance, ResultByRow: false}
+ queries["AzureSQLMIDatabaseIO"] = Query{ScriptName: "AzureSQLMIDatabaseIO", Script: sqlAzureMIDatabaseIO, ResultByRow: false}
+ queries["AzureSQLMIServerProperties"] = Query{ScriptName: "AzureSQLMIServerProperties", Script: sqlAzureMIProperties, ResultByRow: false}
+ queries["AzureSQLMIOsWaitstats"] = Query{ScriptName: "AzureSQLMIOsWaitstats", Script: sqlAzureMIOsWaitStats, ResultByRow: false}
+ queries["AzureSQLMIMemoryClerks"] = Query{ScriptName: "AzureSQLMIMemoryClerks", Script: sqlAzureMIMemoryClerks, ResultByRow: false}
+ queries["AzureSQLMIPerformanceCounters"] = Query{ScriptName: "AzureSQLMIPerformanceCounters", Script: sqlAzureMIPerformanceCounters, ResultByRow: false}
+ queries["AzureSQLMISqlRequests"] = Query{ScriptName: "AzureSQLMISqlRequests", Script: sqlAzureMIRequests, ResultByRow: false}
+ queries["AzureSQLMISchedulers"] = Query{ScriptName: "AzureSQLMISchedulers", Script: sqlServerSchedulers, ResultByRow: false}
+ } else if s.DatabaseType == "SQLServer" { //These are still V2 queries and have not been refactored yet.
+ queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false}
+ queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false}
+ queries["SQLServerDatabaseIO"] = Query{ScriptName: "SQLServerDatabaseIO", Script: sqlServerDatabaseIO, ResultByRow: false}
+ queries["SQLServerProperties"] = Query{ScriptName: "SQLServerProperties", Script: sqlServerProperties, ResultByRow: false}
+ queries["SQLServerMemoryClerks"] = Query{ScriptName: "SQLServerMemoryClerks", Script: sqlServerMemoryClerks, ResultByRow: false}
+ queries["SQLServerSchedulers"] = Query{ScriptName: "SQLServerSchedulers", Script: sqlServerSchedulers, ResultByRow: false}
+ queries["SQLServerRequests"] = Query{ScriptName: "SQLServerRequests", Script: sqlServerRequests, ResultByRow: false}
+ queries["SQLServerVolumeSpace"] = Query{ScriptName: "SQLServerVolumeSpace", Script: sqlServerVolumeSpace, ResultByRow: false}
+ queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCpu, ResultByRow: false}
+ } else {
+ // If this is an AzureDB instance, grab some extra metrics
+ if s.AzureDB {
+ queries["AzureDBResourceStats"] = Query{ScriptName: "AzureDBPerformanceCounters", Script: sqlAzureDBResourceStats, ResultByRow: false}
+ queries["AzureDBResourceGovernance"] = Query{ScriptName: "AzureDBPerformanceCounters", Script: sqlAzureDBResourceGovernance, ResultByRow: false}
+ }
+ // Decide if we want to run version 1 or version 2 queries
+ if s.QueryVersion == 2 {
+ log.Println("W! DEPRECATION NOTICE: query_version=2 is being deprecated in favor of database_type.")
+ queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCountersV2, ResultByRow: true}
+ queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorizedV2, ResultByRow: false}
+ queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: sqlDatabaseIOV2, ResultByRow: false}
+ queries["ServerProperties"] = Query{ScriptName: "ServerProperties", Script: sqlServerPropertiesV2, ResultByRow: false}
+ queries["MemoryClerk"] = Query{ScriptName: "MemoryClerk", Script: sqlMemoryClerkV2, ResultByRow: false}
+ queries["Schedulers"] = Query{ScriptName: "Schedulers", Script: sqlServerSchedulersV2, ResultByRow: false}
+ queries["SqlRequests"] = Query{ScriptName: "SqlRequests", Script: sqlServerRequestsV2, ResultByRow: false}
+ queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false}
+ queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCpuV2, ResultByRow: false}
+ } else {
+ log.Println("W! DEPRECATED: query_version=1 has been deprecated in favor of database_type.")
+ queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true}
+ queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorized, ResultByRow: false}
+ queries["CPUHistory"] = Query{ScriptName: "CPUHistory", Script: sqlCPUHistory, ResultByRow: false}
+ queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: sqlDatabaseIO, ResultByRow: false}
+ queries["DatabaseSize"] = Query{ScriptName: "DatabaseSize", Script: sqlDatabaseSize, ResultByRow: false}
+ queries["DatabaseStats"] = Query{ScriptName: "DatabaseStats", Script: sqlDatabaseStats, ResultByRow: false}
+ queries["DatabaseProperties"] = Query{ScriptName: "DatabaseProperties", Script: sqlDatabaseProperties, ResultByRow: false}
+ queries["MemoryClerk"] = Query{ScriptName: "MemoryClerk", Script: sqlMemoryClerk, ResultByRow: false}
+ queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlVolumeSpace, ResultByRow: false}
+ queries["PerformanceMetrics"] = Query{ScriptName: "PerformanceMetrics", Script: sqlPerformanceMetrics, ResultByRow: false}
+ }
}
- // Decide if we want to run version 1 or version 2 queries
- if s.QueryVersion == 2 {
- queries["PerformanceCounters"] = Query{Script: sqlPerformanceCountersV2, ResultByRow: true}
- queries["WaitStatsCategorized"] = Query{Script: sqlWaitStatsCategorizedV2, ResultByRow: false}
- queries["DatabaseIO"] = Query{Script: sqlDatabaseIOV2, ResultByRow: false}
- queries["ServerProperties"] = Query{Script: sqlServerPropertiesV2, ResultByRow: false}
- queries["MemoryClerk"] = Query{Script: sqlMemoryClerkV2, ResultByRow: false}
- } else {
- queries["PerformanceCounters"] = Query{Script: sqlPerformanceCounters, ResultByRow: true}
- queries["WaitStatsCategorized"] = Query{Script: sqlWaitStatsCategorized, ResultByRow: false}
- queries["CPUHistory"] = Query{Script: sqlCPUHistory, ResultByRow: false}
- queries["DatabaseIO"] = Query{Script: sqlDatabaseIO, ResultByRow: false}
- queries["DatabaseSize"] = Query{Script: sqlDatabaseSize, ResultByRow: false}
- queries["DatabaseStats"] = Query{Script: sqlDatabaseStats, ResultByRow: false}
- queries["DatabaseProperties"] = Query{Script: sqlDatabaseProperties, ResultByRow: false}
- queries["MemoryClerk"] = Query{Script: sqlMemoryClerk, ResultByRow: false}
- queries["VolumeSpace"] = Query{Script: sqlVolumeSpace, ResultByRow: false}
- queries["PerformanceMetrics"] = Query{Script: sqlPerformanceMetrics, ResultByRow: false}
+ filterQueries, err := filter.NewIncludeExcludeFilter(s.IncludeQuery, s.ExcludeQuery)
+ if err != nil {
+ return err
}
- for _, query := range s.ExcludeQuery {
- delete(queries, query)
+ for query := range queries {
+ if !filterQueries.Match(query) {
+ delete(queries, query)
+ }
}
// Set a flag so we know that queries have already been initialized
- isInitialized = true
+ s.isInitialized = true
+ var querylist []string
+ for query := range queries {
+ querylist = append(querylist, query)
+ }
+ log.Printf("I! [inputs.sqlserver] Config: Effective Queries: %#v\n", querylist)
+
+ return nil
}
// Gather collect data from SQL Server
func (s *SQLServer) Gather(acc telegraf.Accumulator) error {
- if !isInitialized {
- initQueries(s)
+ if !s.isInitialized {
+ if err := initQueries(s); err != nil {
+ acc.AddError(err)
+ return err
+ }
}
if len(s.Servers) == 0 {
@@ -132,7 +230,7 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
for _, serv := range s.Servers {
- for _, query := range queries {
+ for _, query := range s.queries {
wg.Add(1)
go func(serv string, query Query) {
defer wg.Done()
@@ -151,18 +249,13 @@ func (s *SQLServer) gatherServer(server string, query Query, acc telegraf.Accumu
if err != nil {
return err
}
- // verify that a connection can be made before making a query
- err = conn.Ping()
- if err != nil {
- // Handle error
- return err
- }
defer conn.Close()
// execute query
rows, err := conn.Query(query.Script)
if err != nil {
- return err
+ return fmt.Errorf("Script %s failed: %w", query.ScriptName, err)
+ //return err
}
defer rows.Close()
@@ -237,2383 +330,3 @@ func init() {
return &SQLServer{}
})
}
-
-// Queries - V2
-// Thanks Bob Ward (http://aka.ms/bobwardms)
-// and the folks at Stack Overflow (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs)
-// for putting most of the memory clerk definitions online!
-const sqlMemoryClerkV2 = `SET DEADLOCK_PRIORITY -10;
-DECLARE @SQL NVARCHAR(MAX) = 'SELECT
-"sqlserver_memory_clerks" As [measurement],
-REPLACE(@@SERVERNAME,"\",":") AS [sql_instance],
-ISNULL(clerk_names.name,mc.type) AS clerk_type,
-SUM({pages_kb}) AS size_kb
-FROM
-sys.dm_os_memory_clerks AS mc WITH (NOLOCK)
-LEFT OUTER JOIN ( VALUES
-("CACHESTORE_BROKERDSH","Service Broker Dialog Security Header Cache"),
-("CACHESTORE_BROKERKEK","Service Broker Key Exchange Key Cache"),
-("CACHESTORE_BROKERREADONLY","Service Broker (Read-Only)"),
-("CACHESTORE_BROKERRSB","Service Broker Null Remote Service Binding Cache"),
-("CACHESTORE_BROKERTBLACS","Broker dormant rowsets"),
-("CACHESTORE_BROKERTO","Service Broker Transmission Object Cache"),
-("CACHESTORE_BROKERUSERCERTLOOKUP","Service Broker user certificates lookup result cache"),
-("CACHESTORE_CLRPROC","CLR Procedure Cache"),
-("CACHESTORE_CLRUDTINFO","CLR UDT Info"),
-("CACHESTORE_COLUMNSTOREOBJECTPOOL","Column Store Object Pool"),
-("CACHESTORE_CONVPRI","Conversation Priority Cache"),
-("CACHESTORE_EVENTS","Event Notification Cache"),
-("CACHESTORE_FULLTEXTSTOPLIST","Full Text Stoplist Cache"),
-("CACHESTORE_NOTIF","Notification Store"),
-("CACHESTORE_OBJCP","Object Plans"),
-("CACHESTORE_PHDR","Bound Trees"),
-("CACHESTORE_SEARCHPROPERTYLIST","Search Property List Cache"),
-("CACHESTORE_SEHOBTCOLUMNATTRIBUTE","SE Shared Column Metadata Cache"),
-("CACHESTORE_SQLCP","SQL Plans"),
-("CACHESTORE_STACKFRAMES","SOS_StackFramesStore"),
-("CACHESTORE_SYSTEMROWSET","System Rowset Store"),
-("CACHESTORE_TEMPTABLES","Temporary Tables & Table Variables"),
-("CACHESTORE_VIEWDEFINITIONS","View Definition Cache"),
-("CACHESTORE_XML_SELECTIVE_DG","XML DB Cache (Selective)"),
-("CACHESTORE_XMLDBATTRIBUTE","XML DB Cache (Attribute)"),
-("CACHESTORE_XMLDBELEMENT","XML DB Cache (Element)"),
-("CACHESTORE_XMLDBTYPE","XML DB Cache (Type)"),
-("CACHESTORE_XPROC","Extended Stored Procedures"),
-("MEMORYCLERK_FILETABLE","Memory Clerk (File Table)"),
-("MEMORYCLERK_FSCHUNKER","Memory Clerk (FS Chunker)"),
-("MEMORYCLERK_FULLTEXT","Full Text"),
-("MEMORYCLERK_FULLTEXT_SHMEM","Full-text IG"),
-("MEMORYCLERK_HADR","HADR"),
-("MEMORYCLERK_HOST","Host"),
-("MEMORYCLERK_LANGSVC","Language Service"),
-("MEMORYCLERK_LWC","Light Weight Cache"),
-("MEMORYCLERK_QSRANGEPREFETCH","QS Range Prefetch"),
-("MEMORYCLERK_SERIALIZATION","Serialization"),
-("MEMORYCLERK_SNI","SNI"),
-("MEMORYCLERK_SOSMEMMANAGER","SOS Memory Manager"),
-("MEMORYCLERK_SOSNODE","SOS Node"),
-("MEMORYCLERK_SOSOS","SOS Memory Clerk"),
-("MEMORYCLERK_SQLBUFFERPOOL","Buffer Pool"),
-("MEMORYCLERK_SQLCLR","CLR"),
-("MEMORYCLERK_SQLCLRASSEMBLY","CLR Assembly"),
-("MEMORYCLERK_SQLCONNECTIONPOOL","Connection Pool"),
-("MEMORYCLERK_SQLGENERAL","General"),
-("MEMORYCLERK_SQLHTTP","HTTP"),
-("MEMORYCLERK_SQLLOGPOOL","Log Pool"),
-("MEMORYCLERK_SQLOPTIMIZER","SQL Optimizer"),
-("MEMORYCLERK_SQLQERESERVATIONS","SQL Reservations"),
-("MEMORYCLERK_SQLQUERYCOMPILE","SQL Query Compile"),
-("MEMORYCLERK_SQLQUERYEXEC","SQL Query Exec"),
-("MEMORYCLERK_SQLQUERYPLAN","SQL Query Plan"),
-("MEMORYCLERK_SQLSERVICEBROKER","SQL Service Broker"),
-("MEMORYCLERK_SQLSERVICEBROKERTRANSPORT","Unified Communication Stack"),
-("MEMORYCLERK_SQLSOAP","SQL SOAP"),
-("MEMORYCLERK_SQLSOAPSESSIONSTORE","SQL SOAP (Session Store)"),
-("MEMORYCLERK_SQLSTORENG","SQL Storage Engine"),
-("MEMORYCLERK_SQLUTILITIES","SQL Utilities"),
-("MEMORYCLERK_SQLXML","SQL XML"),
-("MEMORYCLERK_SQLXP","SQL XP"),
-("MEMORYCLERK_TRACE_EVTNOTIF","Trace Event Notification"),
-("MEMORYCLERK_XE","XE Engine"),
-("MEMORYCLERK_XE_BUFFER","XE Buffer"),
-("MEMORYCLERK_XTP","In-Memory OLTP"),
-("OBJECTSTORE_LBSS","Lbss Cache (Object Store)"),
-("OBJECTSTORE_LOCK_MANAGER","Lock Manager (Object Store)"),
-("OBJECTSTORE_SECAUDIT_EVENT_BUFFER","Audit Event Buffer (Object Store)"),
-("OBJECTSTORE_SERVICE_BROKER","Service Broker (Object Store)"),
-("OBJECTSTORE_SNI_PACKET","SNI Packet (Object Store)"),
-("OBJECTSTORE_XACT_CACHE","Transactions Cache (Object Store)"),
-("USERSTORE_DBMETADATA","DB Metadata (User Store)"),
-("USERSTORE_OBJPERM","Object Permissions (User Store)"),
-("USERSTORE_SCHEMAMGR","Schema Manager (User Store)"),
-("USERSTORE_SXC","SXC (User Store)"),
-("USERSTORE_TOKENPERM","Token Permissions (User Store)"),
-("USERSTORE_QDSSTMT","QDS Statement Buffer (Pre-persist)"),
-("CACHESTORE_QDSRUNTIMESTATS","QDS Runtime Stats (Pre-persist)"),
-("CACHESTORE_QDSCONTEXTSETTINGS","QDS Unique Context Settings"),
-("MEMORYCLERK_QUERYDISKSTORE","QDS General"),
-("MEMORYCLERK_QUERYDISKSTORE_HASHMAP","QDS Query/Plan Hash Table")
-) AS clerk_names(system_name,name)
-ON mc.type = clerk_names.system_name
-GROUP BY ISNULL(clerk_names.name,mc.type)
-HAVING SUM({pages_kb}) >= 1024
-OPTION( RECOMPILE );'
-
-IF CAST(LEFT(CAST(SERVERPROPERTY('productversion') as varchar), 2) AS INT) > 10 -- SQL Server 2008 Compat
- SET @SQL = REPLACE(REPLACE(@SQL,'{pages_kb}','mc.pages_kb'),'"','''')
-ELSE
- SET @SQL = REPLACE(REPLACE(@SQL,'{pages_kb}','mc.single_pages_kb + mc.multi_pages_kb'),'"','''')
-
-EXEC(@SQL)
-`
-
-const sqlDatabaseIOV2 = `SET DEADLOCK_PRIORITY -10;
-IF SERVERPROPERTY('EngineEdition') = 5
-BEGIN
-SELECT
-'sqlserver_database_io' As [measurement],
-REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
-DB_NAME([vfs].[database_id]) [database_name],
-vfs.io_stall_read_ms AS read_latency_ms,
-vfs.num_of_reads AS reads,
-vfs.num_of_bytes_read AS read_bytes,
-vfs.io_stall_write_ms AS write_latency_ms,
-vfs.num_of_writes AS writes,
-vfs.num_of_bytes_written AS write_bytes,
-b.name as logical_filename,
-b.physical_name as physical_filename,
-CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'DATA' END AS file_type
-FROM
-[sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs
-inner join sys.database_files b on b.file_id = vfs.file_id
-END
-ELSE
-BEGIN
-SELECT
-'sqlserver_database_io' As [measurement],
-REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
-DB_NAME([vfs].[database_id]) [database_name],
-vfs.io_stall_read_ms AS read_latency_ms,
-vfs.num_of_reads AS reads,
-vfs.num_of_bytes_read AS read_bytes,
-vfs.io_stall_write_ms AS write_latency_ms,
-vfs.num_of_writes AS writes,
-vfs.num_of_bytes_written AS write_bytes,
-b.name as logical_filename,
-b.physical_name as physical_filename,
-CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'DATA' END AS file_type
-FROM
-[sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs
-inner join sys.master_files b on b.database_id = vfs.database_id and b.file_id = vfs.file_id
-END
-`
-
-const sqlServerPropertiesV2 = `SET DEADLOCK_PRIORITY -10;
-DECLARE @sys_info TABLE (
- cpu_count INT,
- server_memory BIGINT,
- sku NVARCHAR(64),
- engine_edition SMALLINT,
- hardware_type VARCHAR(16),
- total_storage_mb BIGINT,
- available_storage_mb BIGINT,
- uptime INT
-)
-
-IF OBJECT_ID('master.sys.dm_os_sys_info') IS NOT NULL
-BEGIN
- IF SERVERPROPERTY('EngineEdition') = 8 -- Managed Instance
- INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime )
- SELECT TOP(1)
- virtual_core_count AS cpu_count,
- (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory,
- sku,
- cast(SERVERPROPERTY('EngineEdition') as smallint) AS engine_edition,
- hardware_generation AS hardware_type,
- reserved_storage_mb AS total_storage_mb,
- (reserved_storage_mb - storage_space_used_mb) AS available_storage_mb,
- (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime
- FROM sys.server_resource_stats
- ORDER BY start_time DESC
-
- ELSE
- BEGIN
- INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime )
- SELECT cpu_count,
- (SELECT total_physical_memory_kb FROM sys.dm_os_sys_memory) AS server_memory,
- CAST(SERVERPROPERTY('Edition') AS NVARCHAR(64)) as sku,
- CAST(SERVERPROPERTY('EngineEdition') as smallint) as engine_edition,
- CASE virtual_machine_type_desc
- WHEN 'NONE' THEN 'PHYSICAL Machine'
- ELSE virtual_machine_type_desc
- END AS hardware_type,
- NULL,
- NULL,
- DATEDIFF(MINUTE,sqlserver_start_time,GETDATE())
- FROM sys.dm_os_sys_info
- END
-END
-SELECT 'sqlserver_server_properties' AS [measurement],
- REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
- s.cpu_count,
- s.server_memory,
- s.sku,
- s.engine_edition,
- s.hardware_type,
- s.total_storage_mb,
- s.available_storage_mb,
- s.uptime,
- SERVERPROPERTY('ProductVersion') AS sql_version,
- db_online,
- db_restoring,
- db_recovering,
- db_recoveryPending,
- db_suspect,
- db_offline
-FROM (
- SELECT SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online,
- SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring,
- SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering,
- SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending,
- SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect,
- SUM( CASE WHEN state = 6 or state = 10 THEN 1 ELSE 0 END ) AS db_offline
- FROM sys.databases
- ) AS dbs
- CROSS APPLY (
- SELECT cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime
- FROM @sys_info
- ) AS s
-OPTION( RECOMPILE )
-`
-
-const sqlPerformanceCountersV2 string = `SET DEADLOCK_PRIORITY -10;
-DECLARE @PCounters TABLE
-(
- object_name nvarchar(128),
- counter_name nvarchar(128),
- instance_name nvarchar(128),
- cntr_value bigint,
- cntr_type INT,
- Primary Key(object_name, counter_name, instance_name)
-);
-INSERT INTO @PCounters
-SELECT DISTINCT
- RTrim(spi.object_name) object_name,
- RTrim(spi.counter_name) counter_name,
- RTrim(spi.instance_name) instance_name,
- CAST(spi.cntr_value AS BIGINT) AS cntr_value,
- spi.cntr_type
-FROM sys.dm_os_performance_counters AS spi
-WHERE (
- counter_name IN (
- 'SQL Compilations/sec',
- 'SQL Re-Compilations/sec',
- 'User Connections',
- 'Batch Requests/sec',
- 'Logouts/sec',
- 'Logins/sec',
- 'Processes blocked',
- 'Latch Waits/sec',
- 'Full Scans/sec',
- 'Index Searches/sec',
- 'Page Splits/sec',
- 'Page Lookups/sec',
- 'Page Reads/sec',
- 'Page Writes/sec',
- 'Readahead Pages/sec',
- 'Lazy Writes/sec',
- 'Checkpoint Pages/sec',
- 'Page life expectancy',
- 'Log File(s) Size (KB)',
- 'Log File(s) Used Size (KB)',
- 'Data File(s) Size (KB)',
- 'Transactions/sec',
- 'Write Transactions/sec',
- 'Active Temp Tables',
- 'Temp Tables Creation Rate',
- 'Temp Tables For Destruction',
- 'Free Space in tempdb (KB)',
- 'Version Store Size (KB)',
- 'Memory Grants Pending',
- 'Memory Grants Outstanding',
- 'Free list stalls/sec',
- 'Buffer cache hit ratio',
- 'Buffer cache hit ratio base',
- 'Backup/Restore Throughput/sec',
- 'Total Server Memory (KB)',
- 'Target Server Memory (KB)',
- 'Log Flushes/sec',
- 'Log Flush Wait Time',
- 'Memory broker clerk size',
- 'Log Bytes Flushed/sec',
- 'Bytes Sent to Replica/sec',
- 'Log Send Queue',
- 'Bytes Sent to Transport/sec',
- 'Sends to Replica/sec',
- 'Bytes Sent to Transport/sec',
- 'Sends to Transport/sec',
- 'Bytes Received from Replica/sec',
- 'Receives from Replica/sec',
- 'Flow Control Time (ms/sec)',
- 'Flow Control/sec',
- 'Resent Messages/sec',
- 'Redone Bytes/sec',
- 'XTP Memory Used (KB)',
- 'Transaction Delay',
- 'Log Bytes Received/sec',
- 'Log Apply Pending Queue',
- 'Redone Bytes/sec',
- 'Recovery Queue',
- 'Log Apply Ready Queue',
- 'CPU usage %',
- 'CPU usage % base',
- 'Queued requests',
- 'Requests completed/sec',
- 'Blocked tasks',
- 'Active memory grant amount (KB)',
- 'Disk Read Bytes/sec',
- 'Disk Read IO Throttled/sec',
- 'Disk Read IO/sec',
- 'Disk Write Bytes/sec',
- 'Disk Write IO Throttled/sec',
- 'Disk Write IO/sec',
- 'Used memory (KB)',
- 'Forwarded Records/sec',
- 'Background Writer pages/sec',
- 'Percent Log Used',
- 'Log Send Queue KB',
- 'Redo Queue KB'
- )
- ) OR (
- object_name LIKE '%User Settable%'
- OR object_name LIKE '%SQL Errors%'
- ) OR (
- instance_name IN ('_Total')
- AND counter_name IN (
- 'Lock Timeouts/sec',
- 'Number of Deadlocks/sec',
- 'Lock Waits/sec',
- 'Latch Waits/sec'
- )
- )
-
-DECLARE @SQL NVARCHAR(MAX)
-SET @SQL = REPLACE('
-SELECT
-"SQLServer:Workload Group Stats" AS object,
-counter,
-instance,
-CAST(vs.value AS BIGINT) AS value,
-1
-FROM
-(
- SELECT
- rgwg.name AS instance,
- rgwg.total_request_count AS "Request Count",
- rgwg.total_queued_request_count AS "Queued Request Count",
- rgwg.total_cpu_limit_violation_count AS "CPU Limit Violation Count",
- rgwg.total_cpu_usage_ms AS "CPU Usage (time)",
- ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN 'rgwg.total_cpu_usage_preemptive_ms AS "Premptive CPU Usage (time)",' ELSE '' END + '
- rgwg.total_lock_wait_count AS "Lock Wait Count",
- rgwg.total_lock_wait_time_ms AS "Lock Wait Time",
- rgwg.total_reduced_memgrant_count AS "Reduced Memory Grant Count"
- FROM sys.dm_resource_governor_workload_groups AS rgwg
- INNER JOIN sys.dm_resource_governor_resource_pools AS rgrp
- ON rgwg.pool_id = rgrp.pool_id
-) AS rg
-UNPIVOT (
- value FOR counter IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN '[Premptive CPU Usage (time)], ' ELSE '' END + '[Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] )
-) AS vs'
-,'"','''')
-
-INSERT INTO @PCounters
-EXEC( @SQL )
-
-SELECT 'sqlserver_performance' AS [measurement],
- REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
- pc.object_name AS [object],
- pc.counter_name AS [counter],
- CASE pc.instance_name WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.instance_name,'') END AS [instance],
- CAST(CASE WHEN pc.cntr_type = 537003264 AND pc1.cntr_value > 0 THEN (pc.cntr_value * 1.0) / (pc1.cntr_value * 1.0) * 100 ELSE pc.cntr_value END AS float(10)) AS [value]
-FROM @PCounters AS pc
- LEFT OUTER JOIN @PCounters AS pc1
- ON (
- pc.counter_name = REPLACE(pc1.counter_name,' base','')
- OR pc.counter_name = REPLACE(pc1.counter_name,' base',' (ms)')
- )
- AND pc.object_name = pc1.object_name
- AND pc.instance_name = pc1.instance_name
- AND pc1.counter_name LIKE '%base'
-WHERE pc.counter_name NOT LIKE '% base'
-OPTION(RECOMPILE);
-`
-
-const sqlWaitStatsCategorizedV2 string = `SET DEADLOCK_PRIORITY -10;
-SELECT
-'sqlserver_waitstats' AS [measurement],
-REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
-ws.wait_type,
-wait_time_ms,
-wait_time_ms - signal_wait_time_ms AS [resource_wait_ms],
-signal_wait_time_ms,
-max_wait_time_ms,
-waiting_tasks_count,
-ISNULL(wc.wait_category,'OTHER') AS [wait_category]
-FROM
-sys.dm_os_wait_stats AS ws WITH (NOLOCK)
-LEFT OUTER JOIN ( VALUES
-('ASYNC_IO_COMPLETION','Other Disk IO'),
-('ASYNC_NETWORK_IO','Network IO'),
-('BACKUPIO','Other Disk IO'),
-('BROKER_CONNECTION_RECEIVE_TASK','Service Broker'),
-('BROKER_DISPATCHER','Service Broker'),
-('BROKER_ENDPOINT_STATE_MUTEX','Service Broker'),
-('BROKER_EVENTHANDLER','Service Broker'),
-('BROKER_FORWARDER','Service Broker'),
-('BROKER_INIT','Service Broker'),
-('BROKER_MASTERSTART','Service Broker'),
-('BROKER_RECEIVE_WAITFOR','User Wait'),
-('BROKER_REGISTERALLENDPOINTS','Service Broker'),
-('BROKER_SERVICE','Service Broker'),
-('BROKER_SHUTDOWN','Service Broker'),
-('BROKER_START','Service Broker'),
-('BROKER_TASK_SHUTDOWN','Service Broker'),
-('BROKER_TASK_STOP','Service Broker'),
-('BROKER_TASK_SUBMIT','Service Broker'),
-('BROKER_TO_FLUSH','Service Broker'),
-('BROKER_TRANSMISSION_OBJECT','Service Broker'),
-('BROKER_TRANSMISSION_TABLE','Service Broker'),
-('BROKER_TRANSMISSION_WORK','Service Broker'),
-('BROKER_TRANSMITTER','Service Broker'),
-('CHECKPOINT_QUEUE','Idle'),
-('CHKPT','Tran Log IO'),
-('CLR_AUTO_EVENT','SQL CLR'),
-('CLR_CRST','SQL CLR'),
-('CLR_JOIN','SQL CLR'),
-('CLR_MANUAL_EVENT','SQL CLR'),
-('CLR_MEMORY_SPY','SQL CLR'),
-('CLR_MONITOR','SQL CLR'),
-('CLR_RWLOCK_READER','SQL CLR'),
-('CLR_RWLOCK_WRITER','SQL CLR'),
-('CLR_SEMAPHORE','SQL CLR'),
-('CLR_TASK_START','SQL CLR'),
-('CLRHOST_STATE_ACCESS','SQL CLR'),
-('CMEMPARTITIONED','Memory'),
-('CMEMTHREAD','Memory'),
-('CXPACKET','Parallelism'),
-('CXCONSUMER','Parallelism'),
-('DBMIRROR_DBM_EVENT','Mirroring'),
-('DBMIRROR_DBM_MUTEX','Mirroring'),
-('DBMIRROR_EVENTS_QUEUE','Mirroring'),
-('DBMIRROR_SEND','Mirroring'),
-('DBMIRROR_WORKER_QUEUE','Mirroring'),
-('DBMIRRORING_CMD','Mirroring'),
-('DTC','Transaction'),
-('DTC_ABORT_REQUEST','Transaction'),
-('DTC_RESOLVE','Transaction'),
-('DTC_STATE','Transaction'),
-('DTC_TMDOWN_REQUEST','Transaction'),
-('DTC_WAITFOR_OUTCOME','Transaction'),
-('DTCNEW_ENLIST','Transaction'),
-('DTCNEW_PREPARE','Transaction'),
-('DTCNEW_RECOVERY','Transaction'),
-('DTCNEW_TM','Transaction'),
-('DTCNEW_TRANSACTION_ENLISTMENT','Transaction'),
-('DTCPNTSYNC','Transaction'),
-('EE_PMOLOCK','Memory'),
-('EXCHANGE','Parallelism'),
-('EXTERNAL_SCRIPT_NETWORK_IOF','Network IO'),
-('FCB_REPLICA_READ','Replication'),
-('FCB_REPLICA_WRITE','Replication'),
-('FT_COMPROWSET_RWLOCK','Full Text Search'),
-('FT_IFTS_RWLOCK','Full Text Search'),
-('FT_IFTS_SCHEDULER_IDLE_WAIT','Idle'),
-('FT_IFTSHC_MUTEX','Full Text Search'),
-('FT_IFTSISM_MUTEX','Full Text Search'),
-('FT_MASTER_MERGE','Full Text Search'),
-('FT_MASTER_MERGE_COORDINATOR','Full Text Search'),
-('FT_METADATA_MUTEX','Full Text Search'),
-('FT_PROPERTYLIST_CACHE','Full Text Search'),
-('FT_RESTART_CRAWL','Full Text Search'),
-('FULLTEXT GATHERER','Full Text Search'),
-('HADR_AG_MUTEX','Replication'),
-('HADR_AR_CRITICAL_SECTION_ENTRY','Replication'),
-('HADR_AR_MANAGER_MUTEX','Replication'),
-('HADR_AR_UNLOAD_COMPLETED','Replication'),
-('HADR_ARCONTROLLER_NOTIFICATIONS_SUBSCRIBER_LIST','Replication'),
-('HADR_BACKUP_BULK_LOCK','Replication'),
-('HADR_BACKUP_QUEUE','Replication'),
-('HADR_CLUSAPI_CALL','Replication'),
-('HADR_COMPRESSED_CACHE_SYNC','Replication'),
-('HADR_CONNECTIVITY_INFO','Replication'),
-('HADR_DATABASE_FLOW_CONTROL','Replication'),
-('HADR_DATABASE_VERSIONING_STATE','Replication'),
-('HADR_DATABASE_WAIT_FOR_RECOVERY','Replication'),
-('HADR_DATABASE_WAIT_FOR_RESTART','Replication'),
-('HADR_DATABASE_WAIT_FOR_TRANSITION_TO_VERSIONING','Replication'),
-('HADR_DB_COMMAND','Replication'),
-('HADR_DB_OP_COMPLETION_SYNC','Replication'),
-('HADR_DB_OP_START_SYNC','Replication'),
-('HADR_DBR_SUBSCRIBER','Replication'),
-('HADR_DBR_SUBSCRIBER_FILTER_LIST','Replication'),
-('HADR_DBSEEDING','Replication'),
-('HADR_DBSEEDING_LIST','Replication'),
-('HADR_DBSTATECHANGE_SYNC','Replication'),
-('HADR_FABRIC_CALLBACK','Replication'),
-('HADR_FILESTREAM_BLOCK_FLUSH','Replication'),
-('HADR_FILESTREAM_FILE_CLOSE','Replication'),
-('HADR_FILESTREAM_FILE_REQUEST','Replication'),
-('HADR_FILESTREAM_IOMGR','Replication'),
-('HADR_FILESTREAM_IOMGR_IOCOMPLETION','Replication'),
-('HADR_FILESTREAM_MANAGER','Replication'),
-('HADR_FILESTREAM_PREPROC','Replication'),
-('HADR_GROUP_COMMIT','Replication'),
-('HADR_LOGCAPTURE_SYNC','Replication'),
-('HADR_LOGCAPTURE_WAIT','Replication'),
-('HADR_LOGPROGRESS_SYNC','Replication'),
-('HADR_NOTIFICATION_DEQUEUE','Replication'),
-('HADR_NOTIFICATION_WORKER_EXCLUSIVE_ACCESS','Replication'),
-('HADR_NOTIFICATION_WORKER_STARTUP_SYNC','Replication'),
-('HADR_NOTIFICATION_WORKER_TERMINATION_SYNC','Replication'),
-('HADR_PARTNER_SYNC','Replication'),
-('HADR_READ_ALL_NETWORKS','Replication'),
-('HADR_RECOVERY_WAIT_FOR_CONNECTION','Replication'),
-('HADR_RECOVERY_WAIT_FOR_UNDO','Replication'),
-('HADR_REPLICAINFO_SYNC','Replication'),
-('HADR_SEEDING_CANCELLATION','Replication'),
-('HADR_SEEDING_FILE_LIST','Replication'),
-('HADR_SEEDING_LIMIT_BACKUPS','Replication'),
-('HADR_SEEDING_SYNC_COMPLETION','Replication'),
-('HADR_SEEDING_TIMEOUT_TASK','Replication'),
-('HADR_SEEDING_WAIT_FOR_COMPLETION','Replication'),
-('HADR_SYNC_COMMIT','Replication'),
-('HADR_SYNCHRONIZING_THROTTLE','Replication'),
-('HADR_TDS_LISTENER_SYNC','Replication'),
-('HADR_TDS_LISTENER_SYNC_PROCESSING','Replication'),
-('HADR_THROTTLE_LOG_RATE_GOVERNOR','Log Rate Governor'),
-('HADR_TIMER_TASK','Replication'),
-('HADR_TRANSPORT_DBRLIST','Replication'),
-('HADR_TRANSPORT_FLOW_CONTROL','Replication'),
-('HADR_TRANSPORT_SESSION','Replication'),
-('HADR_WORK_POOL','Replication'),
-('HADR_WORK_QUEUE','Replication'),
-('HADR_XRF_STACK_ACCESS','Replication'),
-('INSTANCE_LOG_RATE_GOVERNOR','Log Rate Governor'),
-('IO_COMPLETION','Other Disk IO'),
-('IO_QUEUE_LIMIT','Other Disk IO'),
-('IO_RETRY','Other Disk IO'),
-('LATCH_DT','Latch'),
-('LATCH_EX','Latch'),
-('LATCH_KP','Latch'),
-('LATCH_NL','Latch'),
-('LATCH_SH','Latch'),
-('LATCH_UP','Latch'),
-('LAZYWRITER_SLEEP','Idle'),
-('LCK_M_BU','Lock'),
-('LCK_M_BU_ABORT_BLOCKERS','Lock'),
-('LCK_M_BU_LOW_PRIORITY','Lock'),
-('LCK_M_IS','Lock'),
-('LCK_M_IS_ABORT_BLOCKERS','Lock'),
-('LCK_M_IS_LOW_PRIORITY','Lock'),
-('LCK_M_IU','Lock'),
-('LCK_M_IU_ABORT_BLOCKERS','Lock'),
-('LCK_M_IU_LOW_PRIORITY','Lock'),
-('LCK_M_IX','Lock'),
-('LCK_M_IX_ABORT_BLOCKERS','Lock'),
-('LCK_M_IX_LOW_PRIORITY','Lock'),
-('LCK_M_RIn_NL','Lock'),
-('LCK_M_RIn_NL_ABORT_BLOCKERS','Lock'),
-('LCK_M_RIn_NL_LOW_PRIORITY','Lock'),
-('LCK_M_RIn_S','Lock'),
-('LCK_M_RIn_S_ABORT_BLOCKERS','Lock'),
-('LCK_M_RIn_S_LOW_PRIORITY','Lock'),
-('LCK_M_RIn_U','Lock'),
-('LCK_M_RIn_U_ABORT_BLOCKERS','Lock'),
-('LCK_M_RIn_U_LOW_PRIORITY','Lock'),
-('LCK_M_RIn_X','Lock'),
-('LCK_M_RIn_X_ABORT_BLOCKERS','Lock'),
-('LCK_M_RIn_X_LOW_PRIORITY','Lock'),
-('LCK_M_RS_S','Lock'),
-('LCK_M_RS_S_ABORT_BLOCKERS','Lock'),
-('LCK_M_RS_S_LOW_PRIORITY','Lock'),
-('LCK_M_RS_U','Lock'),
-('LCK_M_RS_U_ABORT_BLOCKERS','Lock'),
-('LCK_M_RS_U_LOW_PRIORITY','Lock'),
-('LCK_M_RX_S','Lock'),
-('LCK_M_RX_S_ABORT_BLOCKERS','Lock'),
-('LCK_M_RX_S_LOW_PRIORITY','Lock'),
-('LCK_M_RX_U','Lock'),
-('LCK_M_RX_U_ABORT_BLOCKERS','Lock'),
-('LCK_M_RX_U_LOW_PRIORITY','Lock'),
-('LCK_M_RX_X','Lock'),
-('LCK_M_RX_X_ABORT_BLOCKERS','Lock'),
-('LCK_M_RX_X_LOW_PRIORITY','Lock'),
-('LCK_M_S','Lock'),
-('LCK_M_S_ABORT_BLOCKERS','Lock'),
-('LCK_M_S_LOW_PRIORITY','Lock'),
-('LCK_M_SCH_M','Lock'),
-('LCK_M_SCH_M_ABORT_BLOCKERS','Lock'),
-('LCK_M_SCH_M_LOW_PRIORITY','Lock'),
-('LCK_M_SCH_S','Lock'),
-('LCK_M_SCH_S_ABORT_BLOCKERS','Lock'),
-('LCK_M_SCH_S_LOW_PRIORITY','Lock'),
-('LCK_M_SIU','Lock'),
-('LCK_M_SIU_ABORT_BLOCKERS','Lock'),
-('LCK_M_SIU_LOW_PRIORITY','Lock'),
-('LCK_M_SIX','Lock'),
-('LCK_M_SIX_ABORT_BLOCKERS','Lock'),
-('LCK_M_SIX_LOW_PRIORITY','Lock'),
-('LCK_M_U','Lock'),
-('LCK_M_U_ABORT_BLOCKERS','Lock'),
-('LCK_M_U_LOW_PRIORITY','Lock'),
-('LCK_M_UIX','Lock'),
-('LCK_M_UIX_ABORT_BLOCKERS','Lock'),
-('LCK_M_UIX_LOW_PRIORITY','Lock'),
-('LCK_M_X','Lock'),
-('LCK_M_X_ABORT_BLOCKERS','Lock'),
-('LCK_M_X_LOW_PRIORITY','Lock'),
-('LOGBUFFER','Tran Log IO'),
-('LOGMGR','Tran Log IO'),
-('LOGMGR_FLUSH','Tran Log IO'),
-('LOGMGR_PMM_LOG','Tran Log IO'),
-('LOGMGR_QUEUE','Idle'),
-('LOGMGR_RESERVE_APPEND','Tran Log IO'),
-('MEMORY_ALLOCATION_EXT','Memory'),
-('MEMORY_GRANT_UPDATE','Memory'),
-('MSQL_XACT_MGR_MUTEX','Transaction'),
-('MSQL_XACT_MUTEX','Transaction'),
-('MSSEARCH','Full Text Search'),
-('NET_WAITFOR_PACKET','Network IO'),
-('ONDEMAND_TASK_QUEUE','Idle'),
-('PAGEIOLATCH_DT','Buffer IO'),
-('PAGEIOLATCH_EX','Buffer IO'),
-('PAGEIOLATCH_KP','Buffer IO'),
-('PAGEIOLATCH_NL','Buffer IO'),
-('PAGEIOLATCH_SH','Buffer IO'),
-('PAGEIOLATCH_UP','Buffer IO'),
-('PAGELATCH_DT','Buffer Latch'),
-('PAGELATCH_EX','Buffer Latch'),
-('PAGELATCH_KP','Buffer Latch'),
-('PAGELATCH_NL','Buffer Latch'),
-('PAGELATCH_SH','Buffer Latch'),
-('PAGELATCH_UP','Buffer Latch'),
-('POOL_LOG_RATE_GOVERNOR','Log Rate Governor'),
-('PREEMPTIVE_ABR','Preemptive'),
-('PREEMPTIVE_CLOSEBACKUPMEDIA','Preemptive'),
-('PREEMPTIVE_CLOSEBACKUPTAPE','Preemptive'),
-('PREEMPTIVE_CLOSEBACKUPVDIDEVICE','Preemptive'),
-('PREEMPTIVE_CLUSAPI_CLUSTERRESOURCECONTROL','Preemptive'),
-('PREEMPTIVE_COM_COCREATEINSTANCE','Preemptive'),
-('PREEMPTIVE_COM_COGETCLASSOBJECT','Preemptive'),
-('PREEMPTIVE_COM_CREATEACCESSOR','Preemptive'),
-('PREEMPTIVE_COM_DELETEROWS','Preemptive'),
-('PREEMPTIVE_COM_GETCOMMANDTEXT','Preemptive'),
-('PREEMPTIVE_COM_GETDATA','Preemptive'),
-('PREEMPTIVE_COM_GETNEXTROWS','Preemptive'),
-('PREEMPTIVE_COM_GETRESULT','Preemptive'),
-('PREEMPTIVE_COM_GETROWSBYBOOKMARK','Preemptive'),
-('PREEMPTIVE_COM_LBFLUSH','Preemptive'),
-('PREEMPTIVE_COM_LBLOCKREGION','Preemptive'),
-('PREEMPTIVE_COM_LBREADAT','Preemptive'),
-('PREEMPTIVE_COM_LBSETSIZE','Preemptive'),
-('PREEMPTIVE_COM_LBSTAT','Preemptive'),
-('PREEMPTIVE_COM_LBUNLOCKREGION','Preemptive'),
-('PREEMPTIVE_COM_LBWRITEAT','Preemptive'),
-('PREEMPTIVE_COM_QUERYINTERFACE','Preemptive'),
-('PREEMPTIVE_COM_RELEASE','Preemptive'),
-('PREEMPTIVE_COM_RELEASEACCESSOR','Preemptive'),
-('PREEMPTIVE_COM_RELEASEROWS','Preemptive'),
-('PREEMPTIVE_COM_RELEASESESSION','Preemptive'),
-('PREEMPTIVE_COM_RESTARTPOSITION','Preemptive'),
-('PREEMPTIVE_COM_SEQSTRMREAD','Preemptive'),
-('PREEMPTIVE_COM_SEQSTRMREADANDWRITE','Preemptive'),
-('PREEMPTIVE_COM_SETDATAFAILURE','Preemptive'),
-('PREEMPTIVE_COM_SETPARAMETERINFO','Preemptive'),
-('PREEMPTIVE_COM_SETPARAMETERPROPERTIES','Preemptive'),
-('PREEMPTIVE_COM_STRMLOCKREGION','Preemptive'),
-('PREEMPTIVE_COM_STRMSEEKANDREAD','Preemptive'),
-('PREEMPTIVE_COM_STRMSEEKANDWRITE','Preemptive'),
-('PREEMPTIVE_COM_STRMSETSIZE','Preemptive'),
-('PREEMPTIVE_COM_STRMSTAT','Preemptive'),
-('PREEMPTIVE_COM_STRMUNLOCKREGION','Preemptive'),
-('PREEMPTIVE_CONSOLEWRITE','Preemptive'),
-('PREEMPTIVE_CREATEPARAM','Preemptive'),
-('PREEMPTIVE_DEBUG','Preemptive'),
-('PREEMPTIVE_DFSADDLINK','Preemptive'),
-('PREEMPTIVE_DFSLINKEXISTCHECK','Preemptive'),
-('PREEMPTIVE_DFSLINKHEALTHCHECK','Preemptive'),
-('PREEMPTIVE_DFSREMOVELINK','Preemptive'),
-('PREEMPTIVE_DFSREMOVEROOT','Preemptive'),
-('PREEMPTIVE_DFSROOTFOLDERCHECK','Preemptive'),
-('PREEMPTIVE_DFSROOTINIT','Preemptive'),
-('PREEMPTIVE_DFSROOTSHARECHECK','Preemptive'),
-('PREEMPTIVE_DTC_ABORT','Preemptive'),
-('PREEMPTIVE_DTC_ABORTREQUESTDONE','Preemptive'),
-('PREEMPTIVE_DTC_BEGINTRANSACTION','Preemptive'),
-('PREEMPTIVE_DTC_COMMITREQUESTDONE','Preemptive'),
-('PREEMPTIVE_DTC_ENLIST','Preemptive'),
-('PREEMPTIVE_DTC_PREPAREREQUESTDONE','Preemptive'),
-('PREEMPTIVE_FILESIZEGET','Preemptive'),
-('PREEMPTIVE_FSAOLEDB_ABORTTRANSACTION','Preemptive'),
-('PREEMPTIVE_FSAOLEDB_COMMITTRANSACTION','Preemptive'),
-('PREEMPTIVE_FSAOLEDB_STARTTRANSACTION','Preemptive'),
-('PREEMPTIVE_FSRECOVER_UNCONDITIONALUNDO','Preemptive'),
-('PREEMPTIVE_GETRMINFO','Preemptive'),
-('PREEMPTIVE_HADR_LEASE_MECHANISM','Preemptive'),
-('PREEMPTIVE_HTTP_EVENT_WAIT','Preemptive'),
-('PREEMPTIVE_HTTP_REQUEST','Preemptive'),
-('PREEMPTIVE_LOCKMONITOR','Preemptive'),
-('PREEMPTIVE_MSS_RELEASE','Preemptive'),
-('PREEMPTIVE_ODBCOPS','Preemptive'),
-('PREEMPTIVE_OLE_UNINIT','Preemptive'),
-('PREEMPTIVE_OLEDB_ABORTORCOMMITTRAN','Preemptive'),
-('PREEMPTIVE_OLEDB_ABORTTRAN','Preemptive'),
-('PREEMPTIVE_OLEDB_GETDATASOURCE','Preemptive'),
-('PREEMPTIVE_OLEDB_GETLITERALINFO','Preemptive'),
-('PREEMPTIVE_OLEDB_GETPROPERTIES','Preemptive'),
-('PREEMPTIVE_OLEDB_GETPROPERTYINFO','Preemptive'),
-('PREEMPTIVE_OLEDB_GETSCHEMALOCK','Preemptive'),
-('PREEMPTIVE_OLEDB_JOINTRANSACTION','Preemptive'),
-('PREEMPTIVE_OLEDB_RELEASE','Preemptive'),
-('PREEMPTIVE_OLEDB_SETPROPERTIES','Preemptive'),
-('PREEMPTIVE_OLEDBOPS','Preemptive'),
-('PREEMPTIVE_OS_ACCEPTSECURITYCONTEXT','Preemptive'),
-('PREEMPTIVE_OS_ACQUIRECREDENTIALSHANDLE','Preemptive'),
-('PREEMPTIVE_OS_AUTHENTICATIONOPS','Preemptive'),
-('PREEMPTIVE_OS_AUTHORIZATIONOPS','Preemptive'),
-('PREEMPTIVE_OS_AUTHZGETINFORMATIONFROMCONTEXT','Preemptive'),
-('PREEMPTIVE_OS_AUTHZINITIALIZECONTEXTFROMSID','Preemptive'),
-('PREEMPTIVE_OS_AUTHZINITIALIZERESOURCEMANAGER','Preemptive'),
-('PREEMPTIVE_OS_BACKUPREAD','Preemptive'),
-('PREEMPTIVE_OS_CLOSEHANDLE','Preemptive'),
-('PREEMPTIVE_OS_CLUSTEROPS','Preemptive'),
-('PREEMPTIVE_OS_COMOPS','Preemptive'),
-('PREEMPTIVE_OS_COMPLETEAUTHTOKEN','Preemptive'),
-('PREEMPTIVE_OS_COPYFILE','Preemptive'),
-('PREEMPTIVE_OS_CREATEDIRECTORY','Preemptive'),
-('PREEMPTIVE_OS_CREATEFILE','Preemptive'),
-('PREEMPTIVE_OS_CRYPTACQUIRECONTEXT','Preemptive'),
-('PREEMPTIVE_OS_CRYPTIMPORTKEY','Preemptive'),
-('PREEMPTIVE_OS_CRYPTOPS','Preemptive'),
-('PREEMPTIVE_OS_DECRYPTMESSAGE','Preemptive'),
-('PREEMPTIVE_OS_DELETEFILE','Preemptive'),
-('PREEMPTIVE_OS_DELETESECURITYCONTEXT','Preemptive'),
-('PREEMPTIVE_OS_DEVICEIOCONTROL','Preemptive'),
-('PREEMPTIVE_OS_DEVICEOPS','Preemptive'),
-('PREEMPTIVE_OS_DIRSVC_NETWORKOPS','Preemptive'),
-('PREEMPTIVE_OS_DISCONNECTNAMEDPIPE','Preemptive'),
-('PREEMPTIVE_OS_DOMAINSERVICESOPS','Preemptive'),
-('PREEMPTIVE_OS_DSGETDCNAME','Preemptive'),
-('PREEMPTIVE_OS_DTCOPS','Preemptive'),
-('PREEMPTIVE_OS_ENCRYPTMESSAGE','Preemptive'),
-('PREEMPTIVE_OS_FILEOPS','Preemptive'),
-('PREEMPTIVE_OS_FINDFILE','Preemptive'),
-('PREEMPTIVE_OS_FLUSHFILEBUFFERS','Preemptive'),
-('PREEMPTIVE_OS_FORMATMESSAGE','Preemptive'),
-('PREEMPTIVE_OS_FREECREDENTIALSHANDLE','Preemptive'),
-('PREEMPTIVE_OS_FREELIBRARY','Preemptive'),
-('PREEMPTIVE_OS_GENERICOPS','Preemptive'),
-('PREEMPTIVE_OS_GETADDRINFO','Preemptive'),
-('PREEMPTIVE_OS_GETCOMPRESSEDFILESIZE','Preemptive'),
-('PREEMPTIVE_OS_GETDISKFREESPACE','Preemptive'),
-('PREEMPTIVE_OS_GETFILEATTRIBUTES','Preemptive'),
-('PREEMPTIVE_OS_GETFILESIZE','Preemptive'),
-('PREEMPTIVE_OS_GETFINALFILEPATHBYHANDLE','Preemptive'),
-('PREEMPTIVE_OS_GETLONGPATHNAME','Preemptive'),
-('PREEMPTIVE_OS_GETPROCADDRESS','Preemptive'),
-('PREEMPTIVE_OS_GETVOLUMENAMEFORVOLUMEMOUNTPOINT','Preemptive'),
-('PREEMPTIVE_OS_GETVOLUMEPATHNAME','Preemptive'),
-('PREEMPTIVE_OS_INITIALIZESECURITYCONTEXT','Preemptive'),
-('PREEMPTIVE_OS_LIBRARYOPS','Preemptive'),
-('PREEMPTIVE_OS_LOADLIBRARY','Preemptive'),
-('PREEMPTIVE_OS_LOGONUSER','Preemptive'),
-('PREEMPTIVE_OS_LOOKUPACCOUNTSID','Preemptive'),
-('PREEMPTIVE_OS_MESSAGEQUEUEOPS','Preemptive'),
-('PREEMPTIVE_OS_MOVEFILE','Preemptive'),
-('PREEMPTIVE_OS_NETGROUPGETUSERS','Preemptive'),
-('PREEMPTIVE_OS_NETLOCALGROUPGETMEMBERS','Preemptive'),
-('PREEMPTIVE_OS_NETUSERGETGROUPS','Preemptive'),
-('PREEMPTIVE_OS_NETUSERGETLOCALGROUPS','Preemptive'),
-('PREEMPTIVE_OS_NETUSERMODALSGET','Preemptive'),
-('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICY','Preemptive'),
-('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICYFREE','Preemptive'),
-('PREEMPTIVE_OS_OPENDIRECTORY','Preemptive'),
-('PREEMPTIVE_OS_PDH_WMI_INIT','Preemptive'),
-('PREEMPTIVE_OS_PIPEOPS','Preemptive'),
-('PREEMPTIVE_OS_PROCESSOPS','Preemptive'),
-('PREEMPTIVE_OS_QUERYCONTEXTATTRIBUTES','Preemptive'),
-('PREEMPTIVE_OS_QUERYREGISTRY','Preemptive'),
-('PREEMPTIVE_OS_QUERYSECURITYCONTEXTTOKEN','Preemptive'),
-('PREEMPTIVE_OS_REMOVEDIRECTORY','Preemptive'),
-('PREEMPTIVE_OS_REPORTEVENT','Preemptive'),
-('PREEMPTIVE_OS_REVERTTOSELF','Preemptive'),
-('PREEMPTIVE_OS_RSFXDEVICEOPS','Preemptive'),
-('PREEMPTIVE_OS_SECURITYOPS','Preemptive'),
-('PREEMPTIVE_OS_SERVICEOPS','Preemptive'),
-('PREEMPTIVE_OS_SETENDOFFILE','Preemptive'),
-('PREEMPTIVE_OS_SETFILEPOINTER','Preemptive'),
-('PREEMPTIVE_OS_SETFILEVALIDDATA','Preemptive'),
-('PREEMPTIVE_OS_SETNAMEDSECURITYINFO','Preemptive'),
-('PREEMPTIVE_OS_SQLCLROPS','Preemptive'),
-('PREEMPTIVE_OS_SQMLAUNCH','Preemptive'),
-('PREEMPTIVE_OS_VERIFYSIGNATURE','Preemptive'),
-('PREEMPTIVE_OS_VERIFYTRUST','Preemptive'),
-('PREEMPTIVE_OS_VSSOPS','Preemptive'),
-('PREEMPTIVE_OS_WAITFORSINGLEOBJECT','Preemptive'),
-('PREEMPTIVE_OS_WINSOCKOPS','Preemptive'),
-('PREEMPTIVE_OS_WRITEFILE','Preemptive'),
-('PREEMPTIVE_OS_WRITEFILEGATHER','Preemptive'),
-('PREEMPTIVE_OS_WSASETLASTERROR','Preemptive'),
-('PREEMPTIVE_REENLIST','Preemptive'),
-('PREEMPTIVE_RESIZELOG','Preemptive'),
-('PREEMPTIVE_ROLLFORWARDREDO','Preemptive'),
-('PREEMPTIVE_ROLLFORWARDUNDO','Preemptive'),
-('PREEMPTIVE_SB_STOPENDPOINT','Preemptive'),
-('PREEMPTIVE_SERVER_STARTUP','Preemptive'),
-('PREEMPTIVE_SETRMINFO','Preemptive'),
-('PREEMPTIVE_SHAREDMEM_GETDATA','Preemptive'),
-('PREEMPTIVE_SNIOPEN','Preemptive'),
-('PREEMPTIVE_SOSHOST','Preemptive'),
-('PREEMPTIVE_SOSTESTING','Preemptive'),
-('PREEMPTIVE_SP_SERVER_DIAGNOSTICS','Preemptive'),
-('PREEMPTIVE_STARTRM','Preemptive'),
-('PREEMPTIVE_STREAMFCB_CHECKPOINT','Preemptive'),
-('PREEMPTIVE_STREAMFCB_RECOVER','Preemptive'),
-('PREEMPTIVE_STRESSDRIVER','Preemptive'),
-('PREEMPTIVE_TESTING','Preemptive'),
-('PREEMPTIVE_TRANSIMPORT','Preemptive'),
-('PREEMPTIVE_UNMARSHALPROPAGATIONTOKEN','Preemptive'),
-('PREEMPTIVE_VSS_CREATESNAPSHOT','Preemptive'),
-('PREEMPTIVE_VSS_CREATEVOLUMESNAPSHOT','Preemptive'),
-('PREEMPTIVE_XE_CALLBACKEXECUTE','Preemptive'),
-('PREEMPTIVE_XE_CX_FILE_OPEN','Preemptive'),
-('PREEMPTIVE_XE_CX_HTTP_CALL','Preemptive'),
-('PREEMPTIVE_XE_DISPATCHER','Preemptive'),
-('PREEMPTIVE_XE_ENGINEINIT','Preemptive'),
-('PREEMPTIVE_XE_GETTARGETSTATE','Preemptive'),
-('PREEMPTIVE_XE_SESSIONCOMMIT','Preemptive'),
-('PREEMPTIVE_XE_TARGETFINALIZE','Preemptive'),
-('PREEMPTIVE_XE_TARGETINIT','Preemptive'),
-('PREEMPTIVE_XE_TIMERRUN','Preemptive'),
-('PREEMPTIVE_XETESTING','Preemptive'),
-('PWAIT_HADR_ACTION_COMPLETED','Replication'),
-('PWAIT_HADR_CHANGE_NOTIFIER_TERMINATION_SYNC','Replication'),
-('PWAIT_HADR_CLUSTER_INTEGRATION','Replication'),
-('PWAIT_HADR_FAILOVER_COMPLETED','Replication'),
-('PWAIT_HADR_JOIN','Replication'),
-('PWAIT_HADR_OFFLINE_COMPLETED','Replication'),
-('PWAIT_HADR_ONLINE_COMPLETED','Replication'),
-('PWAIT_HADR_POST_ONLINE_COMPLETED','Replication'),
-('PWAIT_HADR_SERVER_READY_CONNECTIONS','Replication'),
-('PWAIT_HADR_WORKITEM_COMPLETED','Replication'),
-('PWAIT_HADRSIM','Replication'),
-('PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC','Full Text Search'),
-('QUERY_TRACEOUT','Tracing'),
-('REPL_CACHE_ACCESS','Replication'),
-('REPL_HISTORYCACHE_ACCESS','Replication'),
-('REPL_SCHEMA_ACCESS','Replication'),
-('REPL_TRANFSINFO_ACCESS','Replication'),
-('REPL_TRANHASHTABLE_ACCESS','Replication'),
-('REPL_TRANTEXTINFO_ACCESS','Replication'),
-('REPLICA_WRITES','Replication'),
-('REQUEST_FOR_DEADLOCK_SEARCH','Idle'),
-('RESERVED_MEMORY_ALLOCATION_EXT','Memory'),
-('RESOURCE_SEMAPHORE','Memory'),
-('RESOURCE_SEMAPHORE_QUERY_COMPILE','Compilation'),
-('SLEEP_BPOOL_FLUSH','Idle'),
-('SLEEP_BUFFERPOOL_HELPLW','Idle'),
-('SLEEP_DBSTARTUP','Idle'),
-('SLEEP_DCOMSTARTUP','Idle'),
-('SLEEP_MASTERDBREADY','Idle'),
-('SLEEP_MASTERMDREADY','Idle'),
-('SLEEP_MASTERUPGRADED','Idle'),
-('SLEEP_MEMORYPOOL_ALLOCATEPAGES','Idle'),
-('SLEEP_MSDBSTARTUP','Idle'),
-('SLEEP_RETRY_VIRTUALALLOC','Idle'),
-('SLEEP_SYSTEMTASK','Idle'),
-('SLEEP_TASK','Idle'),
-('SLEEP_TEMPDBSTARTUP','Idle'),
-('SLEEP_WORKSPACE_ALLOCATEPAGE','Idle'),
-('SOS_SCHEDULER_YIELD','CPU'),
-('SQLCLR_APPDOMAIN','SQL CLR'),
-('SQLCLR_ASSEMBLY','SQL CLR'),
-('SQLCLR_DEADLOCK_DETECTION','SQL CLR'),
-('SQLCLR_QUANTUM_PUNISHMENT','SQL CLR'),
-('SQLTRACE_BUFFER_FLUSH','Idle'),
-('SQLTRACE_FILE_BUFFER','Tracing'),
-('SQLTRACE_FILE_READ_IO_COMPLETION','Tracing'),
-('SQLTRACE_FILE_WRITE_IO_COMPLETION','Tracing'),
-('SQLTRACE_INCREMENTAL_FLUSH_SLEEP','Idle'),
-('SQLTRACE_PENDING_BUFFER_WRITERS','Tracing'),
-('SQLTRACE_SHUTDOWN','Tracing'),
-('SQLTRACE_WAIT_ENTRIES','Idle'),
-('THREADPOOL','Worker Thread'),
-('TRACE_EVTNOTIF','Tracing'),
-('TRACEWRITE','Tracing'),
-('TRAN_MARKLATCH_DT','Transaction'),
-('TRAN_MARKLATCH_EX','Transaction'),
-('TRAN_MARKLATCH_KP','Transaction'),
-('TRAN_MARKLATCH_NL','Transaction'),
-('TRAN_MARKLATCH_SH','Transaction'),
-('TRAN_MARKLATCH_UP','Transaction'),
-('TRANSACTION_MUTEX','Transaction'),
-('WAIT_FOR_RESULTS','User Wait'),
-('WAITFOR','User Wait'),
-('WRITE_COMPLETION','Other Disk IO'),
-('WRITELOG','Tran Log IO'),
-('XACT_OWN_TRANSACTION','Transaction'),
-('XACT_RECLAIM_SESSION','Transaction'),
-('XACTLOCKINFO','Transaction'),
-('XACTWORKSPACE_MUTEX','Transaction'),
-('XE_DISPATCHER_WAIT','Idle'),
-('XE_TIMER_EVENT','Idle')) AS wc(wait_type, wait_category)
- ON ws.wait_type = wc.wait_type
-WHERE
-ws.wait_type NOT IN (
- N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP',
- N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE',
- N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE',
- N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE',
- N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE',
- N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX',
- N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT',
- N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE',
- N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE',
- N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE',
- N'PARALLEL_REDO_WORKER_WAIT_WORK',
- N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS',
- N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS',
- N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST',
- N'PREEMPTIVE_OS_DEVICEOPS',
- N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER',
- N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT',
- N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE',
- N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT',
- N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP',
- N'QDS_ASYNC_QUEUE',
- N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH',
- N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP',
- N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY',
- N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK',
- N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP',
- N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
- N'SQLTRACE_WAIT_ENTRIES',
- N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT',
- N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE',
- N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN',
- N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT',
- N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT')
-AND waiting_tasks_count > 0
-AND wait_time_ms > 100
-OPTION (RECOMPILE);
-`
-
-const sqlAzureDB string = `SET DEADLOCK_PRIORITY -10;
-IF OBJECT_ID('sys.dm_db_resource_stats') IS NOT NULL
-BEGIN
- SELECT TOP(1)
- 'sqlserver_azurestats' AS [measurement],
- REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
- avg_cpu_percent,
- avg_data_io_percent,
- avg_log_write_percent,
- avg_memory_usage_percent,
- xtp_storage_percent,
- max_worker_percent,
- max_session_percent,
- dtu_limit,
- avg_login_rate_percent,
- end_time
- FROM
- sys.dm_db_resource_stats WITH (NOLOCK)
- ORDER BY
- end_time DESC
- OPTION (RECOMPILE)
-END
-ELSE
-BEGIN
- RAISERROR('This does not seem to be an AzureDB instance. Set "azureDB = false" in your telegraf configuration.',16,1)
-END`
-
-// Queries V1
-const sqlPerformanceMetrics string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET ARITHABORT ON;
-SET QUOTED_IDENTIFIER ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
-
-DECLARE @PCounters TABLE
-(
- counter_name nvarchar(64),
- cntr_value bigint,
- Primary Key(counter_name)
-);
-
-INSERT @PCounters (counter_name, cntr_value)
-SELECT 'Point In Time Recovery', Value = CASE
- WHEN 1 > 1.0 * COUNT(*) / NULLIF((SELECT COUNT(*) FROM sys.databases d WHERE database_id > 4), 0)
- THEN 0 ELSE 1 END
-FROM sys.databases d
-WHERE database_id > 4
- AND recovery_model IN (1)
-UNION ALL
-SELECT 'Page File Usage (%)', CAST(100 * (1 - available_page_file_kb * 1. / total_page_file_kb) as decimal(9,2)) as [PageFileUsage (%)]
-FROM sys.dm_os_sys_memory
-UNION ALL
-SELECT 'Connection memory per connection (bytes)', Ratio = CAST((cntr_value / (SELECT 1.0 * cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'User Connections')) * 1024 as int)
-FROM sys.dm_os_performance_counters
-WHERE counter_name = 'Connection Memory (KB)'
-UNION ALL
-SELECT 'Available physical memory (bytes)', available_physical_memory_kb * 1024
-FROM sys.dm_os_sys_memory
-UNION ALL
-SELECT 'Signal wait (%)', SignalWaitPercent = CAST(100.0 * SUM(signal_wait_time_ms) / SUM (wait_time_ms) AS NUMERIC(20,2))
-FROM sys.dm_os_wait_stats
-UNION ALL
-SELECT 'Sql compilation per batch request', SqlCompilationPercent = 100.0 * cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec')
-FROM sys.dm_os_performance_counters
-WHERE counter_name = 'SQL Compilations/sec'
-UNION ALL
-SELECT 'Sql recompilation per batch request', SqlReCompilationPercent = 100.0 *cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec')
-FROM sys.dm_os_performance_counters
-WHERE counter_name = 'SQL Re-Compilations/sec'
-UNION ALL
-SELECT 'Page lookup per batch request',PageLookupPercent = 100.0 * cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec')
-FROM sys.dm_os_performance_counters
-WHERE counter_name = 'Page lookups/sec'
-UNION ALL
-SELECT 'Page split per batch request',PageSplitPercent = 100.0 * cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec')
-FROM sys.dm_os_performance_counters
-WHERE counter_name = 'Page splits/sec'
-UNION ALL
-SELECT 'Average tasks', AverageTaskCount = (SELECT AVG(current_tasks_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 )
-UNION ALL
-SELECT 'Average runnable tasks', AverageRunnableTaskCount = (SELECT AVG(runnable_tasks_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 )
-UNION ALL
-SELECT 'Average pending disk IO', AveragePendingDiskIOCount = (SELECT AVG(pending_disk_io_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 )
-UNION ALL
-SELECT 'Buffer pool rate (bytes/sec)', BufferPoolRate = (1.0*cntr_value * 8 * 1024) /
- (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE object_name like '%Buffer Manager%' AND counter_name = 'Page life expectancy')
-FROM sys.dm_os_performance_counters
-WHERE object_name like '%Buffer Manager%'
-AND counter_name = 'Database pages'
-UNION ALL
-SELECT 'Memory grant pending', MemoryGrantPending = cntr_value
-FROM sys.dm_os_performance_counters
-WHERE counter_name = 'Memory Grants Pending'
-UNION ALL
-SELECT 'Readahead per page read', Readahead = 100.0 *cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Page Reads/sec')
-FROM sys.dm_os_performance_counters
-WHERE counter_name = 'Readahead pages/sec'
-UNION ALL
-SELECT 'Total target memory ratio', TotalTargetMemoryRatio = 100.0 * cntr_value / (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE counter_name = 'Target Server Memory (KB)')
-FROM sys.dm_os_performance_counters
-WHERE counter_name = 'Total Server Memory (KB)'
-
-IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters;
-SELECT * INTO #PCounters FROM @PCounters
-
-DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
-DECLARE @ColumnName AS NVARCHAR(MAX)
-SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(counter_name)
-FROM (SELECT DISTINCT counter_name FROM @PCounters) AS bl
-
-SET @DynamicPivotQuery = N'
-SELECT measurement = ''Performance metrics'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Performance metrics''
-, ' + @ColumnName + ' FROM
-(
-SELECT counter_name, cntr_value
-FROM #PCounters
-) as V
-PIVOT(SUM(cntr_value) FOR counter_name IN (' + @ColumnName + ')) AS PVTTable
-'
-EXEC sp_executesql @DynamicPivotQuery;
-`
-
-const sqlMemoryClerk string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
-
-DECLARE @sqlVers numeric(4,2)
-SELECT @sqlVers = LEFT(CAST(SERVERPROPERTY('productversion') as varchar), 4)
-
-IF OBJECT_ID('tempdb..#clerk') IS NOT NULL
- DROP TABLE #clerk;
-
-CREATE TABLE #clerk (
- ClerkCategory nvarchar(64) NOT NULL,
- UsedPercent decimal(9,2),
- UsedBytes bigint
-);
-
-DECLARE @DynamicClerkQuery AS NVARCHAR(MAX)
-
-IF @sqlVers < 11
-BEGIN
- SET @DynamicClerkQuery = N'
- INSERT #clerk (ClerkCategory, UsedPercent, UsedBytes)
- SELECT ClerkCategory
- , UsedPercent = SUM(UsedPercent)
- , UsedBytes = SUM(UsedBytes)
- FROM
- (
- SELECT ClerkCategory = CASE MC.[type]
- WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool''
- WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)''
- WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)''
- ELSE ''Other'' END
- , SUM((single_pages_kb + multi_pages_kb) * 1024) AS UsedBytes
- , Cast(100 * Sum((single_pages_kb + multi_pages_kb))*1.0/(Select Sum((single_pages_kb + multi_pages_kb)) From sys.dm_os_memory_clerks) as Decimal(7, 4)) UsedPercent
- FROM sys.dm_os_memory_clerks MC
- WHERE (single_pages_kb + multi_pages_kb) > 0
- GROUP BY CASE MC.[type]
- WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool''
- WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)''
- WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)''
- ELSE ''Other'' END
- ) as T
- GROUP BY ClerkCategory;
- '
-END
-ELSE
-BEGIN
- SET @DynamicClerkQuery = N'
- INSERT #clerk (ClerkCategory, UsedPercent, UsedBytes)
- SELECT ClerkCategory
- , UsedPercent = SUM(UsedPercent)
- , UsedBytes = SUM(UsedBytes)
- FROM
- (
- SELECT ClerkCategory = CASE MC.[type]
- WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool''
- WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)''
- WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)''
- ELSE ''Other'' END
- , SUM(pages_kb * 1024) AS UsedBytes
- , Cast(100 * Sum(pages_kb)*1.0/(Select Sum(pages_kb) From sys.dm_os_memory_clerks) as Decimal(7, 4)) UsedPercent
- FROM sys.dm_os_memory_clerks MC
- WHERE pages_kb > 0
- GROUP BY CASE MC.[type]
- WHEN ''MEMORYCLERK_SQLBUFFERPOOL'' THEN ''Buffer pool''
- WHEN ''CACHESTORE_SQLCP'' THEN ''Cache (sql plans)''
- WHEN ''CACHESTORE_OBJCP'' THEN ''Cache (objects)''
- ELSE ''Other'' END
- ) as T
- GROUP BY ClerkCategory;
- '
-END
-EXEC sp_executesql @DynamicClerkQuery;
-SELECT
--- measurement
-measurement
--- tags
-, servername= REPLACE(@@SERVERNAME, '\', ':')
-, type = 'Memory clerk'
--- value
-, [Buffer pool]
-, [Cache (objects)]
-, [Cache (sql plans)]
-, [Other]
-FROM
-(
-SELECT measurement = 'Memory breakdown (%)'
-, [Buffer pool] = ISNULL(ROUND([Buffer Pool], 1), 0)
-, [Cache (objects)] = ISNULL(ROUND([Cache (objects)], 1), 0)
-, [Cache (sql plans)] = ISNULL(ROUND([Cache (sql plans)], 1), 0)
-, [Other] = ISNULL(ROUND([Other], 1), 0)
-FROM (SELECT ClerkCategory, UsedPercent FROM #clerk) as G1
-PIVOT
-(
- SUM(UsedPercent)
- FOR ClerkCategory IN ([Buffer Pool], [Cache (objects)], [Cache (sql plans)], [Other])
-) AS PivotTable
-
-UNION ALL
-
-SELECT measurement = 'Memory breakdown (bytes)'
-, [Buffer pool] = ISNULL(ROUND([Buffer Pool], 1), 0)
-, [Cache (objects)] = ISNULL(ROUND([Cache (objects)], 1), 0)
-, [Cache (sql plans)] = ISNULL(ROUND([Cache (sql plans)], 1), 0)
-, [Other] = ISNULL(ROUND([Other], 1), 0)
-FROM (SELECT ClerkCategory, UsedBytes FROM #clerk) as G2
-PIVOT
-(
- SUM(UsedBytes)
- FOR ClerkCategory IN ([Buffer Pool], [Cache (objects)], [Cache (sql plans)], [Other])
-) AS PivotTable
-) as T;
-`
-
-const sqlDatabaseSize string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
-
-IF OBJECT_ID('tempdb..#baseline') IS NOT NULL
- DROP TABLE #baseline;
-SELECT
- DB_NAME(mf.database_id) AS database_name ,
- CAST(mf.size AS BIGINT) as database_size_8k_pages,
- CAST(mf.max_size AS BIGINT) as database_max_size_8k_pages,
- size_on_disk_bytes ,
- type_desc as datafile_type,
- GETDATE() AS baselineDate
-INTO #baseline
-FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs
-INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id
- AND mf.file_id = divfs.file_id
-
-DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
-DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX)
-
-SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(database_name)
-FROM (SELECT DISTINCT database_name FROM #baseline) AS bl
-
---Prepare the PIVOT query using the dynamic
-SET @DynamicPivotQuery = N'
-SELECT measurement = ''Log size (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
-, ' + @ColumnName + ' FROM
-(
-SELECT database_name, size_on_disk_bytes
-FROM #baseline
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(SUM(size_on_disk_bytes) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Rows size (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
-, ' + @ColumnName + ' FROM
-(
-SELECT database_name, size_on_disk_bytes
-FROM #baseline
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(SUM(size_on_disk_bytes) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Rows size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
-, ' + @ColumnName + ' FROM
-(
-SELECT database_name, database_size_8k_pages
-FROM #baseline
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(SUM(database_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Log size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
-, ' + @ColumnName + ' FROM
-(
-SELECT database_name, database_size_8k_pages
-FROM #baseline
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(SUM(database_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Rows max size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
-, ' + @ColumnName + ' FROM
-(
-SELECT database_name, database_max_size_8k_pages
-FROM #baseline
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Logs max size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
-, ' + @ColumnName + ' FROM
-(
-SELECT database_name, database_max_size_8k_pages
-FROM #baseline
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-'
---PRINT @DynamicPivotQuery
-EXEC sp_executesql @DynamicPivotQuery;
-`
-
-const sqlDatabaseStats string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
-
-IF OBJECT_ID('tempdb..#baseline') IS NOT NULL
- DROP TABLE #baseline;
-
-SELECT
-[ReadLatency] =
- CASE WHEN [num_of_reads] = 0
- THEN 0 ELSE ([io_stall_read_ms] / [num_of_reads]) END,
-[WriteLatency] =
- CASE WHEN [num_of_writes] = 0
- THEN 0 ELSE ([io_stall_write_ms] / [num_of_writes]) END,
-[Latency] =
- CASE WHEN ([num_of_reads] = 0 AND [num_of_writes] = 0)
- THEN 0 ELSE ([io_stall] / ([num_of_reads] + [num_of_writes])) END,
-[AvgBytesPerRead] =
- CASE WHEN [num_of_reads] = 0
- THEN 0 ELSE ([num_of_bytes_read] / [num_of_reads]) END,
-[AvgBytesPerWrite] =
- CASE WHEN [num_of_writes] = 0
- THEN 0 ELSE ([num_of_bytes_written] / [num_of_writes]) END,
-[AvgBytesPerTransfer] =
- CASE WHEN ([num_of_reads] = 0 AND [num_of_writes] = 0)
- THEN 0 ELSE
- (([num_of_bytes_read] + [num_of_bytes_written]) /
- ([num_of_reads] + [num_of_writes])) END,
-DB_NAME ([vfs].[database_id]) AS DatabaseName,
-[mf].type_desc as datafile_type
-INTO #baseline
-FROM sys.dm_io_virtual_file_stats (NULL,NULL) AS [vfs]
-JOIN sys.master_files AS [mf] ON [vfs].[database_id] = [mf].[database_id]
- AND [vfs].[file_id] = [mf].[file_id]
-
-
-
-DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
-DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX)
-
-SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(DatabaseName)
-FROM (SELECT DISTINCT DatabaseName FROM #baseline) AS bl
-
---Prepare the PIVOT query using the dynamic
-SET @DynamicPivotQuery = N'
-SELECT measurement = ''Log read latency (ms)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
-, ' + @ColumnName + ' FROM
-(
-SELECT DatabaseName, ReadLatency
-FROM #baseline
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(MAX(ReadLatency) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Log write latency (ms)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
-, ' + @ColumnName + ' FROM
-(
-SELECT DatabaseName, WriteLatency
-FROM #baseline
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(MAX(WriteLatency) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Rows read latency (ms)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
-, ' + @ColumnName + ' FROM
-(
-SELECT DatabaseName, ReadLatency
-FROM #baseline
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(MAX(ReadLatency) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Rows write latency (ms)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
-, ' + @ColumnName + ' FROM
-(
-SELECT DatabaseName, WriteLatency
-FROM #baseline
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(MAX(WriteLatency) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Rows (average bytes/read)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
-, ' + @ColumnName + ' FROM
-(
-SELECT DatabaseName, AvgBytesPerRead
-FROM #baseline
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(SUM(AvgBytesPerRead) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Rows (average bytes/write)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
-, ' + @ColumnName + ' FROM
-(
-SELECT DatabaseName, AvgBytesPerWrite
-FROM #baseline
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(SUM(AvgBytesPerWrite) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Log (average bytes/read)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
-, ' + @ColumnName + ' FROM
-(
-SELECT DatabaseName, AvgBytesPerRead
-FROM #baseline
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(SUM(AvgBytesPerRead) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Log (average bytes/write)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database stats''
-, ' + @ColumnName + ' FROM
-(
-SELECT DatabaseName, AvgBytesPerWrite
-FROM #baseline
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(SUM(AvgBytesPerWrite) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-'
---PRINT @DynamicPivotQuery
-EXEC sp_executesql @DynamicPivotQuery;
-`
-
-const sqlDatabaseIO string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
-DECLARE @secondsBetween tinyint = 5;
-DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108);
-IF OBJECT_ID('tempdb..#baseline') IS NOT NULL
- DROP TABLE #baseline;
-IF OBJECT_ID('tempdb..#baselinewritten') IS NOT NULL
- DROP TABLE #baselinewritten;
-SELECT DB_NAME(mf.database_id) AS databaseName ,
- mf.physical_name,
- divfs.num_of_bytes_read,
- divfs.num_of_bytes_written,
- divfs.num_of_reads,
- divfs.num_of_writes,
- GETDATE() AS baselinedate
-INTO #baseline
-FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs
-INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id
- AND mf.file_id = divfs.file_id
-WAITFOR DELAY @delayInterval;
-;WITH currentLine AS
-(
- SELECT DB_NAME(mf.database_id) AS databaseName ,
- type_desc,
- mf.physical_name,
- divfs.num_of_bytes_read,
- divfs.num_of_bytes_written,
- divfs.num_of_reads,
- divfs.num_of_writes,
- GETDATE() AS currentlinedate
- FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs
- INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id
- AND mf.file_id = divfs.file_id
-)
-SELECT database_name
-, datafile_type
-, num_of_bytes_read_persec = SUM(num_of_bytes_read_persec)
-, num_of_bytes_written_persec = SUM(num_of_bytes_written_persec)
-, num_of_reads_persec = SUM(num_of_reads_persec)
-, num_of_writes_persec = SUM(num_of_writes_persec)
-INTO #baselinewritten
-FROM
-(
-SELECT
- database_name = currentLine.databaseName
-, datafile_type = type_desc
-, num_of_bytes_read_persec = (currentLine.num_of_bytes_read - T1.num_of_bytes_read) / (DATEDIFF(SECOND,baselinedate,currentlinedate))
-, num_of_bytes_written_persec = (currentLine.num_of_bytes_written - T1.num_of_bytes_written) / (DATEDIFF(SECOND,baselinedate,currentlinedate))
-, num_of_reads_persec = (currentLine.num_of_reads - T1.num_of_reads) / (DATEDIFF(SECOND,baselinedate,currentlinedate))
-, num_of_writes_persec = (currentLine.num_of_writes - T1.num_of_writes) / (DATEDIFF(SECOND,baselinedate,currentlinedate))
-FROM currentLine
-INNER JOIN #baseline T1 ON T1.databaseName = currentLine.databaseName
- AND T1.physical_name = currentLine.physical_name
-) as T
-GROUP BY database_name, datafile_type
-DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
-DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX)
-SELECT @ColumnName = ISNULL(@ColumnName + ',','') + QUOTENAME(database_name)
- FROM (SELECT DISTINCT database_name FROM #baselinewritten) AS bl
-SELECT @ColumnName2 = ISNULL(@ColumnName2 + '+','') + QUOTENAME(database_name)
- FROM (SELECT DISTINCT database_name FROM #baselinewritten) AS bl
-SET @DynamicPivotQuery = N'
-SELECT measurement = ''Log writes (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
-, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
-(
-SELECT database_name, num_of_bytes_written_persec
-FROM #baselinewritten
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(SUM(num_of_bytes_written_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-UNION ALL
-SELECT measurement = ''Rows writes (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
-, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
-(
-SELECT database_name, num_of_bytes_written_persec
-FROM #baselinewritten
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(SUM(num_of_bytes_written_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-UNION ALL
-SELECT measurement = ''Log reads (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
-, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
-(
-SELECT database_name, num_of_bytes_read_persec
-FROM #baselinewritten
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(SUM(num_of_bytes_read_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-UNION ALL
-SELECT measurement = ''Rows reads (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
-, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
-(
-SELECT database_name, num_of_bytes_read_persec
-FROM #baselinewritten
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(SUM(num_of_bytes_read_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-UNION ALL
-SELECT measurement = ''Log (writes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
-, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
-(
-SELECT database_name, num_of_writes_persec
-FROM #baselinewritten
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-UNION ALL
-SELECT measurement = ''Rows (writes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
-, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
-(
-SELECT database_name, num_of_writes_persec
-FROM #baselinewritten
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTabl
-UNION ALL
-SELECT measurement = ''Log (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
-, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
-(
-SELECT database_name, num_of_reads_persec
-FROM #baselinewritten
-WHERE datafile_type = ''LOG''
-) as V
-PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-UNION ALL
-SELECT measurement = ''Rows (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO''
-, ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM
-(
-SELECT database_name, num_of_reads_persec
-FROM #baselinewritten
-WHERE datafile_type = ''ROWS''
-) as V
-PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
-'
-EXEC sp_executesql @DynamicPivotQuery;
-`
-
-const sqlDatabaseProperties string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET ARITHABORT ON;
-SET QUOTED_IDENTIFIER ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
-
-IF OBJECT_ID('tempdb..#Databases') IS NOT NULL
- DROP TABLE #Databases;
-CREATE TABLE #Databases
-(
- Measurement nvarchar(64) NOT NULL,
- DatabaseName nvarchar(128) NOT NULL,
- Value tinyint NOT NULL
- Primary Key(DatabaseName, Measurement)
-);
-
-INSERT #Databases ( Measurement, DatabaseName, Value)
-SELECT
- Measurement = 'Recovery Model FULL'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.recovery_model = 1 THEN 1 ELSE 0 END
-FROM sys.databases d
-UNION ALL
-SELECT
- Measurement = 'Recovery Model BULK_LOGGED'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.recovery_model = 2 THEN 1 ELSE 0 END
-FROM sys.databases d
-UNION ALL
-SELECT
- Measurement = 'Recovery Model SIMPLE'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.recovery_model = 3 THEN 1 ELSE 0 END
-FROM sys.databases d
-
-UNION ALL
-SELECT
- Measurement = 'State ONLINE'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.state = 0 THEN 1 ELSE 0 END
-FROM sys.databases d
-UNION ALL
-SELECT
- Measurement = 'State RESTORING'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.state = 1 THEN 1 ELSE 0 END
-FROM sys.databases d
-UNION ALL
-SELECT
- Measurement = 'State RECOVERING'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.state = 2 THEN 1 ELSE 0 END
-FROM sys.databases d
-UNION ALL
-SELECT
- Measurement = 'State RECOVERY_PENDING'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.state = 3 THEN 1 ELSE 0 END
-FROM sys.databases d
-UNION ALL
-SELECT
- Measurement = 'State SUSPECT'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.state = 4 THEN 1 ELSE 0 END
-FROM sys.databases d
-UNION ALL
-SELECT
- Measurement = 'State EMERGENCY'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.state = 5 THEN 1 ELSE 0 END
-FROM sys.databases d
-UNION ALL
-SELECT
- Measurement = 'State OFFLINE'
-, DatabaseName = d.Name
-, Value = CASE WHEN d.state = 6 THEN 1 ELSE 0 END
-FROM sys.databases d
-
-DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
-DECLARE @ColumnName AS NVARCHAR(MAX)
-SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(DatabaseName)
-FROM (SELECT DISTINCT DatabaseName FROM #Databases) AS bl
-
-SET @DynamicPivotQuery = N'
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''Recovery Model FULL''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''Recovery Model BULK_LOGGED''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''Recovery Model SIMPLE''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-
-UNION ALL
-
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''State ONLINE''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''State RESTORING''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''State RECOVERING''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''State RECOVERY_PENDING''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''State SUSPECT''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''State EMERGENCY''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = Measurement, servername = REPLACE(@@SERVERNAME, ''\'', '':'')
-, type = ''Database properties''
-, ' + @ColumnName + ', Total FROM
-(
-SELECT Measurement, DatabaseName, Value
-, Total = (SELECT SUM(Value) FROM #Databases WHERE Measurement = d.Measurement)
-FROM #Databases d
-WHERE d.Measurement = ''State OFFLINE''
-) as V
-PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable
-'
-EXEC sp_executesql @DynamicPivotQuery;
-`
-
-const sqlCPUHistory string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET ARITHABORT ON;
-SET QUOTED_IDENTIFIER ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
-
-DECLARE @ms_ticks bigint;
-SET @ms_ticks = (Select ms_ticks From sys.dm_os_sys_info);
-DECLARE @maxEvents int = 1
-
-SELECT
----- measurement
- measurement = 'CPU (%)'
----- tags
-, servername= REPLACE(@@SERVERNAME, '\', ':')
-, type = 'CPU usage'
--- value
-, [SQL process] = ProcessUtilization
-, [External process]= 100 - SystemIdle - ProcessUtilization
-, [SystemIdle]
-FROM
-(
-SELECT TOP (@maxEvents)
- EventTime = CAST(DateAdd(ms, -1 * (@ms_ticks - timestamp_ms), GetUTCDate()) as datetime)
-, ProcessUtilization = CAST(ProcessUtilization as int)
-, SystemIdle = CAST(SystemIdle as int)
-FROM (SELECT Record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') as SystemIdle,
- Record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') as ProcessUtilization,
- timestamp as timestamp_ms
-FROM (SELECT timestamp, convert(xml, record) As Record
- FROM sys.dm_os_ring_buffers
- WHERE ring_buffer_type = N'RING_BUFFER_SCHEDULER_MONITOR'
- And record Like '%%') x) y
-ORDER BY timestamp_ms Desc
-) as T;
-`
-
-const sqlPerformanceCounters string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
-IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters
-CREATE TABLE #PCounters
-(
- object_name nvarchar(128),
- counter_name nvarchar(128),
- instance_name nvarchar(128),
- cntr_value bigint,
- cntr_type INT,
- Primary Key(object_name, counter_name, instance_name)
-);
-INSERT #PCounters
-SELECT DISTINCT RTrim(spi.object_name) object_name
-, RTrim(spi.counter_name) counter_name
-, RTrim(spi.instance_name) instance_name
-, spi.cntr_value
-, spi.cntr_type
-FROM sys.dm_os_performance_counters spi
-WHERE spi.object_name NOT LIKE 'SQLServer:Backup Device%'
- AND NOT EXISTS (SELECT 1 FROM sys.databases WHERE Name = spi.instance_name);
-
-WAITFOR DELAY '00:00:01';
-
-IF OBJECT_ID('tempdb..#CCounters') IS NOT NULL DROP TABLE #CCounters
-CREATE TABLE #CCounters
-(
- object_name nvarchar(128),
- counter_name nvarchar(128),
- instance_name nvarchar(128),
- cntr_value bigint,
- cntr_type INT,
- Primary Key(object_name, counter_name, instance_name)
-);
-INSERT #CCounters
-SELECT DISTINCT RTrim(spi.object_name) object_name
-, RTrim(spi.counter_name) counter_name
-, RTrim(spi.instance_name) instance_name
-, spi.cntr_value
-, spi.cntr_type
-FROM sys.dm_os_performance_counters spi
-WHERE spi.object_name NOT LIKE 'SQLServer:Backup Device%'
- AND NOT EXISTS (SELECT 1 FROM sys.databases WHERE Name = spi.instance_name);
-
-SELECT
- measurement = cc.counter_name
- + CASE WHEN LEN(cc.instance_name) > 0 THEN ' | ' + cc.instance_name ELSE '' END
- + ' | '
- + SUBSTRING( cc.object_name, CHARINDEX(':', cc.object_name) + 1, LEN( cc.object_name) - CHARINDEX(':', cc.object_name))
--- tags
-, servername = REPLACE(@@SERVERNAME, '\', ':')
-, type = 'Performance counters'
---, countertype = CASE cc.cntr_type
--- When 65792 Then 'Count'
--- When 537003264 Then 'Ratio'
--- When 272696576 Then 'Per second'
--- When 1073874176 Then 'Average'
--- When 272696320 Then 'Average Per Second'
--- When 1073939712 Then 'Base'
--- END
--- value
-, value = CAST(CASE cc.cntr_type
- When 65792 Then cc.cntr_value -- Count
- When 537003264 Then IsNull(Cast(cc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value, 0), 0) -- Ratio
- When 272696576 Then cc.cntr_value - pc.cntr_value -- Per Second
- When 1073874176 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg
- When 272696320 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg/sec
- When 1073939712 Then cc.cntr_value - pc.cntr_value -- Base
- Else cc.cntr_value End as bigint)
---, currentvalue= CAST(cc.cntr_value as bigint)
-FROM #CCounters cc
-INNER JOIN #PCounters pc On cc.object_name = pc.object_name
- And cc.counter_name = pc.counter_name
- And cc.instance_name = pc.instance_name
- And cc.cntr_type = pc.cntr_type
-LEFT JOIN #CCounters cbc On cc.object_name = cbc.object_name
- And (Case When cc.counter_name Like '%(ms)' Then Replace(cc.counter_name, ' (ms)',' Base')
- When cc.object_name = 'SQLServer:FileTable' Then Replace(cc.counter_name, 'Avg ','') + ' base'
- When cc.counter_name = 'Worktables From Cache Ratio' Then 'Worktables From Cache Base'
- When cc.counter_name = 'Avg. Length of Batched Writes' Then 'Avg. Length of Batched Writes BS'
- Else cc.counter_name + ' base'
- End) = cbc.counter_name
- And cc.instance_name = cbc.instance_name
- And cc.cntr_type In (537003264, 1073874176)
- And cbc.cntr_type = 1073939712
-LEFT JOIN #PCounters pbc On pc.object_name = pbc.object_name
- And pc.instance_name = pbc.instance_name
- And (Case When pc.counter_name Like '%(ms)' Then Replace(pc.counter_name, ' (ms)',' Base')
- When pc.object_name = 'SQLServer:FileTable' Then Replace(pc.counter_name, 'Avg ','') + ' base'
- When pc.counter_name = 'Worktables From Cache Ratio' Then 'Worktables From Cache Base'
- When pc.counter_name = 'Avg. Length of Batched Writes' Then 'Avg. Length of Batched Writes BS'
- Else pc.counter_name + ' base'
- End) = pbc.counter_name
- And pc.cntr_type In (537003264, 1073874176)
-
-IF OBJECT_ID('tempdb..#CCounters') IS NOT NULL DROP TABLE #CCounters;
-IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters;
-`
-
-const sqlWaitStatsCategorized string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
-DECLARE @secondsBetween tinyint = 5
-DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108);
-
-DECLARE @w1 TABLE
-(
- WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
- WaitTimeInMs bigint NOT NULL,
- WaitTaskCount bigint NOT NULL,
- CollectionDate datetime NOT NULL
-)
-DECLARE @w2 TABLE
-(
- WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
- WaitTimeInMs bigint NOT NULL,
- WaitTaskCount bigint NOT NULL,
- CollectionDate datetime NOT NULL
-)
-DECLARE @w3 TABLE
-(
- WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
-)
-DECLARE @w4 TABLE
-(
- WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
- WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
-)
-DECLARE @w5 TABLE
-(
- WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
- WaitTimeInMs bigint NOT NULL,
- WaitTaskCount bigint NOT NULL
-)
-
-INSERT @w3 (WaitType)
-VALUES (N'QDS_SHUTDOWN_QUEUE'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETION'),
- (N'BROKER_EVENTHANDLER'), (N'BROKER_RECEIVE_WAITFOR'),
- (N'BROKER_TASK_STOP'), (N'BROKER_TO_FLUSH'),
- (N'BROKER_TRANSMITTER'), (N'CHECKPOINT_QUEUE'),
- (N'CHKPT'), (N'CLR_AUTO_EVENT'),
- (N'CLR_MANUAL_EVENT'), (N'CLR_SEMAPHORE'),
- (N'DBMIRROR_DBM_EVENT'), (N'DBMIRROR_EVENTS_QUEUE'),
- (N'DBMIRROR_WORKER_QUEUE'), (N'DBMIRRORING_CMD'),
- (N'DIRTY_PAGE_POLL'), (N'DISPATCHER_QUEUE_SEMAPHORE'),
- (N'EXECSYNC'), (N'FSAGENT'),
- (N'FT_IFTS_SCHEDULER_IDLE_WAIT'), (N'FT_IFTSHC_MUTEX'),
- (N'HADR_CLUSAPI_CALL'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETIO(N'),
- (N'HADR_LOGCAPTURE_WAIT'), (N'HADR_NOTIFICATION_DEQUEUE'),
- (N'HADR_TIMER_TASK'), (N'HADR_WORK_QUEUE'),
- (N'KSOURCE_WAKEUP'), (N'LAZYWRITER_SLEEP'),
- (N'LOGMGR_QUEUE'), (N'ONDEMAND_TASK_QUEUE'),
- (N'PWAIT_ALL_COMPONENTS_INITIALIZED'),
- (N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP'),
- (N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP'),
- (N'REQUEST_FOR_DEADLOCK_SEARCH'), (N'RESOURCE_QUEUE'),
- (N'SERVER_IDLE_CHECK'), (N'SLEEP_BPOOL_FLUSH'),
- (N'SLEEP_DBSTARTUP'), (N'SLEEP_DCOMSTARTUP'),
- (N'SLEEP_MASTERDBREADY'), (N'SLEEP_MASTERMDREADY'),
- (N'SLEEP_MASTERUPGRADED'), (N'SLEEP_MSDBSTARTUP'),
- (N'SLEEP_SYSTEMTASK'), (N'SLEEP_TASK'),
- (N'SLEEP_TEMPDBSTARTUP'), (N'SNI_HTTP_ACCEPT'),
- (N'SP_SERVER_DIAGNOSTICS_SLEEP'), (N'SQLTRACE_BUFFER_FLUSH'),
- (N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP'),
- (N'SQLTRACE_WAIT_ENTRIES'), (N'WAIT_FOR_RESULTS'),
- (N'WAITFOR'), (N'WAITFOR_TASKSHUTDOW(N'),
- (N'WAIT_XTP_HOST_WAIT'), (N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG'),
- (N'WAIT_XTP_CKPT_CLOSE'), (N'XE_DISPATCHER_JOI(N'),
- (N'XE_DISPATCHER_WAIT'), (N'XE_TIMER_EVENT');
-
-INSERT @w4 (WaitType, WaitCategory) VALUES ('ABR', 'OTHER') ,
-('ASSEMBLY_LOAD' , 'OTHER') , ('ASYNC_DISKPOOL_LOCK' , 'I/O') , ('ASYNC_IO_COMPLETION' , 'I/O') ,
-('ASYNC_NETWORK_IO' , 'NETWORK') , ('AUDIT_GROUPCACHE_LOCK' , 'OTHER') , ('AUDIT_LOGINCACHE_LOCK' ,
-'OTHER') , ('AUDIT_ON_DEMAND_TARGET_LOCK' , 'OTHER') , ('AUDIT_XE_SESSION_MGR' , 'OTHER') , ('BACKUP' ,
-'BACKUP') , ('BACKUP_CLIENTLOCK ' , 'BACKUP') , ('BACKUP_OPERATOR' , 'BACKUP') , ('BACKUPBUFFER' ,
-'BACKUP') , ('BACKUPIO' , 'BACKUP') , ('BACKUPTHREAD' , 'BACKUP') , ('BAD_PAGE_PROCESS' , 'MEMORY') ,
-('BROKER_CONNECTION_RECEIVE_TASK' , 'SERVICE BROKER') , ('BROKER_ENDPOINT_STATE_MUTEX' , 'SERVICE BROKER')
-, ('BROKER_EVENTHANDLER' , 'SERVICE BROKER') , ('BROKER_INIT' , 'SERVICE BROKER') , ('BROKER_MASTERSTART'
-, 'SERVICE BROKER') , ('BROKER_RECEIVE_WAITFOR' , 'SERVICE BROKER') , ('BROKER_REGISTERALLENDPOINTS' ,
-'SERVICE BROKER') , ('BROKER_SERVICE' , 'SERVICE BROKER') , ('BROKER_SHUTDOWN' , 'SERVICE BROKER') ,
-('BROKER_TASK_STOP' , 'SERVICE BROKER') , ('BROKER_TO_FLUSH' , 'SERVICE BROKER') , ('BROKER_TRANSMITTER' ,
-'SERVICE BROKER') , ('BUILTIN_HASHKEY_MUTEX' , 'OTHER') , ('CHECK_PRINT_RECORD' , 'OTHER') ,
-('CHECKPOINT_QUEUE' , 'BUFFER') , ('CHKPT' , 'BUFFER') , ('CLEAR_DB' , 'OTHER') , ('CLR_AUTO_EVENT' ,
-'CLR') , ('CLR_CRST' , 'CLR') , ('CLR_JOIN' , 'CLR') , ('CLR_MANUAL_EVENT' , 'CLR') , ('CLR_MEMORY_SPY' ,
-'CLR') , ('CLR_MONITOR' , 'CLR') , ('CLR_RWLOCK_READER' , 'CLR') , ('CLR_RWLOCK_WRITER' , 'CLR') ,
-('CLR_SEMAPHORE' , 'CLR') , ('CLR_TASK_START' , 'CLR') , ('CLRHOST_STATE_ACCESS' , 'CLR') , ('CMEMTHREAD'
-, 'MEMORY') , ('COMMIT_TABLE' , 'OTHER') , ('CURSOR' , 'OTHER') , ('CURSOR_ASYNC' , 'OTHER') , ('CXPACKET'
-, 'OTHER') , ('CXROWSET_SYNC' , 'OTHER') , ('DAC_INIT' , 'OTHER') , ('DBMIRROR_DBM_EVENT ' , 'OTHER') ,
-('DBMIRROR_DBM_MUTEX ' , 'OTHER') , ('DBMIRROR_EVENTS_QUEUE' , 'OTHER') , ('DBMIRROR_SEND' , 'OTHER') ,
-('DBMIRROR_WORKER_QUEUE' , 'OTHER') , ('DBMIRRORING_CMD' , 'OTHER') , ('DBTABLE' , 'OTHER') ,
-('DEADLOCK_ENUM_MUTEX' , 'LOCK') , ('DEADLOCK_TASK_SEARCH' , 'LOCK') , ('DEBUG' , 'OTHER') ,
-('DISABLE_VERSIONING' , 'OTHER') , ('DISKIO_SUSPEND' , 'BACKUP') , ('DISPATCHER_QUEUE_SEMAPHORE' ,
-'OTHER') , ('DLL_LOADING_MUTEX' , 'XML') , ('DROPTEMP' , 'TEMPORARY OBJECTS') , ('DTC' , 'OTHER') ,
-('DTC_ABORT_REQUEST' , 'OTHER') , ('DTC_RESOLVE' , 'OTHER') , ('DTC_STATE' , 'DOTHERTC') ,
-('DTC_TMDOWN_REQUEST' , 'OTHER') , ('DTC_WAITFOR_OUTCOME' , 'OTHER') , ('DUMP_LOG_COORDINATOR' , 'OTHER')
-, ('DUMP_LOG_COORDINATOR_QUEUE' , 'OTHER') , ('DUMPTRIGGER' , 'OTHER') , ('EC' , 'OTHER') , ('EE_PMOLOCK'
-, 'MEMORY') , ('EE_SPECPROC_MAP_INIT' , 'OTHER') , ('ENABLE_VERSIONING' , 'OTHER') ,
-('ERROR_REPORTING_MANAGER' , 'OTHER') , ('EXCHANGE' , 'OTHER') , ('EXECSYNC' , 'OTHER') ,
-('EXECUTION_PIPE_EVENT_OTHER' , 'OTHER') , ('Failpoint' , 'OTHER') , ('FCB_REPLICA_READ' , 'OTHER') ,
-('FCB_REPLICA_WRITE' , 'OTHER') , ('FS_FC_RWLOCK' , 'OTHER') , ('FS_GARBAGE_COLLECTOR_SHUTDOWN' , 'OTHER')
-, ('FS_HEADER_RWLOCK' , 'OTHER') , ('FS_LOGTRUNC_RWLOCK' , 'OTHER') , ('FSA_FORCE_OWN_XACT' , 'OTHER') ,
-('FSAGENT' , 'OTHER') , ('FSTR_CONFIG_MUTEX' , 'OTHER') , ('FSTR_CONFIG_RWLOCK' , 'OTHER') ,
-('FT_COMPROWSET_RWLOCK' , 'OTHER') , ('FT_IFTS_RWLOCK' , 'OTHER') , ('FT_IFTS_SCHEDULER_IDLE_WAIT' ,
-'OTHER') , ('FT_IFTSHC_MUTEX' , 'OTHER') , ('FT_IFTSISM_MUTEX' , 'OTHER') , ('FT_MASTER_MERGE' , 'OTHER')
-, ('FT_METADATA_MUTEX' , 'OTHER') , ('FT_RESTART_CRAWL' , 'OTHER') , ('FT_RESUME_CRAWL' , 'OTHER') ,
-('FULLTEXT GATHERER' , 'OTHER') , ('GUARDIAN' , 'OTHER') , ('HTTP_ENDPOINT_COLLCREATE' , 'SERVICE BROKER')
-, ('HTTP_ENUMERATION' , 'SERVICE BROKER') , ('HTTP_START' , 'SERVICE BROKER') , ('IMP_IMPORT_MUTEX' ,
-'OTHER') , ('IMPPROV_IOWAIT' , 'I/O') , ('INDEX_USAGE_STATS_MUTEX' , 'OTHER') , ('OTHER_TESTING' ,
-'OTHER') , ('IO_AUDIT_MUTEX' , 'OTHER') , ('IO_COMPLETION' , 'I/O') , ('IO_RETRY' , 'I/O') ,
-('IOAFF_RANGE_QUEUE' , 'OTHER') , ('KSOURCE_WAKEUP' , 'SHUTDOWN') , ('KTM_ENLISTMENT' , 'OTHER') ,
-('KTM_RECOVERY_MANAGER' , 'OTHER') , ('KTM_RECOVERY_RESOLUTION' , 'OTHER') , ('LATCH_DT' , 'LATCH') ,
-('LATCH_EX' , 'LATCH') , ('LATCH_KP' , 'LATCH') , ('LATCH_NL' , 'LATCH') , ('LATCH_SH' , 'LATCH') ,
-('LATCH_UP' , 'LATCH') , ('LAZYWRITER_SLEEP' , 'BUFFER') , ('LCK_M_BU' , 'LOCK') , ('LCK_M_IS' , 'LOCK') ,
-('LCK_M_IU' , 'LOCK') , ('LCK_M_IX' , 'LOCK') , ('LCK_M_RIn_NL' , 'LOCK') , ('LCK_M_RIn_S' , 'LOCK') ,
-('LCK_M_RIn_U' , 'LOCK') , ('LCK_M_RIn_X' , 'LOCK') , ('LCK_M_RS_S' , 'LOCK') , ('LCK_M_RS_U' , 'LOCK') ,
-('LCK_M_RX_S' , 'LOCK') , ('LCK_M_RX_U' , 'LOCK') , ('LCK_M_RX_X' , 'LOCK') , ('LCK_M_S' , 'LOCK') ,
-('LCK_M_SCH_M' , 'LOCK') , ('LCK_M_SCH_S' , 'LOCK') , ('LCK_M_SIU' , 'LOCK') , ('LCK_M_SIX' , 'LOCK') ,
-('LCK_M_U' , 'LOCK') , ('LCK_M_UIX' , 'LOCK') , ('LCK_M_X' , 'LOCK') , ('LOGBUFFER' , 'OTHER') ,
-('LOGGENERATION' , 'OTHER') , ('LOGMGR' , 'OTHER') , ('LOGMGR_FLUSH' , 'OTHER') , ('LOGMGR_QUEUE' ,
-'OTHER') , ('LOGMGR_RESERVE_APPEND' , 'OTHER') , ('LOWFAIL_MEMMGR_QUEUE' , 'MEMORY') ,
-('METADATA_LAZYCACHE_RWLOCK' , 'OTHER') , ('MIRROR_SEND_MESSAGE' , 'OTHER') , ('MISCELLANEOUS' , 'IGNORE')
-, ('MSQL_DQ' , 'DISTRIBUTED QUERY') , ('MSQL_SYNC_PIPE' , 'OTHER') , ('MSQL_XACT_MGR_MUTEX' , 'OTHER') ,
-('MSQL_XACT_MUTEX' , 'OTHER') , ('MSQL_XP' , 'OTHER') , ('MSSEARCH' , 'OTHER') , ('NET_WAITFOR_PACKET' ,
-'NETWORK') , ('NODE_CACHE_MUTEX' , 'OTHER') , ('OTHER' , 'OTHER') , ('ONDEMAND_TASK_QUEUE' , 'OTHER') ,
-('PAGEIOLATCH_DT' , 'LATCH') , ('PAGEIOLATCH_EX' , 'LATCH') , ('PAGEIOLATCH_KP' , 'LATCH') ,
-('PAGEIOLATCH_NL' , 'LATCH') , ('PAGEIOLATCH_SH' , 'LATCH') , ('PAGEIOLATCH_UP' , 'LATCH') ,
-('PAGELATCH_DT' , 'LATCH') , ('PAGELATCH_EX' , 'LATCH') , ('PAGELATCH_KP' , 'LATCH') , ('PAGELATCH_NL' ,
-'LATCH') , ('PAGELATCH_SH' , 'LATCH') , ('PAGELATCH_UP' , 'LATCH') , ('PARALLEL_BACKUP_QUEUE' , 'BACKUP')
-, ('PERFORMANCE_COUNTERS_RWLOCK' , 'OTHER') , ('PREEMPTIVE_ABR' , 'OTHER') ,
-('PREEMPTIVE_AUDIT_ACCESS_EVENTLOG' , 'OTHER') , ('PREEMPTIVE_AUDIT_ACCESS_SECLOG' , 'OTHER') ,
-('PREEMPTIVE_CLOSEBACKUPMEDIA' , 'OTHER') , ('PREEMPTIVE_CLOSEBACKUPTAPE' , 'OTHER') ,
-('PREEMPTIVE_CLOSEBACKUPVDIDEVICE' , 'OTHER') , ('PREEMPTIVE_CLUSAPI_CLUSTERRESOURCECONTROL' , 'OTHER') ,
-('PREEMPTIVE_COM_COCREATEINSTANCE' , 'OTHER') , ('PREEMPTIVE_COM_COGETCLASSOBJECT' , 'OTHER') ,
-('PREEMPTIVE_COM_CREATEACCESSOR' , 'OTHER') , ('PREEMPTIVE_COM_DELETEROWS' , 'OTHER') ,
-('PREEMPTIVE_COM_GETCOMMANDTEXT' , 'OTHER') , ('PREEMPTIVE_COM_GETDATA' , 'OTHER') ,
-('PREEMPTIVE_COM_GETNEXTROWS' , 'OTHER') , ('PREEMPTIVE_COM_GETRESULT' , 'OTHER') ,
-('PREEMPTIVE_COM_GETROWSBYBOOKMARK' , 'OTHER') , ('PREEMPTIVE_COM_LBFLUSH' , 'OTHER') ,
-('PREEMPTIVE_COM_LBLOCKREGION' , 'OTHER') , ('PREEMPTIVE_COM_LBREADAT' , 'OTHER') ,
-('PREEMPTIVE_COM_LBSETSIZE' , 'OTHER') , ('PREEMPTIVE_COM_LBSTAT' , 'OTHER') ,
-('PREEMPTIVE_COM_LBUNLOCKREGION' , 'OTHER') , ('PREEMPTIVE_COM_LBWRITEAT' , 'OTHER') ,
-('PREEMPTIVE_COM_QUERYINTERFACE' , 'OTHER') , ('PREEMPTIVE_COM_RELEASE' , 'OTHER') ,
-('PREEMPTIVE_COM_RELEASEACCESSOR' , 'OTHER') , ('PREEMPTIVE_COM_RELEASEROWS' , 'OTHER') ,
-('PREEMPTIVE_COM_RELEASESESSION' , 'OTHER') , ('PREEMPTIVE_COM_RESTARTPOSITION' , 'OTHER') ,
-('PREEMPTIVE_COM_SEQSTRMREAD' , 'OTHER') , ('PREEMPTIVE_COM_SEQSTRMREADANDWRITE' , 'OTHER') ,
-('PREEMPTIVE_COM_SETDATAFAILURE' , 'OTHER') , ('PREEMPTIVE_COM_SETPARAMETERINFO' , 'OTHER') ,
-('PREEMPTIVE_COM_SETPARAMETERPROPERTIES' , 'OTHER') , ('PREEMPTIVE_COM_STRMLOCKREGION' , 'OTHER') ,
-('PREEMPTIVE_COM_STRMSEEKANDREAD' , 'OTHER') , ('PREEMPTIVE_COM_STRMSEEKANDWRITE' , 'OTHER') ,
-('PREEMPTIVE_COM_STRMSETSIZE' , 'OTHER') , ('PREEMPTIVE_COM_STRMSTAT' , 'OTHER') ,
-('PREEMPTIVE_COM_STRMUNLOCKREGION' , 'OTHER') , ('PREEMPTIVE_CONSOLEWRITE' , 'OTHER') ,
-('PREEMPTIVE_CREATEPARAM' , 'OTHER') , ('PREEMPTIVE_DEBUG' , 'OTHER') , ('PREEMPTIVE_DFSADDLINK' ,
-'OTHER') , ('PREEMPTIVE_DFSLINKEXISTCHECK' , 'OTHER') , ('PREEMPTIVE_DFSLINKHEALTHCHECK' , 'OTHER') ,
-('PREEMPTIVE_DFSREMOVELINK' , 'OTHER') , ('PREEMPTIVE_DFSREMOVEROOT' , 'OTHER') ,
-('PREEMPTIVE_DFSROOTFOLDERCHECK' , 'OTHER') , ('PREEMPTIVE_DFSROOTINIT' , 'OTHER') ,
-('PREEMPTIVE_DFSROOTSHARECHECK' , 'OTHER') , ('PREEMPTIVE_DTC_ABORT' , 'OTHER') ,
-('PREEMPTIVE_DTC_ABORTREQUESTDONE' , 'OTHER') , ('PREEMPTIVE_DTC_BEGINOTHER' , 'OTHER') ,
-('PREEMPTIVE_DTC_COMMITREQUESTDONE' , 'OTHER') , ('PREEMPTIVE_DTC_ENLIST' , 'OTHER') ,
-('PREEMPTIVE_DTC_PREPAREREQUESTDONE' , 'OTHER') , ('PREEMPTIVE_FILESIZEGET' , 'OTHER') ,
-('PREEMPTIVE_FSAOTHER_ABORTOTHER' , 'OTHER') , ('PREEMPTIVE_FSAOTHER_COMMITOTHER' , 'OTHER') ,
-('PREEMPTIVE_FSAOTHER_STARTOTHER' , 'OTHER') , ('PREEMPTIVE_FSRECOVER_UNCONDITIONALUNDO' , 'OTHER') ,
-('PREEMPTIVE_GETRMINFO' , 'OTHER') , ('PREEMPTIVE_LOCKMONITOR' , 'OTHER') , ('PREEMPTIVE_MSS_RELEASE' ,
-'OTHER') , ('PREEMPTIVE_ODBCOPS' , 'OTHER') , ('PREEMPTIVE_OLE_UNINIT' , 'OTHER') ,
-('PREEMPTIVE_OTHER_ABORTORCOMMITTRAN' , 'OTHER') , ('PREEMPTIVE_OTHER_ABORTTRAN' , 'OTHER') ,
-('PREEMPTIVE_OTHER_GETDATASOURCE' , 'OTHER') , ('PREEMPTIVE_OTHER_GETLITERALINFO' , 'OTHER') ,
-('PREEMPTIVE_OTHER_GETPROPERTIES' , 'OTHER') , ('PREEMPTIVE_OTHER_GETPROPERTYINFO' , 'OTHER') ,
-('PREEMPTIVE_OTHER_GETSCHEMALOCK' , 'OTHER') , ('PREEMPTIVE_OTHER_JOINOTHER' , 'OTHER') ,
-('PREEMPTIVE_OTHER_RELEASE' , 'OTHER') , ('PREEMPTIVE_OTHER_SETPROPERTIES' , 'OTHER') ,
-('PREEMPTIVE_OTHEROPS' , 'OTHER') , ('PREEMPTIVE_OS_ACCEPTSECURITYCONTEXT' , 'OTHER') ,
-('PREEMPTIVE_OS_ACQUIRECREDENTIALSHANDLE' , 'OTHER') , ('PREEMPTIVE_OS_AU,TICATIONOPS' , 'OTHER') ,
-('PREEMPTIVE_OS_AUTHORIZATIONOPS' , 'OTHER') , ('PREEMPTIVE_OS_AUTHZGETINFORMATIONFROMCONTEXT' , 'OTHER')
-, ('PREEMPTIVE_OS_AUTHZINITIALIZECONTEXTFROMSID' , 'OTHER') ,
-('PREEMPTIVE_OS_AUTHZINITIALIZERESOURCEMANAGER' , 'OTHER') , ('PREEMPTIVE_OS_BACKUPREAD' , 'OTHER') ,
-('PREEMPTIVE_OS_CLOSEHANDLE' , 'OTHER') , ('PREEMPTIVE_OS_CLUSTEROPS' , 'OTHER') , ('PREEMPTIVE_OS_COMOPS'
-, 'OTHER') , ('PREEMPTIVE_OS_COMPLETEAUTHTOKEN' , 'OTHER') , ('PREEMPTIVE_OS_COPYFILE' , 'OTHER') ,
-('PREEMPTIVE_OS_CREATEDIRECTORY' , 'OTHER') , ('PREEMPTIVE_OS_CREATEFILE' , 'OTHER') ,
-('PREEMPTIVE_OS_CRYPTACQUIRECONTEXT' , 'OTHER') , ('PREEMPTIVE_OS_CRYPTIMPORTKEY' , 'OTHER') ,
-('PREEMPTIVE_OS_CRYPTOPS' , 'OTHER') , ('PREEMPTIVE_OS_DECRYPTMESSAGE' , 'OTHER') ,
-('PREEMPTIVE_OS_DELETEFILE' , 'OTHER') , ('PREEMPTIVE_OS_DELETESECURITYCONTEXT' , 'OTHER') ,
-('PREEMPTIVE_OS_DEVICEIOCONTROL' , 'OTHER') , ('PREEMPTIVE_OS_DEVICEOPS' , 'OTHER') ,
-('PREEMPTIVE_OS_DIRSVC_NETWORKOPS' , 'OTHER') , ('PREEMPTIVE_OS_DISCONNECTNAMEDPIPE' , 'OTHER') ,
-('PREEMPTIVE_OS_DOMAINSERVICESOPS' , 'OTHER') , ('PREEMPTIVE_OS_DSGETDCNAME' , 'OTHER') ,
-('PREEMPTIVE_OS_DTCOPS' , 'OTHER') , ('PREEMPTIVE_OS_ENCRYPTMESSAGE' , 'OTHER') , ('PREEMPTIVE_OS_FILEOPS'
-, 'OTHER') , ('PREEMPTIVE_OS_FINDFILE' , 'OTHER') , ('PREEMPTIVE_OS_FLUSHFILEBUFFERS' , 'OTHER') ,
-('PREEMPTIVE_OS_FORMATMESSAGE' , 'OTHER') , ('PREEMPTIVE_OS_FREECREDENTIALSHANDLE' , 'OTHER') ,
-('PREEMPTIVE_OS_FREELIBRARY' , 'OTHER') , ('PREEMPTIVE_OS_GENERICOPS' , 'OTHER') ,
-('PREEMPTIVE_OS_GETADDRINFO' , 'OTHER') , ('PREEMPTIVE_OS_GETCOMPRESSEDFILESIZE' , 'OTHER') ,
-('PREEMPTIVE_OS_GETDISKFREESPACE' , 'OTHER') , ('PREEMPTIVE_OS_GETFILEATTRIBUTES' , 'OTHER') ,
-('PREEMPTIVE_OS_GETFILESIZE' , 'OTHER') , ('PREEMPTIVE_OS_GETLONGPATHNAME' , 'OTHER') ,
-('PREEMPTIVE_OS_GETPROCADDRESS' , 'OTHER') , ('PREEMPTIVE_OS_GETVOLUMENAMEFORVOLUMEMOUNTPOINT' , 'OTHER')
-, ('PREEMPTIVE_OS_GETVOLUMEPATHNAME' , 'OTHER') , ('PREEMPTIVE_OS_INITIALIZESECURITYCONTEXT' , 'OTHER') ,
-('PREEMPTIVE_OS_LIBRARYOPS' , 'OTHER') , ('PREEMPTIVE_OS_LOADLIBRARY' , 'OTHER') ,
-('PREEMPTIVE_OS_LOGONUSER' , 'OTHER') , ('PREEMPTIVE_OS_LOOKUPACCOUNTSID' , 'OTHER') ,
-('PREEMPTIVE_OS_MESSAGEQUEUEOPS' , 'OTHER') , ('PREEMPTIVE_OS_MOVEFILE' , 'OTHER') ,
-('PREEMPTIVE_OS_NETGROUPGETUSERS' , 'OTHER') , ('PREEMPTIVE_OS_NETLOCALGROUPGETMEMBERS' , 'OTHER') ,
-('PREEMPTIVE_OS_NETUSERGETGROUPS' , 'OTHER') , ('PREEMPTIVE_OS_NETUSERGETLOCALGROUPS' , 'OTHER') ,
-('PREEMPTIVE_OS_NETUSERMODALSGET' , 'OTHER') , ('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICY' , 'OTHER') ,
-('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICYFREE' , 'OTHER') , ('PREEMPTIVE_OS_OPENDIRECTORY' , 'OTHER') ,
-('PREEMPTIVE_OS_PIPEOPS' , 'OTHER') , ('PREEMPTIVE_OS_PROCESSOPS' , 'OTHER') ,
-('PREEMPTIVE_OS_QUERYREGISTRY' , 'OTHER') , ('PREEMPTIVE_OS_QUERYSECURITYCONTEXTTOKEN' , 'OTHER') ,
-('PREEMPTIVE_OS_REMOVEDIRECTORY' , 'OTHER') , ('PREEMPTIVE_OS_REPORTEVENT' , 'OTHER') ,
-('PREEMPTIVE_OS_REVERTTOSELF' , 'OTHER') , ('PREEMPTIVE_OS_RSFXDEVICEOPS' , 'OTHER') ,
-('PREEMPTIVE_OS_SECURITYOPS' , 'OTHER') , ('PREEMPTIVE_OS_SERVICEOPS' , 'OTHER') ,
-('PREEMPTIVE_OS_SETENDOFFILE' , 'OTHER') , ('PREEMPTIVE_OS_SETFILEPOINTER' , 'OTHER') ,
-('PREEMPTIVE_OS_SETFILEVALIDDATA' , 'OTHER') , ('PREEMPTIVE_OS_SETNAMEDSECURITYINFO' , 'OTHER') ,
-('PREEMPTIVE_OS_SQLCLROPS' , 'OTHER') , ('PREEMPTIVE_OS_SQMLAUNCH' , 'OTHER') ,
-('PREEMPTIVE_OS_VERIFYSIGNATURE' , 'OTHER') , ('PREEMPTIVE_OS_VSSOPS' , 'OTHER') ,
-('PREEMPTIVE_OS_WAITFORSINGLEOBJECT' , 'OTHER') , ('PREEMPTIVE_OS_WINSOCKOPS' , 'OTHER') ,
-('PREEMPTIVE_OS_WRITEFILE' , 'OTHER') , ('PREEMPTIVE_OS_WRITEFILEGATHER' , 'OTHER') ,
-('PREEMPTIVE_OS_WSASETLASTERROR' , 'OTHER') , ('PREEMPTIVE_REENLIST' , 'OTHER') , ('PREEMPTIVE_RESIZELOG'
-, 'OTHER') , ('PREEMPTIVE_ROLLFORWARDREDO' , 'OTHER') , ('PREEMPTIVE_ROLLFORWARDUNDO' , 'OTHER') ,
-('PREEMPTIVE_SB_STOPENDPOINT' , 'OTHER') , ('PREEMPTIVE_SERVER_STARTUP' , 'OTHER') ,
-('PREEMPTIVE_SETRMINFO' , 'OTHER') , ('PREEMPTIVE_SHAREDMEM_GETDATA' , 'OTHER') , ('PREEMPTIVE_SNIOPEN' ,
-'OTHER') , ('PREEMPTIVE_SOSHOST' , 'OTHER') , ('PREEMPTIVE_SOSTESTING' , 'OTHER') , ('PREEMPTIVE_STARTRM'
-, 'OTHER') , ('PREEMPTIVE_STREAMFCB_CHECKPOINT' , 'OTHER') , ('PREEMPTIVE_STREAMFCB_RECOVER' , 'OTHER') ,
-('PREEMPTIVE_STRESSDRIVER' , 'OTHER') , ('PREEMPTIVE_TESTING' , 'OTHER') , ('PREEMPTIVE_TRANSIMPORT' ,
-'OTHER') , ('PREEMPTIVE_UNMARSHALPROPAGATIONTOKEN' , 'OTHER') , ('PREEMPTIVE_VSS_CREATESNAPSHOT' ,
-'OTHER') , ('PREEMPTIVE_VSS_CREATEVOLUMESNAPSHOT' , 'OTHER') , ('PREEMPTIVE_XE_CALLBACKEXECUTE' , 'OTHER')
-, ('PREEMPTIVE_XE_DISPATCHER' , 'OTHER') , ('PREEMPTIVE_XE_ENGINEINIT' , 'OTHER') ,
-('PREEMPTIVE_XE_GETTARGETSTATE' , 'OTHER') , ('PREEMPTIVE_XE_SESSIONCOMMIT' , 'OTHER') ,
-('PREEMPTIVE_XE_TARGETFINALIZE' , 'OTHER') , ('PREEMPTIVE_XE_TARGETINIT' , 'OTHER') ,
-('PREEMPTIVE_XE_TIMERRUN' , 'OTHER') , ('PREEMPTIVE_XETESTING' , 'OTHER') , ('PREEMPTIVE_XXX' , 'OTHER') ,
-('PRINT_ROLLBACK_PROGRESS' , 'OTHER') , ('QNMANAGER_ACQUIRE' , 'OTHER') , ('QPJOB_KILL' , 'OTHER') ,
-('QPJOB_WAITFOR_ABORT' , 'OTHER') , ('QRY_MEM_GRANT_INFO_MUTEX' , 'OTHER') , ('QUERY_ERRHDL_SERVICE_DONE'
-, 'OTHER') , ('QUERY_EXECUTION_INDEX_SORT_EVENT_OPEN' , 'OTHER') , ('QUERY_NOTIFICATION_MGR_MUTEX' ,
-'OTHER') , ('QUERY_NOTIFICATION_SUBSCRIPTION_MUTEX' , 'OTHER') , ('QUERY_NOTIFICATION_TABLE_MGR_MUTEX' ,
-'OTHER') , ('QUERY_NOTIFICATION_UNITTEST_MUTEX' , 'OTHER') , ('QUERY_OPTIMIZER_PRINT_MUTEX' , 'OTHER') ,
-('QUERY_TRACEOUT' , 'OTHER') , ('QUERY_WAIT_ERRHDL_SERVICE' , 'OTHER') , ('RECOVER_CHANGEDB' , 'OTHER') ,
-('REPL_CACHE_ACCESS' , 'REPLICATION') , ('REPL_HISTORYCACHE_ACCESS' , 'OTHER') , ('REPL_SCHEMA_ACCESS' ,
-'OTHER') , ('REPL_TRANHASHTABLE_ACCESS' , 'OTHER') , ('REPLICA_WRITES' , 'OTHER') ,
-('REQUEST_DISPENSER_PAUSE' , 'BACKUP') , ('REQUEST_FOR_DEADLOCK_SEARCH' , 'LOCK') , ('RESMGR_THROTTLED' ,
-'OTHER') , ('RESOURCE_QUERY_SEMAPHORE_COMPILE' , 'QUERY') , ('RESOURCE_QUEUE' , 'OTHER') ,
-('RESOURCE_SEMAPHORE' , 'OTHER') , ('RESOURCE_SEMAPHORE_MUTEX' , 'MEMORY') ,
-('RESOURCE_SEMAPHORE_QUERY_COMPILE' , 'MEMORY') , ('RESOURCE_SEMAPHORE_SMALL_QUERY' , 'MEMORY') ,
-('RG_RECONFIG' , 'OTHER') , ('SEC_DROP_TEMP_KEY' , 'SECURITY') , ('SECURITY_MUTEX' , 'OTHER') ,
-('SEQUENTIAL_GUID' , 'OTHER') , ('SERVER_IDLE_CHECK' , 'OTHER') , ('SHUTDOWN' , 'OTHER') ,
-('SLEEP_BPOOL_FLUSH' , 'OTHER') , ('SLEEP_DBSTARTUP' , 'OTHER') , ('SLEEP_DCOMSTARTUP' , 'OTHER') ,
-('SLEEP_MSDBSTARTUP' , 'OTHER') , ('SLEEP_SYSTEMTASK' , 'OTHER') , ('SLEEP_TASK' , 'OTHER') ,
-('SLEEP_TEMPDBSTARTUP' , 'OTHER') , ('SNI_CRITICAL_SECTION' , 'OTHER') , ('SNI_HTTP_ACCEPT' , 'OTHER') ,
-('SNI_HTTP_WAITFOR_0_DISCON' , 'OTHER') , ('SNI_LISTENER_ACCESS' , 'OTHER') , ('SNI_TASK_COMPLETION' ,
-'OTHER') , ('SOAP_READ' , 'OTHER') , ('SOAP_WRITE' , 'OTHER') , ('SOS_CALLBACK_REMOVAL' , 'OTHER') ,
-('SOS_DISPATCHER_MUTEX' , 'OTHER') , ('SOS_LOCALALLOCATORLIST' , 'OTHER') , ('SOS_MEMORY_USAGE_ADJUSTMENT'
-, 'OTHER') , ('SOS_OBJECT_STORE_DESTROY_MUTEX' , 'OTHER') , ('SOS_PROCESS_AFFINITY_MUTEX' , 'OTHER') ,
-('SOS_RESERVEDMEMBLOCKLIST' , 'OTHER') , ('SOS_SCHEDULER_YIELD' , 'SQLOS') , ('SOS_SMALL_PAGE_ALLOC' ,
-'OTHER') , ('SOS_STACKSTORE_INIT_MUTEX' , 'OTHER') , ('SOS_SYNC_TASK_ENQUEUE_EVENT' , 'OTHER') ,
-('SOS_VIRTUALMEMORY_LOW' , 'OTHER') , ('SOSHOST_EVENT' , 'CLR') , ('SOSHOST_OTHER' , 'CLR') ,
-('SOSHOST_MUTEX' , 'CLR') , ('SOSHOST_ROWLOCK' , 'CLR') , ('SOSHOST_RWLOCK' , 'CLR') ,
-('SOSHOST_SEMAPHORE' , 'CLR') , ('SOSHOST_SLEEP' , 'CLR') , ('SOSHOST_TRACELOCK' , 'CLR') ,
-('SOSHOST_WAITFORDONE' , 'CLR') , ('SQLCLR_APPDOMAIN' , 'CLR') , ('SQLCLR_ASSEMBLY' , 'CLR') ,
-('SQLCLR_DEADLOCK_DETECTION' , 'CLR') , ('SQLCLR_QUANTUM_PUNISHMENT' , 'CLR') , ('SQLSORT_NORMMUTEX' ,
-'OTHER') , ('SQLSORT_SORTMUTEX' , 'OTHER') , ('SQLTRACE_BUFFER_FLUSH ' , 'TRACE') , ('SQLTRACE_LOCK' ,
-'OTHER') , ('SQLTRACE_SHUTDOWN' , 'OTHER') , ('SQLTRACE_WAIT_ENTRIES' , 'OTHER') , ('SRVPROC_SHUTDOWN' ,
-'OTHER') , ('TEMPOBJ' , 'OTHER') , ('THREADPOOL' , 'SQLOS') , ('TIMEPRIV_TIMEPERIOD' , 'OTHER') ,
-('TRACE_EVTNOTIF' , 'OTHER') , ('TRACEWRITE' , 'OTHER') , ('TRAN_MARKLATCH_DT' , 'TRAN_MARKLATCH') ,
-('TRAN_MARKLATCH_EX' , 'TRAN_MARKLATCH') , ('TRAN_MARKLATCH_KP' , 'TRAN_MARKLATCH') , ('TRAN_MARKLATCH_NL'
-, 'TRAN_MARKLATCH') , ('TRAN_MARKLATCH_SH' , 'TRAN_MARKLATCH') , ('TRAN_MARKLATCH_UP' , 'TRAN_MARKLATCH')
-, ('OTHER_MUTEX' , 'OTHER') , ('UTIL_PAGE_ALLOC' , 'OTHER') , ('VIA_ACCEPT' , 'OTHER') ,
-('VIEW_DEFINITION_MUTEX' , 'OTHER') , ('WAIT_FOR_RESULTS' , 'OTHER') , ('WAITFOR' , 'WAITFOR') ,
-('WAITFOR_TASKSHUTDOWN' , 'OTHER') , ('WAITSTAT_MUTEX' , 'OTHER') , ('WCC' , 'OTHER') , ('WORKTBL_DROP' ,
-'OTHER') , ('WRITE_COMPLETION' , 'OTHER') , ('WRITELOG' , 'I/O') , ('XACT_OWN_OTHER' , 'OTHER') ,
-('XACT_RECLAIM_SESSION' , 'OTHER') , ('XACTLOCKINFO' , 'OTHER') , ('XACTWORKSPACE_MUTEX' , 'OTHER') ,
-('XE_BUFFERMGR_ALLPROCESSED_EVENT' , 'XEVENT') , ('XE_BUFFERMGR_FREEBUF_EVENT' , 'XEVENT') ,
-('XE_DISPATCHER_CONFIG_SESSION_LIST' , 'XEVENT') , ('XE_DISPATCHER_JOIN' , 'XEVENT') ,
-('XE_DISPATCHER_WAIT' , 'XEVENT') , ('XE_MODULEMGR_SYNC' , 'XEVENT') , ('XE_OLS_LOCK' , 'XEVENT') ,
-('XE_PACKAGE_LOCK_BACKOFF' , 'XEVENT') , ('XE_SERVICES_EVENTMANUAL' , 'XEVENT') , ('XE_SERVICES_MUTEX' ,
-'XEVENT') , ('XE_SERVICES_RWLOCK' , 'XEVENT') , ('XE_SESSION_CREATE_SYNC' , 'XEVENT') ,
-('XE_SESSION_FLUSH' , 'XEVENT') , ('XE_SESSION_SYNC' , 'XEVENT') , ('XE_STM_CREATE' , 'XEVENT') ,
-('XE_TIMER_EVENT' , 'XEVENT') , ('XE_TIMER_MUTEX' , 'XEVENT')
-, ('XE_TIMER_TASK_DONE' , 'XEVENT');
-
-
-INSERT @w1 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
-SELECT
- WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
-, WaitTimeInMs = SUM(wait_time_ms)
-, WaitTaskCount = SUM(waiting_tasks_count)
-, CollectionDate = GETDATE()
-FROM sys.dm_os_wait_stats
-WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
-(
- SELECT WaitType FROM @w3
-)
-AND [waiting_tasks_count] > 0
-GROUP BY wait_type
-
-WAITFOR DELAY @delayInterval;
-
-INSERT @w2 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
-SELECT
- WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
-, WaitTimeInMs = SUM(wait_time_ms)
-, WaitTaskCount = SUM(waiting_tasks_count)
-, CollectionDate = GETDATE()
-FROM sys.dm_os_wait_stats
-WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
-(
- SELECT WaitType FROM @w3
-)
-AND [waiting_tasks_count] > 0
-GROUP BY wait_type;
-
-
-INSERT @w5 (WaitCategory, WaitTimeInMs, WaitTaskCount)
-SELECT WaitCategory
-, WaitTimeInMs = SUM(WaitTimeInMs)
-, WaitTaskCount = SUM(WaitTaskCount)
-FROM
-(
-SELECT
- WaitCategory = ISNULL(T4.WaitCategory, 'OTHER')
-, WaitTimeInMs = (T2.WaitTimeInMs - T1.WaitTimeInMs)
-, WaitTaskCount = (T2.WaitTaskCount - T1.WaitTaskCount)
---, WaitTimeInMsPerSec = ((T2.WaitTimeInMs - T1.WaitTimeInMs) / CAST(DATEDIFF(SECOND, T1.CollectionDate, T2.CollectionDate) as float))
-FROM @w1 T1
-INNER JOIN @w2 T2 ON T2.WaitType = T1.WaitType
-LEFT JOIN @w4 T4 ON T4.WaitType = T1.WaitType
-WHERE T2.WaitTaskCount - T1.WaitTaskCount > 0
-) as G
-GROUP BY G.WaitCategory;
-
-
-
-SELECT
----- measurement
- measurement = 'Wait time (ms)'
----- tags
-, servername= REPLACE(@@SERVERNAME, '\', ':')
-, type = 'Wait stats'
----- values
-, [I/O] = SUM([I/O])
-, [Latch] = SUM([LATCH])
-, [Lock] = SUM([LOCK])
-, [Network] = SUM([NETWORK])
-, [Service broker] = SUM([SERVICE BROKER])
-, [Memory] = SUM([MEMORY])
-, [Buffer] = SUM([BUFFER])
-, [CLR] = SUM([CLR])
-, [SQLOS] = SUM([SQLOS])
-, [XEvent] = SUM([XEVENT])
-, [Other] = SUM([OTHER])
-, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
-FROM
-(
-SELECT
- [I/O] = ISNULL([I/O] , 0)
-, [MEMORY] = ISNULL([MEMORY] , 0)
-, [BUFFER] = ISNULL([BUFFER] , 0)
-, [LATCH] = ISNULL([LATCH] , 0)
-, [LOCK] = ISNULL([LOCK] , 0)
-, [NETWORK] = ISNULL([NETWORK] , 0)
-, [SERVICE BROKER] = ISNULL([SERVICE BROKER] , 0)
-, [CLR] = ISNULL([CLR] , 0)
-, [XEVENT] = ISNULL([XEVENT] , 0)
-, [SQLOS] = ISNULL([SQLOS] , 0)
-, [OTHER] = ISNULL([OTHER] , 0)
-FROM @w5 as P
-PIVOT
-(
- SUM(WaitTimeInMs)
- FOR WaitCategory IN ([I/O], [LATCH], [LOCK], [NETWORK], [SERVICE BROKER], [MEMORY], [BUFFER], [CLR], [XEVENT], [SQLOS], [OTHER])
-) AS PivotTable
-) as T
-
-UNION ALL
-
-SELECT
----- measurement
- measurement = 'Wait tasks'
----- tags
-, server_name= REPLACE(@@SERVERNAME, '\', ':')
-, type = 'Wait stats'
----- values
-, [I/O] = SUM([I/O])
-, [Latch] = SUM([LATCH])
-, [Lock] = SUM([LOCK])
-, [Network] = SUM([NETWORK])
-, [Service broker] = SUM([SERVICE BROKER])
-, [Memory] = SUM([MEMORY])
-, [Buffer] = SUM([BUFFER])
-, [CLR] = SUM([CLR])
-, [SQLOS] = SUM([SQLOS])
-, [XEvent] = SUM([XEVENT])
-, [Other] = SUM([OTHER])
-, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
-FROM
-(
-SELECT
- [I/O] = ISNULL([I/O] , 0)
-, [MEMORY] = ISNULL([MEMORY] , 0)
-, [BUFFER] = ISNULL([BUFFER] , 0)
-, [LATCH] = ISNULL([LATCH] , 0)
-, [LOCK] = ISNULL([LOCK] , 0)
-, [NETWORK] = ISNULL([NETWORK] , 0)
-, [SERVICE BROKER] = ISNULL([SERVICE BROKER] , 0)
-, [CLR] = ISNULL([CLR] , 0)
-, [XEVENT] = ISNULL([XEVENT] , 0)
-, [SQLOS] = ISNULL([SQLOS] , 0)
-, [OTHER] = ISNULL([OTHER] , 0)
-FROM @w5 as P
-PIVOT
-(
- SUM(WaitTaskCount)
- FOR WaitCategory IN ([I/O], [LATCH], [LOCK], [NETWORK], [SERVICE BROKER], [MEMORY], [BUFFER], [CLR], [XEVENT], [SQLOS], [OTHER])
-) AS PivotTable
-) as T;
-`
-
-const sqlVolumeSpace string = `SET DEADLOCK_PRIORITY -10;
-SET NOCOUNT ON;
-SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
-
-IF OBJECT_ID('tempdb..#volumestats') IS NOT NULL
- DROP TABLE #volumestats;
-SELECT DISTINCT
- volume = REPLACE(vs.volume_mount_point, '\', '')
- + CASE WHEN LEN(vs.logical_volume_name) > 0
- THEN ' (' + vs.logical_volume_name + ')'
- ELSE '' END
-, total_bytes = vs.total_bytes
-, available_bytes = vs.available_bytes
-, used_bytes = vs.total_bytes - vs.available_bytes
-, used_percent = 100 * CAST(ROUND((vs.total_bytes - vs.available_bytes) * 1. / vs.total_bytes, 2) as decimal(5,2))
-INTO #volumestats
-FROM sys.master_files AS f
-CROSS APPLY sys.dm_os_volume_stats(f.database_id, f.file_id) vs
-
-DECLARE @DynamicPivotQuery AS NVARCHAR(MAX)
-DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX)
-
-SELECT @ColumnName= ISNULL(@ColumnName + ',','') + QUOTENAME(volume)
-FROM (SELECT DISTINCT volume FROM #volumestats) AS bl
-
---Prepare the PIVOT query using the dynamic
-SET @DynamicPivotQuery = N'
-SELECT measurement = ''Volume total space (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''OS Volume space''
-, ' + @ColumnName + ' FROM
-(
-SELECT volume, total_bytes
-FROM #volumestats
-) as V
-PIVOT(SUM(total_bytes) FOR volume IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Volume available space (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''OS Volume space''
-, ' + @ColumnName + ' FROM
-(
-SELECT volume, available_bytes
-FROM #volumestats
-) as V
-PIVOT(SUM(available_bytes) FOR volume IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Volume used space (bytes)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''OS Volume space''
-, ' + @ColumnName + ' FROM
-(
-SELECT volume, used_bytes
-FROM #volumestats
-) as V
-PIVOT(SUM(used_bytes) FOR volume IN (' + @ColumnName + ')) AS PVTTable
-
-UNION ALL
-
-SELECT measurement = ''Volume used space (%)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''OS Volume space''
-, ' + @ColumnName + ' FROM
-(
-SELECT volume, used_percent
-FROM #volumestats
-) as V
-PIVOT(SUM(used_percent) FOR volume IN (' + @ColumnName + ')) AS PVTTable'
-
-EXEC sp_executesql @DynamicPivotQuery;
-`
diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go
index 063af75950a13..8f5d355ef4df3 100644
--- a/plugins/inputs/sqlserver/sqlserver_test.go
+++ b/plugins/inputs/sqlserver/sqlserver_test.go
@@ -6,25 +6,57 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
+func TestSqlServer_QueriesInclusionExclusion(t *testing.T) {
+ cases := []map[string]interface{}{
+ {
+ "IncludeQuery": []string{},
+ "ExcludeQuery": []string{"WaitStatsCategorized", "DatabaseIO", "ServerProperties", "MemoryClerk", "Schedulers", "VolumeSpace", "Cpu"},
+ "queries": []string{"PerformanceCounters", "SqlRequests"},
+ "queriesTotal": 2,
+ },
+ {
+ "IncludeQuery": []string{"PerformanceCounters", "SqlRequests"},
+ "ExcludeQuery": []string{"SqlRequests", "WaitStatsCategorized", "DatabaseIO", "VolumeSpace", "Cpu"},
+ "queries": []string{"PerformanceCounters"},
+ "queriesTotal": 1,
+ },
+ }
+
+ for _, test := range cases {
+ s := SQLServer{
+ QueryVersion: 2,
+ IncludeQuery: test["IncludeQuery"].([]string),
+ ExcludeQuery: test["ExcludeQuery"].([]string),
+ }
+ initQueries(&s)
+ assert.Equal(t, len(s.queries), test["queriesTotal"].(int))
+ for _, query := range test["queries"].([]string) {
+ assert.Contains(t, s.queries, query)
+ }
+ }
+}
+
func TestSqlServer_ParseMetrics(t *testing.T) {
var acc testutil.Accumulator
- queries = make(MapQuery)
- queries["PerformanceCounters"] = Query{Script: mockPerformanceCounters, ResultByRow: true}
- queries["WaitStatsCategorized"] = Query{Script: mockWaitStatsCategorized, ResultByRow: false}
- queries["CPUHistory"] = Query{Script: mockCPUHistory, ResultByRow: false}
- queries["DatabaseIO"] = Query{Script: mockDatabaseIO, ResultByRow: false}
- queries["DatabaseSize"] = Query{Script: mockDatabaseSize, ResultByRow: false}
- queries["DatabaseStats"] = Query{Script: mockDatabaseStats, ResultByRow: false}
- queries["DatabaseProperties"] = Query{Script: mockDatabaseProperties, ResultByRow: false}
- queries["VolumeSpace"] = Query{Script: mockVolumeSpace, ResultByRow: false}
- queries["MemoryClerk"] = Query{Script: mockMemoryClerk, ResultByRow: false}
- queries["PerformanceMetrics"] = Query{Script: mockPerformanceMetrics, ResultByRow: false}
+ queries := make(MapQuery)
+ queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: mockPerformanceCounters, ResultByRow: true}
+ queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: mockWaitStatsCategorized, ResultByRow: false}
+ queries["CPUHistory"] = Query{ScriptName: "CPUHistory", Script: mockCPUHistory, ResultByRow: false}
+ queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: mockDatabaseIO, ResultByRow: false}
+ queries["DatabaseSize"] = Query{ScriptName: "DatabaseSize", Script: mockDatabaseSize, ResultByRow: false}
+ queries["DatabaseStats"] = Query{ScriptName: "DatabaseStats", Script: mockDatabaseStats, ResultByRow: false}
+ queries["DatabaseProperties"] = Query{ScriptName: "DatabaseProperties", Script: mockDatabaseProperties, ResultByRow: false}
+ queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: mockVolumeSpace, ResultByRow: false}
+ queries["MemoryClerk"] = Query{ScriptName: "MemoryClerk", Script: mockMemoryClerk, ResultByRow: false}
+ queries["PerformanceMetrics"] = Query{ScriptName: "PerformanceMetrics", Script: mockPerformanceMetrics, ResultByRow: false}
var headers, mock, row []string
var tags = make(map[string]string)
@@ -81,6 +113,64 @@ func TestSqlServer_ParseMetrics(t *testing.T) {
}
}
+func TestSqlServer_MultipleInstance(t *testing.T) {
+ // Invoke Gather() from two separate configurations and
+ // confirm they don't interfere with each other
+ if testing.Short() {
+ t.Skip("Skipping integration test in short mode")
+ }
+ testServer := "Server=127.0.0.1;Port=1433;User Id=SA;Password=ABCabc01;app name=telegraf;log=1"
+ s := &SQLServer{
+ Servers: []string{testServer},
+ ExcludeQuery: []string{"MemoryClerk"},
+ }
+ s2 := &SQLServer{
+ Servers: []string{testServer},
+ ExcludeQuery: []string{"DatabaseSize"},
+ }
+
+ var acc, acc2 testutil.Accumulator
+ err := s.Gather(&acc)
+ require.NoError(t, err)
+ assert.Equal(t, s.isInitialized, true)
+ assert.Equal(t, s2.isInitialized, false)
+
+ err = s2.Gather(&acc2)
+ require.NoError(t, err)
+ assert.Equal(t, s.isInitialized, true)
+ assert.Equal(t, s2.isInitialized, true)
+
+ // acc includes size metrics, and excludes memory metrics
+ assert.False(t, acc.HasMeasurement("Memory breakdown (%)"))
+ assert.True(t, acc.HasMeasurement("Log size (bytes)"))
+
+ // acc2 includes memory metrics, and excludes size metrics
+ assert.True(t, acc2.HasMeasurement("Memory breakdown (%)"))
+ assert.False(t, acc2.HasMeasurement("Log size (bytes)"))
+}
+
+func TestSqlServer_MultipleInit(t *testing.T) {
+
+ s := &SQLServer{}
+ s2 := &SQLServer{
+ ExcludeQuery: []string{"DatabaseSize"},
+ }
+
+ initQueries(s)
+ _, ok := s.queries["DatabaseSize"]
+ // acc includes size metrics
+ assert.True(t, ok)
+ assert.Equal(t, s.isInitialized, true)
+ assert.Equal(t, s2.isInitialized, false)
+
+ initQueries(s2)
+ _, ok = s2.queries["DatabaseSize"]
+ // acc2 excludes size metrics
+ assert.False(t, ok)
+ assert.Equal(t, s.isInitialized, true)
+ assert.Equal(t, s2.isInitialized, true)
+}
+
const mockPerformanceMetrics = `measurement;servername;type;Point In Time Recovery;Available physical memory (bytes);Average pending disk IO;Average runnable tasks;Average tasks;Buffer pool rate (bytes/sec);Connection memory per connection (bytes);Memory grant pending;Page File Usage (%);Page lookup per batch request;Page split per batch request;Readahead per page read;Signal wait (%);Sql compilation per batch request;Sql recompilation per batch request;Total target memory ratio
Performance metrics;WIN8-DEV;Performance metrics;0;6353158144;0;0;7;2773;415061;0;25;229371;130;10;18;188;52;14`
diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go
new file mode 100644
index 0000000000000..7a686c6f4bc77
--- /dev/null
+++ b/plugins/inputs/sqlserver/sqlserverqueries.go
@@ -0,0 +1,1157 @@
+package sqlserver
+
+import (
+ _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization
+)
+
+// Queries - V2
+// Thanks Bob Ward (http://aka.ms/bobwardms)
+// and the folks at Stack Overflow (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs)
+// for putting most of the memory clerk definitions online!
+/*
+The SQL scripts use a series of IF and CASE statemens to choose the correct query based on edition and version of SQL Server, below the meaning of the numbers:
+EngineEdition:
+1 = Personal or Desktop Engine (Not available in SQL Server 2005 (9.x) and later versions.)
+2 = Standard (This is returned for Standard, Web, and Business Intelligence.)
+3 = Enterprise (This is returned for Evaluation, Developer, and Enterprise editions.)
+4 = Express (This is returned for Express, Express with Tools, and Express with Advanced Services)
+5 = SQL Database
+6 = Microsoft Azure Synapse Analytics (formerly SQL Data Warehouse)
+8 = Managed Instance
+
+ProductVersion:
+see https://sqlserverbuilds.blogspot.com/ for all the details about the version number of SQL Server
+*/
+
+const sqlServerMemoryClerks = `
+DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+ ,@Columns AS nvarchar(max) = ''
+
+IF @MajorMinorVersion >= 1100
+ SET @Columns += N'mc.[pages_kb]';
+ELSE
+ SET @Columns += N'mc.[single_pages_kb] + mc.[multi_pages_kb]';
+
+SET @SqlStatement = N'
+SELECT
+ ''sqlserver_memory_clerks'' AS [measurement]
+ ,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance]
+ ,ISNULL(clerk_names.[name],mc.[type]) AS [clerk_type]
+ ,SUM(' + @Columns + N') AS [size_kb]
+FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK)
+LEFT OUTER JOIN ( VALUES
+ (''CACHESTORE_BROKERDSH'',''Service Broker Dialog Security Header Cache'')
+ ,(''CACHESTORE_BROKERKEK'',''Service Broker Key Exchange Key Cache'')
+ ,(''CACHESTORE_BROKERREADONLY'',''Service Broker (Read-Only)'')
+ ,(''CACHESTORE_BROKERRSB'',''Service Broker Null Remote Service Binding Cache'')
+ ,(''CACHESTORE_BROKERTBLACS'',''Broker dormant rowsets'')
+ ,(''CACHESTORE_BROKERTO'',''Service Broker Transmission Object Cache'')
+ ,(''CACHESTORE_BROKERUSERCERTLOOKUP'',''Service Broker user certificates lookup result cache'')
+ ,(''CACHESTORE_CLRPROC'',''CLR Procedure Cache'')
+ ,(''CACHESTORE_CLRUDTINFO'',''CLR UDT Info'')
+ ,(''CACHESTORE_COLUMNSTOREOBJECTPOOL'',''Column Store Object Pool'')
+ ,(''CACHESTORE_CONVPRI'',''Conversation Priority Cache'')
+ ,(''CACHESTORE_EVENTS'',''Event Notification Cache'')
+ ,(''CACHESTORE_FULLTEXTSTOPLIST'',''Full Text Stoplist Cache'')
+ ,(''CACHESTORE_NOTIF'',''Notification Store'')
+ ,(''CACHESTORE_OBJCP'',''Object Plans'')
+ ,(''CACHESTORE_PHDR'',''Bound Trees'')
+ ,(''CACHESTORE_SEARCHPROPERTYLIST'',''Search Property List Cache'')
+ ,(''CACHESTORE_SEHOBTCOLUMNATTRIBUTE'',''SE Shared Column Metadata Cache'')
+ ,(''CACHESTORE_SQLCP'',''SQL Plans'')
+ ,(''CACHESTORE_STACKFRAMES'',''SOS_StackFramesStore'')
+ ,(''CACHESTORE_SYSTEMROWSET'',''System Rowset Store'')
+ ,(''CACHESTORE_TEMPTABLES'',''Temporary Tables & Table Variables'')
+ ,(''CACHESTORE_VIEWDEFINITIONS'',''View Definition Cache'')
+ ,(''CACHESTORE_XML_SELECTIVE_DG'',''XML DB Cache (Selective)'')
+ ,(''CACHESTORE_XMLDBATTRIBUTE'',''XML DB Cache (Attribute)'')
+ ,(''CACHESTORE_XMLDBELEMENT'',''XML DB Cache (Element)'')
+ ,(''CACHESTORE_XMLDBTYPE'',''XML DB Cache (Type)'')
+ ,(''CACHESTORE_XPROC'',''Extended Stored Procedures'')
+ ,(''MEMORYCLERK_FILETABLE'',''Memory Clerk (File Table)'')
+ ,(''MEMORYCLERK_FSCHUNKER'',''Memory Clerk (FS Chunker)'')
+ ,(''MEMORYCLERK_FULLTEXT'',''Full Text'')
+ ,(''MEMORYCLERK_FULLTEXT_SHMEM'',''Full-text IG'')
+ ,(''MEMORYCLERK_HADR'',''HADR'')
+ ,(''MEMORYCLERK_HOST'',''Host'')
+ ,(''MEMORYCLERK_LANGSVC'',''Language Service'')
+ ,(''MEMORYCLERK_LWC'',''Light Weight Cache'')
+ ,(''MEMORYCLERK_QSRANGEPREFETCH'',''QS Range Prefetch'')
+ ,(''MEMORYCLERK_SERIALIZATION'',''Serialization'')
+ ,(''MEMORYCLERK_SNI'',''SNI'')
+ ,(''MEMORYCLERK_SOSMEMMANAGER'',''SOS Memory Manager'')
+ ,(''MEMORYCLERK_SOSNODE'',''SOS Node'')
+ ,(''MEMORYCLERK_SOSOS'',''SOS Memory Clerk'')
+ ,(''MEMORYCLERK_SQLBUFFERPOOL'',''Buffer Pool'')
+ ,(''MEMORYCLERK_SQLCLR'',''CLR'')
+ ,(''MEMORYCLERK_SQLCLRASSEMBLY'',''CLR Assembly'')
+ ,(''MEMORYCLERK_SQLCONNECTIONPOOL'',''Connection Pool'')
+ ,(''MEMORYCLERK_SQLGENERAL'',''General'')
+ ,(''MEMORYCLERK_SQLHTTP'',''HTTP'')
+ ,(''MEMORYCLERK_SQLLOGPOOL'',''Log Pool'')
+ ,(''MEMORYCLERK_SQLOPTIMIZER'',''SQL Optimizer'')
+ ,(''MEMORYCLERK_SQLQERESERVATIONS'',''SQL Reservations'')
+ ,(''MEMORYCLERK_SQLQUERYCOMPILE'',''SQL Query Compile'')
+ ,(''MEMORYCLERK_SQLQUERYEXEC'',''SQL Query Exec'')
+ ,(''MEMORYCLERK_SQLQUERYPLAN'',''SQL Query Plan'')
+ ,(''MEMORYCLERK_SQLSERVICEBROKER'',''SQL Service Broker'')
+ ,(''MEMORYCLERK_SQLSERVICEBROKERTRANSPORT'',''Unified Communication Stack'')
+ ,(''MEMORYCLERK_SQLSOAP'',''SQL SOAP'')
+ ,(''MEMORYCLERK_SQLSOAPSESSIONSTORE'',''SQL SOAP (Session Store)'')
+ ,(''MEMORYCLERK_SQLSTORENG'',''SQL Storage Engine'')
+ ,(''MEMORYCLERK_SQLUTILITIES'',''SQL Utilities'')
+ ,(''MEMORYCLERK_SQLXML'',''SQL XML'')
+ ,(''MEMORYCLERK_SQLXP'',''SQL XP'')
+ ,(''MEMORYCLERK_TRACE_EVTNOTIF'',''Trace Event Notification'')
+ ,(''MEMORYCLERK_XE'',''XE Engine'')
+ ,(''MEMORYCLERK_XE_BUFFER'',''XE Buffer'')
+ ,(''MEMORYCLERK_XTP'',''In-Memory OLTP'')
+ ,(''OBJECTSTORE_LBSS'',''Lbss Cache (Object Store)'')
+ ,(''OBJECTSTORE_LOCK_MANAGER'',''Lock Manager (Object Store)'')
+ ,(''OBJECTSTORE_SECAUDIT_EVENT_BUFFER'',''Audit Event Buffer (Object Store)'')
+ ,(''OBJECTSTORE_SERVICE_BROKER'',''Service Broker (Object Store)'')
+ ,(''OBJECTSTORE_SNI_PACKET'',''SNI Packet (Object Store)'')
+ ,(''OBJECTSTORE_XACT_CACHE'',''Transactions Cache (Object Store)'')
+ ,(''USERSTORE_DBMETADATA'',''DB Metadata (User Store)'')
+ ,(''USERSTORE_OBJPERM'',''Object Permissions (User Store)'')
+ ,(''USERSTORE_SCHEMAMGR'',''Schema Manager (User Store)'')
+ ,(''USERSTORE_SXC'',''SXC (User Store)'')
+ ,(''USERSTORE_TOKENPERM'',''Token Permissions (User Store)'')
+ ,(''USERSTORE_QDSSTMT'',''QDS Statement Buffer (Pre-persist)'')
+ ,(''CACHESTORE_QDSRUNTIMESTATS'',''QDS Runtime Stats (Pre-persist)'')
+ ,(''CACHESTORE_QDSCONTEXTSETTINGS'',''QDS Unique Context Settings'')
+ ,(''MEMORYCLERK_QUERYDISKSTORE'',''QDS General'')
+ ,(''MEMORYCLERK_QUERYDISKSTORE_HASHMAP'',''QDS Query/Plan Hash Table'')
+) AS clerk_names([system_name],[name])
+ ON mc.[type] = clerk_names.[system_name]
+GROUP BY
+ ISNULL(clerk_names.[name], mc.[type])
+HAVING
+ SUM(' + @Columns + N') >= 1024
+OPTION(RECOMPILE);
+'
+
+EXEC(@SqlStatement)
+`
+
+const sqlServerDatabaseIO = `
+DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+
+ DECLARE @MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int) * 100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+
+ IF @EngineEdition IN (2,3,4) /*Standard,Enterpris,Express*/
+ BEGIN
+ DECLARE @Columns as nvarchar(max) = ''
+ DECLARE @Tables as nvarchar(max) = ''
+ IF @MajorMinorVersion >= 1050 BEGIN
+ /*in [volume_mount_point] any trailing "\" char will be removed by telegraf */
+ SET @Columns += N',[volume_mount_point]'
+ SET @Tables += N'CROSS APPLY sys.dm_os_volume_stats(vfs.[database_id], vfs.[file_id]) AS vs'
+ END
+
+ IF @MajorMinorVersion > 1100 BEGIN
+ SET @Columns += N'
+ ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms]
+ ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms]'
+ END
+
+ SET @SqlStatement = N'
+ SELECT
+ ''sqlserver_database_io'' AS [measurement]
+ ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
+ ,DB_NAME(vfs.[database_id]) AS [database_name]
+ ,COALESCE(mf.[physical_name],''RBPEX'') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension
+ ,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension
+ ,mf.[type_desc] AS [file_type]
+ ,vfs.[io_stall_read_ms] AS [read_latency_ms]
+ ,vfs.[num_of_reads] AS [reads]
+ ,vfs.[num_of_bytes_read] AS [read_bytes]
+ ,vfs.[io_stall_write_ms] AS [write_latency_ms]
+ ,vfs.[num_of_writes] AS [writes]
+ ,vfs.[num_of_bytes_written] AS [write_bytes]'
+ + @Columns + N'
+ FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs
+ INNER JOIN sys.master_files AS mf WITH (NOLOCK)
+ ON vfs.[database_id] = mf.[database_id] AND vfs.[file_id] = mf.[file_id]
+ '
+ + @Tables;
+ EXEC sp_executesql @SqlStatement
+ END
+ `
+
+const sqlServerProperties = `
+DECLARE
+ @SqlStatement AS nvarchar(max) = ''
+ ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+
+IF @EngineEdition IN (2,3,4) /*Standard,Enterpris,Express*/
+BEGIN
+ DECLARE @MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+ DECLARE @Columns AS nvarchar(MAX) = ''
+
+ IF @MajorMinorVersion >= 1050
+ SET @Columns = N',CASE [virtual_machine_type_desc]
+ WHEN ''NONE'' THEN ''PHYSICAL Machine''
+ ELSE [virtual_machine_type_desc]
+ END AS [hardware_type]';
+ ELSE /*data not available*/
+ SET @Columns = N','''' AS [hardware_type]';
+
+ SET @SqlStatement = 'SELECT ''sqlserver_server_properties'' AS [measurement],
+ REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance],
+ [cpu_count]
+ ,(SELECT [total_physical_memory_kb] FROM sys.[dm_os_sys_memory]) AS [server_memory]
+ ,CAST(SERVERPROPERTY(''Edition'') AS NVARCHAR) AS [sku]
+ ,@EngineEdition AS [engine_edition]
+ ,DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) AS [uptime]
+ ' + @Columns + ',
+ SERVERPROPERTY(''ProductVersion'') AS sql_version,
+ db_online,
+ db_restoring,
+ db_recovering,
+ db_recoveryPending,
+ db_suspect,
+ db_offline
+ FROM sys.[dm_os_sys_info]
+ CROSS APPLY
+ ( SELECT SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online,
+ SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring,
+ SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering,
+ SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending,
+ SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect,
+ SUM( CASE WHEN state = 6 or state = 10 THEN 1 ELSE 0 END ) AS db_offline
+ FROM sys.databases
+ ) AS dbs';
+
+ EXEC sp_executesql @SqlStatement , N'@EngineEdition smallint', @EngineEdition = @EngineEdition;
+END
+`
+
+const sqlServerSchedulers string = `
+IF SERVERPROPERTY('EngineEdition') IN (2,3,4) /*Standard,Enterpris,Express*/
+BEGIN
+ DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+ ,@Columns AS nvarchar(MAX) = ''
+
+ IF @MajorMinorVersion >= 1300 BEGIN
+ SET @Columns += N',s.[total_cpu_usage_ms]
+ ,s.[total_scheduler_delay_ms]'
+ END
+
+ SET @SqlStatement = N'
+ SELECT
+ ''sqlserver_schedulers'' AS [measurement]
+ ,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance]
+ ,cast(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id]
+ ,cast(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id]
+ ,s.[is_online]
+ ,s.[is_idle]
+ ,s.[preemptive_switches_count]
+ ,s.[context_switches_count]
+ ,s.[current_tasks_count]
+ ,s.[runnable_tasks_count]
+ ,s.[current_workers_count]
+ ,s.[active_workers_count]
+ ,s.[work_queue_count]
+ ,s.[pending_disk_io_count]
+ ,s.[load_factor]
+ ,s.[yield_count]
+ ' + @Columns + N'
+ FROM sys.dm_os_schedulers AS s'
+
+ EXEC sp_executesql @SqlStatement
+END
+`
+
+const sqlServerPerformanceCounters string = `
+DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+ ,@Columns AS nvarchar(MAX) = ''
+ ,@PivotColumns AS nvarchar(MAX) = ''
+
+IF @EngineEdition IN (2,3,4) /*Standard,Enterpris,Express*/
+BEGIN
+ DECLARE @PCounters TABLE
+ (
+ object_name nvarchar(128),
+ counter_name nvarchar(128),
+ instance_name nvarchar(128),
+ cntr_value bigint,
+ cntr_type INT,
+ Primary Key(object_name, counter_name, instance_name)
+ );
+
+ SET @SqlStatement = N'SELECT DISTINCT
+ RTrim(spi.object_name) object_name,
+ RTrim(spi.counter_name) counter_name,
+ RTRIM(spi.instance_name) as instance_name,
+ CAST(spi.cntr_value AS BIGINT) AS cntr_value,
+ spi.cntr_type
+ FROM sys.dm_os_performance_counters AS spi
+ WHERE (
+ counter_name IN (
+ ''SQL Compilations/sec'',
+ ''SQL Re-Compilations/sec'',
+ ''User Connections'',
+ ''Batch Requests/sec'',
+ ''Logouts/sec'',
+ ''Logins/sec'',
+ ''Processes blocked'',
+ ''Latch Waits/sec'',
+ ''Full Scans/sec'',
+ ''Index Searches/sec'',
+ ''Page Splits/sec'',
+ ''Page lookups/sec'',
+ ''Page reads/sec'',
+ ''Page writes/sec'',
+ ''Readahead pages/sec'',
+ ''Lazy writes/sec'',
+ ''Checkpoint pages/sec'',
+ ''Page life expectancy'',
+ ''Log File(s) Size (KB)'',
+ ''Log File(s) Used Size (KB)'',
+ ''Data File(s) Size (KB)'',
+ ''Transactions/sec'',
+ ''Write Transactions/sec'',
+ ''Active Temp Tables'',
+ ''Temp Tables Creation Rate'',
+ ''Temp Tables For Destruction'',
+ ''Free Space in tempdb (KB)'',
+ ''Version Store Size (KB)'',
+ ''Memory Grants Pending'',
+ ''Memory Grants Outstanding'',
+ ''Free list stalls/sec'',
+ ''Buffer cache hit ratio'',
+ ''Buffer cache hit ratio base'',
+ ''Backup/Restore Throughput/sec'',
+ ''Total Server Memory (KB)'',
+ ''Target Server Memory (KB)'',
+ ''Log Flushes/sec'',
+ ''Log Flush Wait Time'',
+ ''Memory broker clerk size'',
+ ''Log Bytes Flushed/sec'',
+ ''Bytes Sent to Replica/sec'',
+ ''Log Send Queue'',
+ ''Bytes Sent to Transport/sec'',
+ ''Sends to Replica/sec'',
+ ''Bytes Sent to Transport/sec'',
+ ''Sends to Transport/sec'',
+ ''Bytes Received from Replica/sec'',
+ ''Receives from Replica/sec'',
+ ''Flow Control Time (ms/sec)'',
+ ''Flow Control/sec'',
+ ''Resent Messages/sec'',
+ ''Redone Bytes/sec'',
+ ''XTP Memory Used (KB)'',
+ ''Transaction Delay'',
+ ''Log Bytes Received/sec'',
+ ''Log Apply Pending Queue'',
+ ''Redone Bytes/sec'',
+ ''Recovery Queue'',
+ ''Log Apply Ready Queue'',
+ ''CPU usage %'',
+ ''CPU usage % base'',
+ ''Queued requests'',
+ ''Requests completed/sec'',
+ ''Blocked tasks'',
+ ''Active memory grant amount (KB)'',
+ ''Disk Read Bytes/sec'',
+ ''Disk Read IO Throttled/sec'',
+ ''Disk Read IO/sec'',
+ ''Disk Write Bytes/sec'',
+ ''Disk Write IO Throttled/sec'',
+ ''Disk Write IO/sec'',
+ ''Used memory (KB)'',
+ ''Forwarded Records/sec'',
+ ''Background Writer pages/sec'',
+ ''Percent Log Used'',
+ ''Log Send Queue KB'',
+ ''Redo Queue KB'',
+ ''Mirrored Write Transactions/sec'',
+ ''Group Commit Time'',
+ ''Group Commits/Sec''
+ )
+ ) OR (
+ object_name LIKE ''%User Settable%''
+ OR object_name LIKE ''%SQL Errors%''
+ ) OR (
+ object_name LIKE ''%Batch Resp Statistics%''
+ ) OR (
+ instance_name IN (''_Total'')
+ AND counter_name IN (
+ ''Lock Timeouts/sec'',
+ ''Lock Timeouts (timeout > 0)/sec'',
+ ''Number of Deadlocks/sec'',
+ ''Lock Waits/sec'',
+ ''Latch Waits/sec''
+ )
+ )
+ '
+ INSERT INTO @PCounters
+ EXEC (@SqlStatement)
+
+ IF @MajorMinorVersion >= 1300 BEGIN
+ SET @Columns += N',rgwg.[total_cpu_usage_preemptive_ms] AS [Preemptive CPU Usage (time)]'
+ SET @PivotColumns += N',[Preemptive CPU Usage (time)]'
+ END
+
+ SET @SqlStatement = N'
+ SELECT
+ ''SQLServer:Workload Group Stats'' AS [object]
+ ,[counter]
+ ,[instance]
+ ,CAST(vs.[value] AS BIGINT) AS [value]
+ ,1
+ FROM
+ (
+ SELECT
+ rgwg.name AS instance
+ ,rgwg.total_request_count AS [Request Count]
+ ,rgwg.total_queued_request_count AS [Queued Request Count]
+ ,rgwg.total_cpu_limit_violation_count AS [CPU Limit Violation Count]
+ ,rgwg.total_cpu_usage_ms AS [CPU Usage (time)]
+ ,rgwg.total_lock_wait_count AS [Lock Wait Count]
+ ,rgwg.total_lock_wait_time_ms AS [Lock Wait Time]
+ ,rgwg.total_reduced_memgrant_count AS [Reduced Memory Grant Count]
+ ' + @Columns + N'
+ FROM sys.[dm_resource_governor_workload_groups] AS rgwg
+ INNER JOIN sys.[dm_resource_governor_resource_pools] AS rgrp
+ ON rgwg.[pool_id] = rgrp.[pool_id]
+ ) AS rg
+ UNPIVOT (
+ value FOR counter IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], [Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] ' + @PivotColumns + N')
+ ) AS vs'
+
+ INSERT INTO @PCounters
+ EXEC( @SqlStatement )
+
+ SELECT 'sqlserver_performance' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ pc.object_name AS [object],
+ pc.counter_name AS [counter],
+ CASE pc.instance_name WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.instance_name,'') END AS [instance],
+ CAST(CASE WHEN pc.cntr_type = 537003264 AND pc1.cntr_value > 0 THEN (pc.cntr_value * 1.0) / (pc1.cntr_value * 1.0) * 100 ELSE pc.cntr_value END AS float(10)) AS [value],
+ -- cast to string as TAG
+ cast(pc.cntr_type as varchar(25)) as [counter_type]
+ FROM @PCounters AS pc
+ LEFT OUTER JOIN @PCounters AS pc1
+ ON (
+ pc.counter_name = REPLACE(pc1.counter_name,' base','')
+ OR pc.counter_name = REPLACE(pc1.counter_name,' base',' (ms)')
+ )
+ AND pc.object_name = pc1.object_name
+ AND pc.instance_name = pc1.instance_name
+ AND pc1.counter_name LIKE '%base'
+ WHERE pc.counter_name NOT LIKE '% base'
+ OPTION(RECOMPILE);
+END
+`
+
+const sqlServerWaitStatsCategorized string = `
+IF SERVERPROPERTY('EngineEdition') IN (2,3,4) /*Standard,Enterpris,Express*/
+ SELECT
+ 'sqlserver_waitstats' AS [measurement],
+ REPLACE(@@SERVERNAME,'\',':') AS [sql_instance],
+ ws.wait_type,
+ wait_time_ms,
+ wait_time_ms - signal_wait_time_ms AS [resource_wait_ms],
+ signal_wait_time_ms,
+ max_wait_time_ms,
+ waiting_tasks_count,
+ ISNULL(wc.wait_category,'OTHER') AS [wait_category]
+ FROM
+ sys.dm_os_wait_stats AS ws WITH (NOLOCK)
+ LEFT OUTER JOIN ( VALUES
+ ('ASYNC_IO_COMPLETION','Other Disk IO'),
+ ('ASYNC_NETWORK_IO','Network IO'),
+ ('BACKUPIO','Other Disk IO'),
+ ('BROKER_CONNECTION_RECEIVE_TASK','Service Broker'),
+ ('BROKER_DISPATCHER','Service Broker'),
+ ('BROKER_ENDPOINT_STATE_MUTEX','Service Broker'),
+ ('BROKER_EVENTHANDLER','Service Broker'),
+ ('BROKER_FORWARDER','Service Broker'),
+ ('BROKER_INIT','Service Broker'),
+ ('BROKER_MASTERSTART','Service Broker'),
+ ('BROKER_RECEIVE_WAITFOR','User Wait'),
+ ('BROKER_REGISTERALLENDPOINTS','Service Broker'),
+ ('BROKER_SERVICE','Service Broker'),
+ ('BROKER_SHUTDOWN','Service Broker'),
+ ('BROKER_START','Service Broker'),
+ ('BROKER_TASK_SHUTDOWN','Service Broker'),
+ ('BROKER_TASK_STOP','Service Broker'),
+ ('BROKER_TASK_SUBMIT','Service Broker'),
+ ('BROKER_TO_FLUSH','Service Broker'),
+ ('BROKER_TRANSMISSION_OBJECT','Service Broker'),
+ ('BROKER_TRANSMISSION_TABLE','Service Broker'),
+ ('BROKER_TRANSMISSION_WORK','Service Broker'),
+ ('BROKER_TRANSMITTER','Service Broker'),
+ ('CHECKPOINT_QUEUE','Idle'),
+ ('CHKPT','Tran Log IO'),
+ ('CLR_AUTO_EVENT','SQL CLR'),
+ ('CLR_CRST','SQL CLR'),
+ ('CLR_JOIN','SQL CLR'),
+ ('CLR_MANUAL_EVENT','SQL CLR'),
+ ('CLR_MEMORY_SPY','SQL CLR'),
+ ('CLR_MONITOR','SQL CLR'),
+ ('CLR_RWLOCK_READER','SQL CLR'),
+ ('CLR_RWLOCK_WRITER','SQL CLR'),
+ ('CLR_SEMAPHORE','SQL CLR'),
+ ('CLR_TASK_START','SQL CLR'),
+ ('CLRHOST_STATE_ACCESS','SQL CLR'),
+ ('CMEMPARTITIONED','Memory'),
+ ('CMEMTHREAD','Memory'),
+ ('CXPACKET','Parallelism'),
+ ('CXCONSUMER','Parallelism'),
+ ('DBMIRROR_DBM_EVENT','Mirroring'),
+ ('DBMIRROR_DBM_MUTEX','Mirroring'),
+ ('DBMIRROR_EVENTS_QUEUE','Mirroring'),
+ ('DBMIRROR_SEND','Mirroring'),
+ ('DBMIRROR_WORKER_QUEUE','Mirroring'),
+ ('DBMIRRORING_CMD','Mirroring'),
+ ('DTC','Transaction'),
+ ('DTC_ABORT_REQUEST','Transaction'),
+ ('DTC_RESOLVE','Transaction'),
+ ('DTC_STATE','Transaction'),
+ ('DTC_TMDOWN_REQUEST','Transaction'),
+ ('DTC_WAITFOR_OUTCOME','Transaction'),
+ ('DTCNEW_ENLIST','Transaction'),
+ ('DTCNEW_PREPARE','Transaction'),
+ ('DTCNEW_RECOVERY','Transaction'),
+ ('DTCNEW_TM','Transaction'),
+ ('DTCNEW_TRANSACTION_ENLISTMENT','Transaction'),
+ ('DTCPNTSYNC','Transaction'),
+ ('EE_PMOLOCK','Memory'),
+ ('EXCHANGE','Parallelism'),
+ ('EXTERNAL_SCRIPT_NETWORK_IOF','Network IO'),
+ ('FCB_REPLICA_READ','Replication'),
+ ('FCB_REPLICA_WRITE','Replication'),
+ ('FT_COMPROWSET_RWLOCK','Full Text Search'),
+ ('FT_IFTS_RWLOCK','Full Text Search'),
+ ('FT_IFTS_SCHEDULER_IDLE_WAIT','Idle'),
+ ('FT_IFTSHC_MUTEX','Full Text Search'),
+ ('FT_IFTSISM_MUTEX','Full Text Search'),
+ ('FT_MASTER_MERGE','Full Text Search'),
+ ('FT_MASTER_MERGE_COORDINATOR','Full Text Search'),
+ ('FT_METADATA_MUTEX','Full Text Search'),
+ ('FT_PROPERTYLIST_CACHE','Full Text Search'),
+ ('FT_RESTART_CRAWL','Full Text Search'),
+ ('FULLTEXT GATHERER','Full Text Search'),
+ ('HADR_AG_MUTEX','Replication'),
+ ('HADR_AR_CRITICAL_SECTION_ENTRY','Replication'),
+ ('HADR_AR_MANAGER_MUTEX','Replication'),
+ ('HADR_AR_UNLOAD_COMPLETED','Replication'),
+ ('HADR_ARCONTROLLER_NOTIFICATIONS_SUBSCRIBER_LIST','Replication'),
+ ('HADR_BACKUP_BULK_LOCK','Replication'),
+ ('HADR_BACKUP_QUEUE','Replication'),
+ ('HADR_CLUSAPI_CALL','Replication'),
+ ('HADR_COMPRESSED_CACHE_SYNC','Replication'),
+ ('HADR_CONNECTIVITY_INFO','Replication'),
+ ('HADR_DATABASE_FLOW_CONTROL','Replication'),
+ ('HADR_DATABASE_VERSIONING_STATE','Replication'),
+ ('HADR_DATABASE_WAIT_FOR_RECOVERY','Replication'),
+ ('HADR_DATABASE_WAIT_FOR_RESTART','Replication'),
+ ('HADR_DATABASE_WAIT_FOR_TRANSITION_TO_VERSIONING','Replication'),
+ ('HADR_DB_COMMAND','Replication'),
+ ('HADR_DB_OP_COMPLETION_SYNC','Replication'),
+ ('HADR_DB_OP_START_SYNC','Replication'),
+ ('HADR_DBR_SUBSCRIBER','Replication'),
+ ('HADR_DBR_SUBSCRIBER_FILTER_LIST','Replication'),
+ ('HADR_DBSEEDING','Replication'),
+ ('HADR_DBSEEDING_LIST','Replication'),
+ ('HADR_DBSTATECHANGE_SYNC','Replication'),
+ ('HADR_FABRIC_CALLBACK','Replication'),
+ ('HADR_FILESTREAM_BLOCK_FLUSH','Replication'),
+ ('HADR_FILESTREAM_FILE_CLOSE','Replication'),
+ ('HADR_FILESTREAM_FILE_REQUEST','Replication'),
+ ('HADR_FILESTREAM_IOMGR','Replication'),
+ ('HADR_FILESTREAM_IOMGR_IOCOMPLETION','Replication'),
+ ('HADR_FILESTREAM_MANAGER','Replication'),
+ ('HADR_FILESTREAM_PREPROC','Replication'),
+ ('HADR_GROUP_COMMIT','Replication'),
+ ('HADR_LOGCAPTURE_SYNC','Replication'),
+ ('HADR_LOGCAPTURE_WAIT','Replication'),
+ ('HADR_LOGPROGRESS_SYNC','Replication'),
+ ('HADR_NOTIFICATION_DEQUEUE','Replication'),
+ ('HADR_NOTIFICATION_WORKER_EXCLUSIVE_ACCESS','Replication'),
+ ('HADR_NOTIFICATION_WORKER_STARTUP_SYNC','Replication'),
+ ('HADR_NOTIFICATION_WORKER_TERMINATION_SYNC','Replication'),
+ ('HADR_PARTNER_SYNC','Replication'),
+ ('HADR_READ_ALL_NETWORKS','Replication'),
+ ('HADR_RECOVERY_WAIT_FOR_CONNECTION','Replication'),
+ ('HADR_RECOVERY_WAIT_FOR_UNDO','Replication'),
+ ('HADR_REPLICAINFO_SYNC','Replication'),
+ ('HADR_SEEDING_CANCELLATION','Replication'),
+ ('HADR_SEEDING_FILE_LIST','Replication'),
+ ('HADR_SEEDING_LIMIT_BACKUPS','Replication'),
+ ('HADR_SEEDING_SYNC_COMPLETION','Replication'),
+ ('HADR_SEEDING_TIMEOUT_TASK','Replication'),
+ ('HADR_SEEDING_WAIT_FOR_COMPLETION','Replication'),
+ ('HADR_SYNC_COMMIT','Replication'),
+ ('HADR_SYNCHRONIZING_THROTTLE','Replication'),
+ ('HADR_TDS_LISTENER_SYNC','Replication'),
+ ('HADR_TDS_LISTENER_SYNC_PROCESSING','Replication'),
+ ('HADR_THROTTLE_LOG_RATE_GOVERNOR','Log Rate Governor'),
+ ('HADR_TIMER_TASK','Replication'),
+ ('HADR_TRANSPORT_DBRLIST','Replication'),
+ ('HADR_TRANSPORT_FLOW_CONTROL','Replication'),
+ ('HADR_TRANSPORT_SESSION','Replication'),
+ ('HADR_WORK_POOL','Replication'),
+ ('HADR_WORK_QUEUE','Replication'),
+ ('HADR_XRF_STACK_ACCESS','Replication'),
+ ('INSTANCE_LOG_RATE_GOVERNOR','Log Rate Governor'),
+ ('IO_COMPLETION','Other Disk IO'),
+ ('IO_QUEUE_LIMIT','Other Disk IO'),
+ ('IO_RETRY','Other Disk IO'),
+ ('LATCH_DT','Latch'),
+ ('LATCH_EX','Latch'),
+ ('LATCH_KP','Latch'),
+ ('LATCH_NL','Latch'),
+ ('LATCH_SH','Latch'),
+ ('LATCH_UP','Latch'),
+ ('LAZYWRITER_SLEEP','Idle'),
+ ('LCK_M_BU','Lock'),
+ ('LCK_M_BU_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_BU_LOW_PRIORITY','Lock'),
+ ('LCK_M_IS','Lock'),
+ ('LCK_M_IS_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_IS_LOW_PRIORITY','Lock'),
+ ('LCK_M_IU','Lock'),
+ ('LCK_M_IU_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_IU_LOW_PRIORITY','Lock'),
+ ('LCK_M_IX','Lock'),
+ ('LCK_M_IX_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_IX_LOW_PRIORITY','Lock'),
+ ('LCK_M_RIn_NL','Lock'),
+ ('LCK_M_RIn_NL_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_RIn_NL_LOW_PRIORITY','Lock'),
+ ('LCK_M_RIn_S','Lock'),
+ ('LCK_M_RIn_S_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_RIn_S_LOW_PRIORITY','Lock'),
+ ('LCK_M_RIn_U','Lock'),
+ ('LCK_M_RIn_U_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_RIn_U_LOW_PRIORITY','Lock'),
+ ('LCK_M_RIn_X','Lock'),
+ ('LCK_M_RIn_X_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_RIn_X_LOW_PRIORITY','Lock'),
+ ('LCK_M_RS_S','Lock'),
+ ('LCK_M_RS_S_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_RS_S_LOW_PRIORITY','Lock'),
+ ('LCK_M_RS_U','Lock'),
+ ('LCK_M_RS_U_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_RS_U_LOW_PRIORITY','Lock'),
+ ('LCK_M_RX_S','Lock'),
+ ('LCK_M_RX_S_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_RX_S_LOW_PRIORITY','Lock'),
+ ('LCK_M_RX_U','Lock'),
+ ('LCK_M_RX_U_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_RX_U_LOW_PRIORITY','Lock'),
+ ('LCK_M_RX_X','Lock'),
+ ('LCK_M_RX_X_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_RX_X_LOW_PRIORITY','Lock'),
+ ('LCK_M_S','Lock'),
+ ('LCK_M_S_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_S_LOW_PRIORITY','Lock'),
+ ('LCK_M_SCH_M','Lock'),
+ ('LCK_M_SCH_M_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_SCH_M_LOW_PRIORITY','Lock'),
+ ('LCK_M_SCH_S','Lock'),
+ ('LCK_M_SCH_S_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_SCH_S_LOW_PRIORITY','Lock'),
+ ('LCK_M_SIU','Lock'),
+ ('LCK_M_SIU_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_SIU_LOW_PRIORITY','Lock'),
+ ('LCK_M_SIX','Lock'),
+ ('LCK_M_SIX_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_SIX_LOW_PRIORITY','Lock'),
+ ('LCK_M_U','Lock'),
+ ('LCK_M_U_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_U_LOW_PRIORITY','Lock'),
+ ('LCK_M_UIX','Lock'),
+ ('LCK_M_UIX_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_UIX_LOW_PRIORITY','Lock'),
+ ('LCK_M_X','Lock'),
+ ('LCK_M_X_ABORT_BLOCKERS','Lock'),
+ ('LCK_M_X_LOW_PRIORITY','Lock'),
+ ('LOGBUFFER','Tran Log IO'),
+ ('LOGMGR','Tran Log IO'),
+ ('LOGMGR_FLUSH','Tran Log IO'),
+ ('LOGMGR_PMM_LOG','Tran Log IO'),
+ ('LOGMGR_QUEUE','Idle'),
+ ('LOGMGR_RESERVE_APPEND','Tran Log IO'),
+ ('MEMORY_ALLOCATION_EXT','Memory'),
+ ('MEMORY_GRANT_UPDATE','Memory'),
+ ('MSQL_XACT_MGR_MUTEX','Transaction'),
+ ('MSQL_XACT_MUTEX','Transaction'),
+ ('MSSEARCH','Full Text Search'),
+ ('NET_WAITFOR_PACKET','Network IO'),
+ ('ONDEMAND_TASK_QUEUE','Idle'),
+ ('PAGEIOLATCH_DT','Buffer IO'),
+ ('PAGEIOLATCH_EX','Buffer IO'),
+ ('PAGEIOLATCH_KP','Buffer IO'),
+ ('PAGEIOLATCH_NL','Buffer IO'),
+ ('PAGEIOLATCH_SH','Buffer IO'),
+ ('PAGEIOLATCH_UP','Buffer IO'),
+ ('PAGELATCH_DT','Buffer Latch'),
+ ('PAGELATCH_EX','Buffer Latch'),
+ ('PAGELATCH_KP','Buffer Latch'),
+ ('PAGELATCH_NL','Buffer Latch'),
+ ('PAGELATCH_SH','Buffer Latch'),
+ ('PAGELATCH_UP','Buffer Latch'),
+ ('POOL_LOG_RATE_GOVERNOR','Log Rate Governor'),
+ ('PREEMPTIVE_ABR','Preemptive'),
+ ('PREEMPTIVE_CLOSEBACKUPMEDIA','Preemptive'),
+ ('PREEMPTIVE_CLOSEBACKUPTAPE','Preemptive'),
+ ('PREEMPTIVE_CLOSEBACKUPVDIDEVICE','Preemptive'),
+ ('PREEMPTIVE_CLUSAPI_CLUSTERRESOURCECONTROL','Preemptive'),
+ ('PREEMPTIVE_COM_COCREATEINSTANCE','Preemptive'),
+ ('PREEMPTIVE_COM_COGETCLASSOBJECT','Preemptive'),
+ ('PREEMPTIVE_COM_CREATEACCESSOR','Preemptive'),
+ ('PREEMPTIVE_COM_DELETEROWS','Preemptive'),
+ ('PREEMPTIVE_COM_GETCOMMANDTEXT','Preemptive'),
+ ('PREEMPTIVE_COM_GETDATA','Preemptive'),
+ ('PREEMPTIVE_COM_GETNEXTROWS','Preemptive'),
+ ('PREEMPTIVE_COM_GETRESULT','Preemptive'),
+ ('PREEMPTIVE_COM_GETROWSBYBOOKMARK','Preemptive'),
+ ('PREEMPTIVE_COM_LBFLUSH','Preemptive'),
+ ('PREEMPTIVE_COM_LBLOCKREGION','Preemptive'),
+ ('PREEMPTIVE_COM_LBREADAT','Preemptive'),
+ ('PREEMPTIVE_COM_LBSETSIZE','Preemptive'),
+ ('PREEMPTIVE_COM_LBSTAT','Preemptive'),
+ ('PREEMPTIVE_COM_LBUNLOCKREGION','Preemptive'),
+ ('PREEMPTIVE_COM_LBWRITEAT','Preemptive'),
+ ('PREEMPTIVE_COM_QUERYINTERFACE','Preemptive'),
+ ('PREEMPTIVE_COM_RELEASE','Preemptive'),
+ ('PREEMPTIVE_COM_RELEASEACCESSOR','Preemptive'),
+ ('PREEMPTIVE_COM_RELEASEROWS','Preemptive'),
+ ('PREEMPTIVE_COM_RELEASESESSION','Preemptive'),
+ ('PREEMPTIVE_COM_RESTARTPOSITION','Preemptive'),
+ ('PREEMPTIVE_COM_SEQSTRMREAD','Preemptive'),
+ ('PREEMPTIVE_COM_SEQSTRMREADANDWRITE','Preemptive'),
+ ('PREEMPTIVE_COM_SETDATAFAILURE','Preemptive'),
+ ('PREEMPTIVE_COM_SETPARAMETERINFO','Preemptive'),
+ ('PREEMPTIVE_COM_SETPARAMETERPROPERTIES','Preemptive'),
+ ('PREEMPTIVE_COM_STRMLOCKREGION','Preemptive'),
+ ('PREEMPTIVE_COM_STRMSEEKANDREAD','Preemptive'),
+ ('PREEMPTIVE_COM_STRMSEEKANDWRITE','Preemptive'),
+ ('PREEMPTIVE_COM_STRMSETSIZE','Preemptive'),
+ ('PREEMPTIVE_COM_STRMSTAT','Preemptive'),
+ ('PREEMPTIVE_COM_STRMUNLOCKREGION','Preemptive'),
+ ('PREEMPTIVE_CONSOLEWRITE','Preemptive'),
+ ('PREEMPTIVE_CREATEPARAM','Preemptive'),
+ ('PREEMPTIVE_DEBUG','Preemptive'),
+ ('PREEMPTIVE_DFSADDLINK','Preemptive'),
+ ('PREEMPTIVE_DFSLINKEXISTCHECK','Preemptive'),
+ ('PREEMPTIVE_DFSLINKHEALTHCHECK','Preemptive'),
+ ('PREEMPTIVE_DFSREMOVELINK','Preemptive'),
+ ('PREEMPTIVE_DFSREMOVEROOT','Preemptive'),
+ ('PREEMPTIVE_DFSROOTFOLDERCHECK','Preemptive'),
+ ('PREEMPTIVE_DFSROOTINIT','Preemptive'),
+ ('PREEMPTIVE_DFSROOTSHARECHECK','Preemptive'),
+ ('PREEMPTIVE_DTC_ABORT','Preemptive'),
+ ('PREEMPTIVE_DTC_ABORTREQUESTDONE','Preemptive'),
+ ('PREEMPTIVE_DTC_BEGINTRANSACTION','Preemptive'),
+ ('PREEMPTIVE_DTC_COMMITREQUESTDONE','Preemptive'),
+ ('PREEMPTIVE_DTC_ENLIST','Preemptive'),
+ ('PREEMPTIVE_DTC_PREPAREREQUESTDONE','Preemptive'),
+ ('PREEMPTIVE_FILESIZEGET','Preemptive'),
+ ('PREEMPTIVE_FSAOLEDB_ABORTTRANSACTION','Preemptive'),
+ ('PREEMPTIVE_FSAOLEDB_COMMITTRANSACTION','Preemptive'),
+ ('PREEMPTIVE_FSAOLEDB_STARTTRANSACTION','Preemptive'),
+ ('PREEMPTIVE_FSRECOVER_UNCONDITIONALUNDO','Preemptive'),
+ ('PREEMPTIVE_GETRMINFO','Preemptive'),
+ ('PREEMPTIVE_HADR_LEASE_MECHANISM','Preemptive'),
+ ('PREEMPTIVE_HTTP_EVENT_WAIT','Preemptive'),
+ ('PREEMPTIVE_HTTP_REQUEST','Preemptive'),
+ ('PREEMPTIVE_LOCKMONITOR','Preemptive'),
+ ('PREEMPTIVE_MSS_RELEASE','Preemptive'),
+ ('PREEMPTIVE_ODBCOPS','Preemptive'),
+ ('PREEMPTIVE_OLE_UNINIT','Preemptive'),
+ ('PREEMPTIVE_OLEDB_ABORTORCOMMITTRAN','Preemptive'),
+ ('PREEMPTIVE_OLEDB_ABORTTRAN','Preemptive'),
+ ('PREEMPTIVE_OLEDB_GETDATASOURCE','Preemptive'),
+ ('PREEMPTIVE_OLEDB_GETLITERALINFO','Preemptive'),
+ ('PREEMPTIVE_OLEDB_GETPROPERTIES','Preemptive'),
+ ('PREEMPTIVE_OLEDB_GETPROPERTYINFO','Preemptive'),
+ ('PREEMPTIVE_OLEDB_GETSCHEMALOCK','Preemptive'),
+ ('PREEMPTIVE_OLEDB_JOINTRANSACTION','Preemptive'),
+ ('PREEMPTIVE_OLEDB_RELEASE','Preemptive'),
+ ('PREEMPTIVE_OLEDB_SETPROPERTIES','Preemptive'),
+ ('PREEMPTIVE_OLEDBOPS','Preemptive'),
+ ('PREEMPTIVE_OS_ACCEPTSECURITYCONTEXT','Preemptive'),
+ ('PREEMPTIVE_OS_ACQUIRECREDENTIALSHANDLE','Preemptive'),
+ ('PREEMPTIVE_OS_AUTHENTICATIONOPS','Preemptive'),
+ ('PREEMPTIVE_OS_AUTHORIZATIONOPS','Preemptive'),
+ ('PREEMPTIVE_OS_AUTHZGETINFORMATIONFROMCONTEXT','Preemptive'),
+ ('PREEMPTIVE_OS_AUTHZINITIALIZECONTEXTFROMSID','Preemptive'),
+ ('PREEMPTIVE_OS_AUTHZINITIALIZERESOURCEMANAGER','Preemptive'),
+ ('PREEMPTIVE_OS_BACKUPREAD','Preemptive'),
+ ('PREEMPTIVE_OS_CLOSEHANDLE','Preemptive'),
+ ('PREEMPTIVE_OS_CLUSTEROPS','Preemptive'),
+ ('PREEMPTIVE_OS_COMOPS','Preemptive'),
+ ('PREEMPTIVE_OS_COMPLETEAUTHTOKEN','Preemptive'),
+ ('PREEMPTIVE_OS_COPYFILE','Preemptive'),
+ ('PREEMPTIVE_OS_CREATEDIRECTORY','Preemptive'),
+ ('PREEMPTIVE_OS_CREATEFILE','Preemptive'),
+ ('PREEMPTIVE_OS_CRYPTACQUIRECONTEXT','Preemptive'),
+ ('PREEMPTIVE_OS_CRYPTIMPORTKEY','Preemptive'),
+ ('PREEMPTIVE_OS_CRYPTOPS','Preemptive'),
+ ('PREEMPTIVE_OS_DECRYPTMESSAGE','Preemptive'),
+ ('PREEMPTIVE_OS_DELETEFILE','Preemptive'),
+ ('PREEMPTIVE_OS_DELETESECURITYCONTEXT','Preemptive'),
+ ('PREEMPTIVE_OS_DEVICEIOCONTROL','Preemptive'),
+ ('PREEMPTIVE_OS_DEVICEOPS','Preemptive'),
+ ('PREEMPTIVE_OS_DIRSVC_NETWORKOPS','Preemptive'),
+ ('PREEMPTIVE_OS_DISCONNECTNAMEDPIPE','Preemptive'),
+ ('PREEMPTIVE_OS_DOMAINSERVICESOPS','Preemptive'),
+ ('PREEMPTIVE_OS_DSGETDCNAME','Preemptive'),
+ ('PREEMPTIVE_OS_DTCOPS','Preemptive'),
+ ('PREEMPTIVE_OS_ENCRYPTMESSAGE','Preemptive'),
+ ('PREEMPTIVE_OS_FILEOPS','Preemptive'),
+ ('PREEMPTIVE_OS_FINDFILE','Preemptive'),
+ ('PREEMPTIVE_OS_FLUSHFILEBUFFERS','Preemptive'),
+ ('PREEMPTIVE_OS_FORMATMESSAGE','Preemptive'),
+ ('PREEMPTIVE_OS_FREECREDENTIALSHANDLE','Preemptive'),
+ ('PREEMPTIVE_OS_FREELIBRARY','Preemptive'),
+ ('PREEMPTIVE_OS_GENERICOPS','Preemptive'),
+ ('PREEMPTIVE_OS_GETADDRINFO','Preemptive'),
+ ('PREEMPTIVE_OS_GETCOMPRESSEDFILESIZE','Preemptive'),
+ ('PREEMPTIVE_OS_GETDISKFREESPACE','Preemptive'),
+ ('PREEMPTIVE_OS_GETFILEATTRIBUTES','Preemptive'),
+ ('PREEMPTIVE_OS_GETFILESIZE','Preemptive'),
+ ('PREEMPTIVE_OS_GETFINALFILEPATHBYHANDLE','Preemptive'),
+ ('PREEMPTIVE_OS_GETLONGPATHNAME','Preemptive'),
+ ('PREEMPTIVE_OS_GETPROCADDRESS','Preemptive'),
+ ('PREEMPTIVE_OS_GETVOLUMENAMEFORVOLUMEMOUNTPOINT','Preemptive'),
+ ('PREEMPTIVE_OS_GETVOLUMEPATHNAME','Preemptive'),
+ ('PREEMPTIVE_OS_INITIALIZESECURITYCONTEXT','Preemptive'),
+ ('PREEMPTIVE_OS_LIBRARYOPS','Preemptive'),
+ ('PREEMPTIVE_OS_LOADLIBRARY','Preemptive'),
+ ('PREEMPTIVE_OS_LOGONUSER','Preemptive'),
+ ('PREEMPTIVE_OS_LOOKUPACCOUNTSID','Preemptive'),
+ ('PREEMPTIVE_OS_MESSAGEQUEUEOPS','Preemptive'),
+ ('PREEMPTIVE_OS_MOVEFILE','Preemptive'),
+ ('PREEMPTIVE_OS_NETGROUPGETUSERS','Preemptive'),
+ ('PREEMPTIVE_OS_NETLOCALGROUPGETMEMBERS','Preemptive'),
+ ('PREEMPTIVE_OS_NETUSERGETGROUPS','Preemptive'),
+ ('PREEMPTIVE_OS_NETUSERGETLOCALGROUPS','Preemptive'),
+ ('PREEMPTIVE_OS_NETUSERMODALSGET','Preemptive'),
+ ('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICY','Preemptive'),
+ ('PREEMPTIVE_OS_NETVALIDATEPASSWORDPOLICYFREE','Preemptive'),
+ ('PREEMPTIVE_OS_OPENDIRECTORY','Preemptive'),
+ ('PREEMPTIVE_OS_PDH_WMI_INIT','Preemptive'),
+ ('PREEMPTIVE_OS_PIPEOPS','Preemptive'),
+ ('PREEMPTIVE_OS_PROCESSOPS','Preemptive'),
+ ('PREEMPTIVE_OS_QUERYCONTEXTATTRIBUTES','Preemptive'),
+ ('PREEMPTIVE_OS_QUERYREGISTRY','Preemptive'),
+ ('PREEMPTIVE_OS_QUERYSECURITYCONTEXTTOKEN','Preemptive'),
+ ('PREEMPTIVE_OS_REMOVEDIRECTORY','Preemptive'),
+ ('PREEMPTIVE_OS_REPORTEVENT','Preemptive'),
+ ('PREEMPTIVE_OS_REVERTTOSELF','Preemptive'),
+ ('PREEMPTIVE_OS_RSFXDEVICEOPS','Preemptive'),
+ ('PREEMPTIVE_OS_SECURITYOPS','Preemptive'),
+ ('PREEMPTIVE_OS_SERVICEOPS','Preemptive'),
+ ('PREEMPTIVE_OS_SETENDOFFILE','Preemptive'),
+ ('PREEMPTIVE_OS_SETFILEPOINTER','Preemptive'),
+ ('PREEMPTIVE_OS_SETFILEVALIDDATA','Preemptive'),
+ ('PREEMPTIVE_OS_SETNAMEDSECURITYINFO','Preemptive'),
+ ('PREEMPTIVE_OS_SQLCLROPS','Preemptive'),
+ ('PREEMPTIVE_OS_SQMLAUNCH','Preemptive'),
+ ('PREEMPTIVE_OS_VERIFYSIGNATURE','Preemptive'),
+ ('PREEMPTIVE_OS_VERIFYTRUST','Preemptive'),
+ ('PREEMPTIVE_OS_VSSOPS','Preemptive'),
+ ('PREEMPTIVE_OS_WAITFORSINGLEOBJECT','Preemptive'),
+ ('PREEMPTIVE_OS_WINSOCKOPS','Preemptive'),
+ ('PREEMPTIVE_OS_WRITEFILE','Preemptive'),
+ ('PREEMPTIVE_OS_WRITEFILEGATHER','Preemptive'),
+ ('PREEMPTIVE_OS_WSASETLASTERROR','Preemptive'),
+ ('PREEMPTIVE_REENLIST','Preemptive'),
+ ('PREEMPTIVE_RESIZELOG','Preemptive'),
+ ('PREEMPTIVE_ROLLFORWARDREDO','Preemptive'),
+ ('PREEMPTIVE_ROLLFORWARDUNDO','Preemptive'),
+ ('PREEMPTIVE_SB_STOPENDPOINT','Preemptive'),
+ ('PREEMPTIVE_SERVER_STARTUP','Preemptive'),
+ ('PREEMPTIVE_SETRMINFO','Preemptive'),
+ ('PREEMPTIVE_SHAREDMEM_GETDATA','Preemptive'),
+ ('PREEMPTIVE_SNIOPEN','Preemptive'),
+ ('PREEMPTIVE_SOSHOST','Preemptive'),
+ ('PREEMPTIVE_SOSTESTING','Preemptive'),
+ ('PREEMPTIVE_SP_SERVER_DIAGNOSTICS','Preemptive'),
+ ('PREEMPTIVE_STARTRM','Preemptive'),
+ ('PREEMPTIVE_STREAMFCB_CHECKPOINT','Preemptive'),
+ ('PREEMPTIVE_STREAMFCB_RECOVER','Preemptive'),
+ ('PREEMPTIVE_STRESSDRIVER','Preemptive'),
+ ('PREEMPTIVE_TESTING','Preemptive'),
+ ('PREEMPTIVE_TRANSIMPORT','Preemptive'),
+ ('PREEMPTIVE_UNMARSHALPROPAGATIONTOKEN','Preemptive'),
+ ('PREEMPTIVE_VSS_CREATESNAPSHOT','Preemptive'),
+ ('PREEMPTIVE_VSS_CREATEVOLUMESNAPSHOT','Preemptive'),
+ ('PREEMPTIVE_XE_CALLBACKEXECUTE','Preemptive'),
+ ('PREEMPTIVE_XE_CX_FILE_OPEN','Preemptive'),
+ ('PREEMPTIVE_XE_CX_HTTP_CALL','Preemptive'),
+ ('PREEMPTIVE_XE_DISPATCHER','Preemptive'),
+ ('PREEMPTIVE_XE_ENGINEINIT','Preemptive'),
+ ('PREEMPTIVE_XE_GETTARGETSTATE','Preemptive'),
+ ('PREEMPTIVE_XE_SESSIONCOMMIT','Preemptive'),
+ ('PREEMPTIVE_XE_TARGETFINALIZE','Preemptive'),
+ ('PREEMPTIVE_XE_TARGETINIT','Preemptive'),
+ ('PREEMPTIVE_XE_TIMERRUN','Preemptive'),
+ ('PREEMPTIVE_XETESTING','Preemptive'),
+ ('PWAIT_HADR_ACTION_COMPLETED','Replication'),
+ ('PWAIT_HADR_CHANGE_NOTIFIER_TERMINATION_SYNC','Replication'),
+ ('PWAIT_HADR_CLUSTER_INTEGRATION','Replication'),
+ ('PWAIT_HADR_FAILOVER_COMPLETED','Replication'),
+ ('PWAIT_HADR_JOIN','Replication'),
+ ('PWAIT_HADR_OFFLINE_COMPLETED','Replication'),
+ ('PWAIT_HADR_ONLINE_COMPLETED','Replication'),
+ ('PWAIT_HADR_POST_ONLINE_COMPLETED','Replication'),
+ ('PWAIT_HADR_SERVER_READY_CONNECTIONS','Replication'),
+ ('PWAIT_HADR_WORKITEM_COMPLETED','Replication'),
+ ('PWAIT_HADRSIM','Replication'),
+ ('PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC','Full Text Search'),
+ ('QUERY_TRACEOUT','Tracing'),
+ ('REPL_CACHE_ACCESS','Replication'),
+ ('REPL_HISTORYCACHE_ACCESS','Replication'),
+ ('REPL_SCHEMA_ACCESS','Replication'),
+ ('REPL_TRANFSINFO_ACCESS','Replication'),
+ ('REPL_TRANHASHTABLE_ACCESS','Replication'),
+ ('REPL_TRANTEXTINFO_ACCESS','Replication'),
+ ('REPLICA_WRITES','Replication'),
+ ('REQUEST_FOR_DEADLOCK_SEARCH','Idle'),
+ ('RESERVED_MEMORY_ALLOCATION_EXT','Memory'),
+ ('RESOURCE_SEMAPHORE','Memory'),
+ ('RESOURCE_SEMAPHORE_QUERY_COMPILE','Compilation'),
+ ('SLEEP_BPOOL_FLUSH','Idle'),
+ ('SLEEP_BUFFERPOOL_HELPLW','Idle'),
+ ('SLEEP_DBSTARTUP','Idle'),
+ ('SLEEP_DCOMSTARTUP','Idle'),
+ ('SLEEP_MASTERDBREADY','Idle'),
+ ('SLEEP_MASTERMDREADY','Idle'),
+ ('SLEEP_MASTERUPGRADED','Idle'),
+ ('SLEEP_MEMORYPOOL_ALLOCATEPAGES','Idle'),
+ ('SLEEP_MSDBSTARTUP','Idle'),
+ ('SLEEP_RETRY_VIRTUALALLOC','Idle'),
+ ('SLEEP_SYSTEMTASK','Idle'),
+ ('SLEEP_TASK','Idle'),
+ ('SLEEP_TEMPDBSTARTUP','Idle'),
+ ('SLEEP_WORKSPACE_ALLOCATEPAGE','Idle'),
+ ('SOS_SCHEDULER_YIELD','CPU'),
+ ('SQLCLR_APPDOMAIN','SQL CLR'),
+ ('SQLCLR_ASSEMBLY','SQL CLR'),
+ ('SQLCLR_DEADLOCK_DETECTION','SQL CLR'),
+ ('SQLCLR_QUANTUM_PUNISHMENT','SQL CLR'),
+ ('SQLTRACE_BUFFER_FLUSH','Idle'),
+ ('SQLTRACE_FILE_BUFFER','Tracing'),
+ ('SQLTRACE_FILE_READ_IO_COMPLETION','Tracing'),
+ ('SQLTRACE_FILE_WRITE_IO_COMPLETION','Tracing'),
+ ('SQLTRACE_INCREMENTAL_FLUSH_SLEEP','Idle'),
+ ('SQLTRACE_PENDING_BUFFER_WRITERS','Tracing'),
+ ('SQLTRACE_SHUTDOWN','Tracing'),
+ ('SQLTRACE_WAIT_ENTRIES','Idle'),
+ ('THREADPOOL','Worker Thread'),
+ ('TRACE_EVTNOTIF','Tracing'),
+ ('TRACEWRITE','Tracing'),
+ ('TRAN_MARKLATCH_DT','Transaction'),
+ ('TRAN_MARKLATCH_EX','Transaction'),
+ ('TRAN_MARKLATCH_KP','Transaction'),
+ ('TRAN_MARKLATCH_NL','Transaction'),
+ ('TRAN_MARKLATCH_SH','Transaction'),
+ ('TRAN_MARKLATCH_UP','Transaction'),
+ ('TRANSACTION_MUTEX','Transaction'),
+ ('WAIT_FOR_RESULTS','User Wait'),
+ ('WAITFOR','User Wait'),
+ ('WRITE_COMPLETION','Other Disk IO'),
+ ('WRITELOG','Tran Log IO'),
+ ('XACT_OWN_TRANSACTION','Transaction'),
+ ('XACT_RECLAIM_SESSION','Transaction'),
+ ('XACTLOCKINFO','Transaction'),
+ ('XACTWORKSPACE_MUTEX','Transaction'),
+ ('XE_DISPATCHER_WAIT','Idle'),
+ ('XE_TIMER_EVENT','Idle')) AS wc(wait_type, wait_category)
+ ON ws.wait_type = wc.wait_type
+ WHERE
+ ws.wait_type NOT IN (
+ N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP',
+ N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE',
+ N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE',
+ N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE',
+ N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE',
+ N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX',
+ N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT',
+ N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE',
+ N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE',
+ N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE',
+ N'PARALLEL_REDO_WORKER_WAIT_WORK',
+ N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS',
+ N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS',
+ N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST',
+ N'PREEMPTIVE_OS_DEVICEOPS',
+ N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER',
+ N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT',
+ N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE',
+ N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT',
+ N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP',
+ N'QDS_ASYNC_QUEUE',
+ N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH',
+ N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP',
+ N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY',
+ N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK',
+ N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP',
+ N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
+ N'SQLTRACE_WAIT_ENTRIES',
+ N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT',
+ N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE',
+ N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN',
+ N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT',
+ N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT')
+ AND waiting_tasks_count > 0
+ AND wait_time_ms > 100;
+`
+
+const sqlServerRequests string = `
+SET NOCOUNT ON;
+DECLARE
+ @SqlStatement AS nvarchar(max)
+ ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int) * 100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+
+-- 2008R2 and before doesn't have open_transaction_count in sys.dm_exec_sessions
+DECLARE @Columns as nvarchar(max) = ''
+DECLARE @DatabaseColumn as nvarchar(max) = ''
+IF @MajorMinorVersion >= 1200
+ BEGIN
+ SET @Columns = ',s.open_transaction_count as open_transaction '
+ SET @DatabaseColumn = ' , DB_NAME(s.database_id) as session_db_name '
+ END
+ELSE
+ BEGIN
+ SET @Columns = ',r.open_transaction_count as open_transaction '
+ SET @DatabaseColumn = ' , DB_NAME(r.database_id) as session_db_name '
+ END
+
+SET @SqlStatement = N'
+SELECT blocking_session_id into #blockingSessions FROM sys.dm_exec_requests WHERE blocking_session_id != 0
+create index ix_blockingSessions_1 on #blockingSessions (blocking_session_id)
+SELECT
+''sqlserver_requests'' AS [measurement]
+, REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
+, DB_NAME() as [database_name]
+, s.session_id
+, ISNULL(r.request_id,0) as request_id '
++ @DatabaseColumn +
+N' , COALESCE(r.status,s.status) AS status
+, COALESCE(r.cpu_time,s.cpu_time) AS cpu_time_ms
+, COALESCE(r.total_elapsed_time,s.total_elapsed_time) AS total_elapsed_time_ms
+, COALESCE(r.logical_reads,s.logical_reads) AS logical_reads
+, COALESCE(r.writes,s.writes) AS writes
+, r.command
+, r.wait_time as wait_time_ms
+, r.wait_type
+, r.wait_resource
+, r.blocking_session_id
+, s.program_name
+, s.host_name
+, s.nt_user_name '
++ @Columns +
+N', LEFT (CASE COALESCE(r.transaction_isolation_level, s.transaction_isolation_level)
+ WHEN 0 THEN ''0-Read Committed''
+ WHEN 1 THEN ''1-Read Uncommitted (NOLOCK)''
+ WHEN 2 THEN ''2-Read Committed''
+ WHEN 3 THEN ''3-Repeatable Read''
+ WHEN 4 THEN ''4-Serializable''
+ WHEN 5 THEN ''5-Snapshot''
+ ELSE CONVERT (varchar(30), r.transaction_isolation_level) + ''-UNKNOWN''
+ END, 30) AS transaction_isolation_level
+, r.granted_query_memory as granted_query_memory_pages
+, r.percent_complete
+, SUBSTRING(
+ qt.text,
+ r.statement_start_offset / 2 + 1,
+ (CASE WHEN r.statement_end_offset = -1
+ THEN DATALENGTH(qt.text)
+ ELSE r.statement_end_offset
+ END - r.statement_start_offset) / 2 + 1
+ ) AS statement_text
+, qt.objectid
+, QUOTENAME(OBJECT_SCHEMA_NAME(qt.objectid,qt.dbid)) + ''.'' + QUOTENAME(OBJECT_NAME(qt.objectid,qt.dbid)) as stmt_object_name
+, DB_NAME(qt.dbid) stmt_db_name
+, CONVERT(varchar(20),[query_hash],1) as [query_hash]
+, CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash]
+FROM sys.dm_exec_sessions AS s
+LEFT OUTER JOIN sys.dm_exec_requests AS r
+ ON s.session_id = r.session_id
+OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt
+WHERE 1 = 1
+ AND (r.session_id IS NOT NULL AND (s.is_user_process = 1
+ OR r.status COLLATE Latin1_General_BIN NOT IN (''background'', ''sleeping'')))
+ OR (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions))
+OPTION(MAXDOP 1)'
+
+EXEC sp_executesql @SqlStatement
+`
+
+const sqlServerVolumeSpace string = `
+/* Only for on-prem version of SQL Server
+Gets data about disk space, only for volumes used by SQL Server (data available form sql 2008R2 and later)
+*/
+DECLARE
+ @EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
+ ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
+
+IF @EngineEdition IN (2,3,4) AND @MajorMinorVersion >= 1050
+ BEGIN
+ SELECT DISTINCT
+ 'sqlserver_volume_space' AS [measurement]
+ ,SERVERPROPERTY('machinename') AS [server_name]
+ ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ /*in [volume_mount_point] any trailing "\" char will be removed by telegraf */
+ ,[volume_mount_point]
+ ,vs.[total_bytes] AS [total_space_bytes]
+ ,vs.[available_bytes] AS [available_space_bytes]
+ ,vs.[total_bytes] - vs.[available_bytes] AS [used_space_bytes]
+ FROM
+ sys.master_files as mf
+ CROSS APPLY sys.dm_os_volume_stats(mf.database_id, mf.file_id) as vs
+ END
+`
+
+const sqlServerRingBufferCpu string = `
+/*The ring buffer has a new value every minute*/
+IF SERVERPROPERTY('EngineEdition') IN (2,3,4) /*Standard,Enterpris,Express*/
+BEGIN
+SELECT
+ 'sqlserver_cpu' AS [measurement]
+ ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ ,[SQLProcessUtilization] AS [sqlserver_process_cpu]
+ ,[SystemIdle] AS [system_idle_cpu]
+ ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu]
+FROM (
+ SELECT TOP 1
+ [record_id]
+ /*,dateadd(ms, (y.[timestamp] - (SELECT CAST([ms_ticks] AS BIGINT) FROM sys.dm_os_sys_info)), GETDATE()) AS [EventTime] --use for check/debug purpose*/
+ ,[SQLProcessUtilization]
+ ,[SystemIdle]
+ FROM (
+ SELECT record.value('(./Record/@id)[1]', 'int') AS [record_id]
+ ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle]
+ ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization]
+ ,[TIMESTAMP]
+ FROM (
+ SELECT [TIMESTAMP]
+ ,convert(XML, [record]) AS [record]
+ FROM sys.dm_os_ring_buffers
+ WHERE [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR'
+ AND [record] LIKE '%%'
+ ) AS x
+ ) AS y
+ ORDER BY record_id DESC
+) as z
+END
+`
diff --git a/plugins/inputs/stackdriver/README.md b/plugins/inputs/stackdriver/README.md
index f2ec1471bb140..6469b259b78ec 100644
--- a/plugins/inputs/stackdriver/README.md
+++ b/plugins/inputs/stackdriver/README.md
@@ -1,6 +1,7 @@
-# Stackdriver Input Plugin
+# Stackdriver Google Cloud Monitoring Input Plugin
-Stackdriver gathers metrics from the [Stackdriver Monitoring API][stackdriver].
+Query data from Google Cloud Monitoring (formerly Stackdriver) using the
+[Cloud Monitoring API v3][stackdriver].
This plugin accesses APIs which are [chargeable][pricing]; you might incur
costs.
diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go
index 4f4e35695fc21..431076743101a 100644
--- a/plugins/inputs/stackdriver/stackdriver.go
+++ b/plugins/inputs/stackdriver/stackdriver.go
@@ -3,7 +3,6 @@ package stackdriver
import (
"context"
"fmt"
- "log"
"math"
"strconv"
"strings"
@@ -128,6 +127,8 @@ type (
DistributionAggregationAligners []string `toml:"distribution_aggregation_aligners"`
Filter *ListTimeSeriesFilter `toml:"filter"`
+ Log telegraf.Logger
+
client metricClient
timeSeriesConfCache *timeSeriesConfCache
prevEnd time.Time
@@ -167,6 +168,7 @@ type (
// stackdriverMetricClient is a metric client for stackdriver
stackdriverMetricClient struct {
+ log telegraf.Logger
conn *monitoring.MetricClient
listMetricDescriptorsCalls selfstat.Stat
@@ -206,7 +208,7 @@ func (c *stackdriverMetricClient) ListMetricDescriptors(
mdChan := make(chan *metricpb.MetricDescriptor, 1000)
go func() {
- log.Printf("D! [inputs.stackdriver] ListMetricDescriptors: %s", req.Filter)
+ c.log.Debugf("List metric descriptor request filter: %s", req.Filter)
defer close(mdChan)
// Iterate over metric descriptors and send them to buffered channel
@@ -216,7 +218,7 @@ func (c *stackdriverMetricClient) ListMetricDescriptors(
mdDesc, mdErr := mdResp.Next()
if mdErr != nil {
if mdErr != iterator.Done {
- log.Printf("E! [inputs.stackdriver] Received error response: %s: %v", req, mdErr)
+ c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr)
}
break
}
@@ -235,7 +237,7 @@ func (c *stackdriverMetricClient) ListTimeSeries(
tsChan := make(chan *monitoringpb.TimeSeries, 1000)
go func() {
- log.Printf("D! [inputs.stackdriver] ListTimeSeries: %s", req.Filter)
+ c.log.Debugf("List time series request filter: %s", req.Filter)
defer close(tsChan)
// Iterate over timeseries and send them to buffered channel
@@ -245,7 +247,7 @@ func (c *stackdriverMetricClient) ListTimeSeries(
tsDesc, tsErr := tsResp.Next()
if tsErr != nil {
if tsErr != iterator.Done {
- log.Printf("E! [inputs.stackdriver] Received error response: %s: %v", req, tsErr)
+ c.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr)
}
break
}
@@ -458,6 +460,7 @@ func (s *Stackdriver) initializeStackdriverClient(ctx context.Context) error {
"stackdriver", "list_timeseries_calls", tags)
s.client = &stackdriverMetricClient{
+ log: s.Log,
conn: client,
listMetricDescriptorsCalls: listMetricDescriptorsCalls,
listTimeSeriesCalls: listTimeSeriesCalls,
@@ -541,7 +544,7 @@ func (s *Stackdriver) generatetimeSeriesConfs(
for _, filter := range filters {
// Add filter for list metric descriptors if
// includeMetricTypePrefixes is specified,
- // this is more effecient than iterating over
+ // this is more efficient than iterating over
// all metric descriptors
req.Filter = filter
mdRespChan, err := s.client.ListMetricDescriptors(ctx, req)
diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go
index 99e5deabdadf3..8010ad4817924 100644
--- a/plugins/inputs/stackdriver/stackdriver_test.go
+++ b/plugins/inputs/stackdriver/stackdriver_test.go
@@ -2,6 +2,7 @@ package stackdriver
import (
"context"
+ "sync"
"testing"
"time"
@@ -27,6 +28,7 @@ type MockStackdriverClient struct {
CloseF func() error
calls []*Call
+ sync.Mutex
}
func (m *MockStackdriverClient) ListMetricDescriptors(
@@ -34,7 +36,9 @@ func (m *MockStackdriverClient) ListMetricDescriptors(
req *monitoringpb.ListMetricDescriptorsRequest,
) (<-chan *metricpb.MetricDescriptor, error) {
call := &Call{name: "ListMetricDescriptors", args: []interface{}{ctx, req}}
+ m.Lock()
m.calls = append(m.calls, call)
+ m.Unlock()
return m.ListMetricDescriptorsF(ctx, req)
}
@@ -43,13 +47,17 @@ func (m *MockStackdriverClient) ListTimeSeries(
req *monitoringpb.ListTimeSeriesRequest,
) (<-chan *monitoringpb.TimeSeries, error) {
call := &Call{name: "ListTimeSeries", args: []interface{}{ctx, req}}
+ m.Lock()
m.calls = append(m.calls, call)
+ m.Unlock()
return m.ListTimeSeriesF(ctx, req)
}
func (m *MockStackdriverClient) Close() error {
call := &Call{name: "Close", args: []interface{}{}}
+ m.Lock()
m.calls = append(m.calls, call)
+ m.Unlock()
return m.CloseF()
}
@@ -640,6 +648,7 @@ func TestGather(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
s := &Stackdriver{
+ Log: testutil.Logger{},
Project: "test",
RateLimit: 10,
GatherRawDistributionBuckets: true,
@@ -751,9 +760,8 @@ func TestGatherAlign(t *testing.T) {
},
},
}
- for _, tt := range tests {
+ for listCall, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- listCall := 0
var acc testutil.Accumulator
client := &MockStackdriverClient{
ListMetricDescriptorsF: func(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) {
@@ -765,7 +773,6 @@ func TestGatherAlign(t *testing.T) {
ListTimeSeriesF: func(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) {
ch := make(chan *monitoringpb.TimeSeries, 1)
ch <- tt.timeseries[listCall]
- listCall++
close(ch)
return ch, nil
},
@@ -775,6 +782,7 @@ func TestGatherAlign(t *testing.T) {
}
s := &Stackdriver{
+ Log: testutil.Logger{},
Project: "test",
RateLimit: 10,
GatherRawDistributionBuckets: false,
diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md
index c1093bf397b10..57953eed72600 100644
--- a/plugins/inputs/statsd/README.md
+++ b/plugins/inputs/statsd/README.md
@@ -1,4 +1,4 @@
-# Telegraf Service Plugin: statsd
+# StatsD Input Plugin
### Configuration
@@ -34,16 +34,22 @@
## Reset timings & histograms every interval (default=true)
delete_timings = true
- ## Percentiles to calculate for timing & histogram stats
- percentiles = [90]
+ ## Percentiles to calculate for timing & histogram stats.
+ percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
## separator to use between elements of a statsd metric
metric_separator = "_"
## Parses tags in the datadog statsd format
## http://docs.datadoghq.com/guides/dogstatsd/
+ ## deprecated in 1.10; use datadog_extensions option instead
parse_data_dog_tags = false
+ ## Parses extensions to statsd in the datadog statsd format
+ ## currently supports metrics and datadog tags.
+ ## http://docs.datadoghq.com/guides/dogstatsd/
+ datadog_extensions = false
+
## Statsd data translation templates, more info can be read here:
## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
# templates = [
@@ -185,6 +191,7 @@ the accuracy of percentiles but also increases the memory usage and cpu time.
- **templates** []string: Templates for transforming statsd buckets into influx
measurements and tags.
- **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/)
+- **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/)
### Statsd bucket -> InfluxDB line-protocol Templates
diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go
new file mode 100644
index 0000000000000..377db66e6d3ad
--- /dev/null
+++ b/plugins/inputs/statsd/datadog.go
@@ -0,0 +1,180 @@
+package statsd
+
+// this is adapted from datadog's apache licensed version at
+// https://github.com/DataDog/datadog-agent/blob/fcfc74f106ab1bd6991dfc6a7061c558d934158a/pkg/dogstatsd/parser.go#L173
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ priorityNormal = "normal"
+ priorityLow = "low"
+
+ eventInfo = "info"
+ eventWarning = "warning"
+ eventError = "error"
+ eventSuccess = "success"
+)
+
+var uncommenter = strings.NewReplacer("\\n", "\n")
+
+func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostname string) error {
+ // _e{title.length,text.length}:title|text
+ // [
+ // |d:date_happened
+ // |p:priority
+ // |h:hostname
+ // |t:alert_type
+ // |s:source_type_nam
+ // |#tag1,tag2
+ // ]
+ //
+ //
+ // tag is key:value
+ messageRaw := strings.SplitN(message, ":", 2)
+ if len(messageRaw) < 2 || len(messageRaw[0]) < 7 || len(messageRaw[1]) < 3 {
+ return fmt.Errorf("Invalid message format")
+ }
+ header := messageRaw[0]
+ message = messageRaw[1]
+
+ rawLen := strings.SplitN(header[3:], ",", 2)
+ if len(rawLen) != 2 {
+ return fmt.Errorf("Invalid message format")
+ }
+
+ titleLen, err := strconv.ParseInt(rawLen[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("Invalid message format, could not parse title.length: '%s'", rawLen[0])
+ }
+ if len(rawLen[1]) < 1 {
+ return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0])
+ }
+ textLen, err := strconv.ParseInt(rawLen[1][:len(rawLen[1])-1], 10, 64)
+ if err != nil {
+ return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0])
+ }
+ if titleLen+textLen+1 > int64(len(message)) {
+ return fmt.Errorf("Invalid message format, title.length and text.length exceed total message length")
+ }
+
+ rawTitle := message[:titleLen]
+ rawText := message[titleLen+1 : titleLen+1+textLen]
+ message = message[titleLen+1+textLen:]
+
+ if len(rawTitle) == 0 || len(rawText) == 0 {
+ return fmt.Errorf("Invalid event message format: empty 'title' or 'text' field")
+ }
+
+ name := rawTitle
+ tags := make(map[string]string, strings.Count(message, ",")+2) // allocate for the approximate number of tags
+ fields := make(map[string]interface{}, 9)
+ fields["alert_type"] = eventInfo // default event type
+ fields["text"] = uncommenter.Replace(string(rawText))
+ if defaultHostname != "" {
+ tags["source"] = defaultHostname
+ }
+ fields["priority"] = priorityNormal
+ ts := now
+ if len(message) < 2 {
+ s.acc.AddFields(name, fields, tags, ts)
+ return nil
+ }
+
+ rawMetadataFields := strings.Split(message[1:], "|")
+ for i := range rawMetadataFields {
+ if len(rawMetadataFields[i]) < 2 {
+ return errors.New("too short metadata field")
+ }
+ switch rawMetadataFields[i][:2] {
+ case "d:":
+ ts, err := strconv.ParseInt(rawMetadataFields[i][2:], 10, 64)
+ if err != nil {
+ continue
+ }
+ fields["ts"] = ts
+ case "p:":
+ switch rawMetadataFields[i][2:] {
+ case priorityLow:
+ fields["priority"] = priorityLow
+ case priorityNormal: // we already used this as a default
+ default:
+ continue
+ }
+ case "h:":
+ tags["source"] = rawMetadataFields[i][2:]
+ case "t:":
+ switch rawMetadataFields[i][2:] {
+ case eventError, eventWarning, eventSuccess, eventInfo:
+ fields["alert_type"] = rawMetadataFields[i][2:] // already set for info
+ default:
+ continue
+ }
+ case "k:":
+ tags["aggregation_key"] = rawMetadataFields[i][2:]
+ case "s:":
+ fields["source_type_name"] = rawMetadataFields[i][2:]
+ default:
+ if rawMetadataFields[i][0] == '#' {
+ parseDataDogTags(tags, rawMetadataFields[i][1:])
+ } else {
+ return fmt.Errorf("unknown metadata type: '%s'", rawMetadataFields[i])
+ }
+ }
+ }
+ // Use source tag because host is reserved tag key in Telegraf.
+ // In datadog the host tag and `h:` are interchangable, so we have to chech for the host tag.
+ if host, ok := tags["host"]; ok {
+ delete(tags, "host")
+ tags["source"] = host
+ }
+ s.acc.AddFields(name, fields, tags, ts)
+ return nil
+}
+
+func parseDataDogTags(tags map[string]string, message string) {
+ if len(message) == 0 {
+ return
+ }
+
+ start, i := 0, 0
+ var k string
+ var inVal bool // check if we are parsing the value part of the tag
+ for i = range message {
+ if message[i] == ',' {
+ if k == "" {
+ k = message[start:i]
+ tags[k] = "true" // this is because influx doesn't support empty tags
+ start = i + 1
+ continue
+ }
+ v := message[start:i]
+ if v == "" {
+ v = "true"
+ }
+ tags[k] = v
+ start = i + 1
+ k, inVal = "", false // reset state vars
+ } else if message[i] == ':' && !inVal {
+ k = message[start:i]
+ start = i + 1
+ inVal = true
+ }
+ }
+ if k == "" && start < i+1 {
+ tags[message[start:i+1]] = "true"
+ }
+ // grab the last value
+ if k != "" {
+ if start < i+1 {
+ tags[k] = message[start : i+1]
+ return
+ }
+ tags[k] = "true"
+ }
+}
diff --git a/plugins/inputs/statsd/datadog_test.go b/plugins/inputs/statsd/datadog_test.go
new file mode 100644
index 0000000000000..aaa046f38aa4c
--- /dev/null
+++ b/plugins/inputs/statsd/datadog_test.go
@@ -0,0 +1,484 @@
+package statsd
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEventGather(t *testing.T) {
+ now := time.Now()
+ type expected struct {
+ title string
+ tags map[string]string
+ fields map[string]interface{}
+ }
+ tests := []struct {
+ name string
+ message string
+ hostname string
+ now time.Time
+ err bool
+ expected expected
+ }{{
+ name: "basic",
+ message: "_e{10,9}:test title|test text",
+ hostname: "default-hostname",
+ now: now,
+ err: false,
+ expected: expected{
+ title: "test title",
+ tags: map[string]string{"source": "default-hostname"},
+ fields: map[string]interface{}{
+ "priority": priorityNormal,
+ "alert_type": "info",
+ "text": "test text",
+ },
+ },
+ },
+ {
+ name: "escape some stuff",
+ message: "_e{10,24}:test title|test\\line1\\nline2\\nline3",
+ hostname: "default-hostname",
+ now: now.Add(1),
+ err: false,
+ expected: expected{
+ title: "test title",
+ tags: map[string]string{"source": "default-hostname"},
+ fields: map[string]interface{}{
+ "priority": priorityNormal,
+ "alert_type": "info",
+ "text": "test\\line1\nline2\nline3",
+ },
+ },
+ },
+ {
+ name: "custom time",
+ message: "_e{10,9}:test title|test text|d:21",
+ hostname: "default-hostname",
+ now: now.Add(2),
+ err: false,
+ expected: expected{
+ title: "test title",
+ tags: map[string]string{"source": "default-hostname"},
+ fields: map[string]interface{}{
+ "priority": priorityNormal,
+ "alert_type": "info",
+ "text": "test text",
+ "ts": int64(21),
+ },
+ },
+ },
+ }
+ acc := &testutil.Accumulator{}
+ s := NewTestStatsd()
+ require.NoError(t, s.Start(acc))
+ defer s.Stop()
+
+ for i := range tests {
+ t.Run(tests[i].name, func(t *testing.T) {
+ err := s.parseEventMessage(tests[i].now, tests[i].message, tests[i].hostname)
+ if tests[i].err {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ require.Equal(t, uint64(i+1), acc.NMetrics())
+
+ require.NoError(t, err)
+ require.Equal(t, tests[i].expected.title, acc.Metrics[i].Measurement)
+ require.Equal(t, tests[i].expected.tags, acc.Metrics[i].Tags)
+ require.Equal(t, tests[i].expected.fields, acc.Metrics[i].Fields)
+ })
+ }
+}
+
+// These tests adapted from tests in
+// https://github.com/DataDog/datadog-agent/blob/master/pkg/dogstatsd/parser_test.go
+// to ensure compatibility with the datadog-agent parser
+
+func TestEvents(t *testing.T) {
+ now := time.Now()
+ type args struct {
+ now time.Time
+ message string
+ hostname string
+ }
+ type expected struct {
+ title string
+ text interface{}
+ now time.Time
+ ts interface{}
+ priority string
+ source string
+ alertType interface{}
+ aggregationKey string
+ sourceTypeName interface{}
+ checkTags map[string]string
+ }
+
+ tests := []struct {
+ name string
+ args args
+ expected expected
+ }{
+ {
+ name: "event minimal",
+ args: args{
+ now: now,
+ message: "_e{10,9}:test title|test text",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now,
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ aggregationKey: "",
+ },
+ },
+ {
+ name: "event multilines text",
+ args: args{
+ now: now.Add(1),
+ message: "_e{10,24}:test title|test\\line1\\nline2\\nline3",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test\\line1\nline2\nline3",
+ now: now.Add(1),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ aggregationKey: "",
+ },
+ },
+ {
+ name: "event pipe in title",
+ args: args{
+ now: now.Add(2),
+ message: "_e{10,24}:test|title|test\\line1\\nline2\\nline3",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test|title",
+ text: "test\\line1\nline2\nline3",
+ now: now.Add(2),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ aggregationKey: "",
+ },
+ },
+ {
+ name: "event metadata timestamp",
+ args: args{
+ now: now.Add(3),
+ message: "_e{10,9}:test title|test text|d:21",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(3),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ aggregationKey: "",
+ ts: int64(21),
+ },
+ },
+ {
+ name: "event metadata priority",
+ args: args{
+ now: now.Add(4),
+ message: "_e{10,9}:test title|test text|p:low",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(4),
+ priority: priorityLow,
+ source: "default-hostname",
+ alertType: eventInfo,
+ },
+ },
+ {
+ name: "event metadata hostname",
+ args: args{
+ now: now.Add(5),
+ message: "_e{10,9}:test title|test text|h:localhost",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(5),
+ priority: priorityNormal,
+ source: "localhost",
+ alertType: eventInfo,
+ },
+ },
+ {
+ name: "event metadata hostname in tag",
+ args: args{
+ now: now.Add(6),
+ message: "_e{10,9}:test title|test text|#host:localhost",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(6),
+ priority: priorityNormal,
+ source: "localhost",
+ alertType: eventInfo,
+ },
+ },
+ {
+ name: "event metadata empty host tag",
+ args: args{
+ now: now.Add(7),
+ message: "_e{10,9}:test title|test text|#host:,other:tag",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(7),
+ priority: priorityNormal,
+ source: "true",
+ alertType: eventInfo,
+ checkTags: map[string]string{"other": "tag", "source": "true"},
+ },
+ },
+ {
+ name: "event metadata alert type",
+ args: args{
+ now: now.Add(8),
+ message: "_e{10,9}:test title|test text|t:warning",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(8),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventWarning,
+ },
+ },
+ {
+ name: "event metadata aggregation key",
+ args: args{
+ now: now.Add(9),
+ message: "_e{10,9}:test title|test text|k:some aggregation key",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(9),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ aggregationKey: "some aggregation key",
+ },
+ },
+ {
+ name: "event metadata aggregation key",
+ args: args{
+ now: now.Add(10),
+ message: "_e{10,9}:test title|test text|k:some aggregation key",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(10),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ aggregationKey: "some aggregation key",
+ },
+ },
+ {
+ name: "event metadata source type",
+ args: args{
+ now: now.Add(11),
+ message: "_e{10,9}:test title|test text|s:this is the source",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(11),
+ priority: priorityNormal,
+ source: "default-hostname",
+ sourceTypeName: "this is the source",
+ alertType: eventInfo,
+ },
+ },
+ {
+ name: "event metadata source type",
+ args: args{
+ now: now.Add(11),
+ message: "_e{10,9}:test title|test text|s:this is the source",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(11),
+ priority: priorityNormal,
+ source: "default-hostname",
+ sourceTypeName: "this is the source",
+ alertType: eventInfo,
+ },
+ },
+ {
+ name: "event metadata source tags",
+ args: args{
+ now: now.Add(11),
+ message: "_e{10,9}:test title|test text|#tag1,tag2:test",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(11),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ checkTags: map[string]string{"tag1": "true", "tag2": "test", "source": "default-hostname"},
+ },
+ },
+ {
+ name: "event metadata multiple",
+ args: args{
+ now: now.Add(11),
+ message: "_e{10,9}:test title|test text|t:warning|d:12345|p:low|h:some.host|k:aggKey|s:source test|#tag1,tag2:test",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(11),
+ priority: priorityLow,
+ source: "some.host",
+ ts: int64(12345),
+ alertType: eventWarning,
+ aggregationKey: "aggKey",
+ sourceTypeName: "source test",
+ checkTags: map[string]string{"aggregation_key": "aggKey", "tag1": "true", "tag2": "test", "source": "some.host"},
+ },
+ },
+ }
+ s := NewTestStatsd()
+ acc := &testutil.Accumulator{}
+ require.NoError(t, s.Start(acc))
+ defer s.Stop()
+ for i := range tests {
+ t.Run(tests[i].name, func(t *testing.T) {
+ acc.ClearMetrics()
+ err := s.parseEventMessage(tests[i].args.now, tests[i].args.message, tests[i].args.hostname)
+ require.NoError(t, err)
+ m := acc.Metrics[0]
+ require.Equal(t, tests[i].expected.title, m.Measurement)
+ require.Equal(t, tests[i].expected.text, m.Fields["text"])
+ require.Equal(t, tests[i].expected.now, m.Time)
+ require.Equal(t, tests[i].expected.ts, m.Fields["ts"])
+ require.Equal(t, tests[i].expected.priority, m.Fields["priority"])
+ require.Equal(t, tests[i].expected.source, m.Tags["source"])
+ require.Equal(t, tests[i].expected.alertType, m.Fields["alert_type"])
+ require.Equal(t, tests[i].expected.aggregationKey, m.Tags["aggregation_key"])
+ require.Equal(t, tests[i].expected.sourceTypeName, m.Fields["source_type_name"])
+ if tests[i].expected.checkTags != nil {
+ require.Equal(t, tests[i].expected.checkTags, m.Tags)
+ }
+ })
+ }
+}
+
+func TestEventError(t *testing.T) {
+ now := time.Now()
+ s := NewTestStatsd()
+ acc := &testutil.Accumulator{}
+ require.NoError(t, s.Start(acc))
+ defer s.Stop()
+
+ // missing length header
+ err := s.parseEventMessage(now, "_e:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // greater length than packet
+ err = s.parseEventMessage(now, "_e{10,10}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // zero length
+ err = s.parseEventMessage(now, "_e{0,0}:a|a", "default-hostname")
+ require.Error(t, err)
+
+ // missing title or text length
+ err = s.parseEventMessage(now, "_e{5555:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // missing wrong len format
+ err = s.parseEventMessage(now, "_e{a,1}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{1,a}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // missing title or text length
+ err = s.parseEventMessage(now, "_e{5,}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{100,:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e,100:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{,4}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{,}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // not enough information
+ err = s.parseEventMessage(now, "_e|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e:|text", "default-hostname")
+ require.Error(t, err)
+
+ // invalid timestamp
+ err = s.parseEventMessage(now, "_e{5,4}:title|text|d:abc", "default-hostname")
+ require.NoError(t, err)
+
+ // invalid priority
+ err = s.parseEventMessage(now, "_e{5,4}:title|text|p:urgent", "default-hostname")
+ require.NoError(t, err)
+
+ // invalid priority
+ err = s.parseEventMessage(now, "_e{5,4}:title|text|p:urgent", "default-hostname")
+ require.NoError(t, err)
+
+ // invalid alert type
+ err = s.parseEventMessage(now, "_e{5,4}:title|text|t:test", "default-hostname")
+ require.NoError(t, err)
+
+ // unknown metadata
+ err = s.parseEventMessage(now, "_e{5,4}:title|text|x:1234", "default-hostname")
+ require.Error(t, err)
+}
diff --git a/plugins/inputs/statsd/running_stats.go b/plugins/inputs/statsd/running_stats.go
index 2395ab143f45d..e33749b2c2179 100644
--- a/plugins/inputs/statsd/running_stats.go
+++ b/plugins/inputs/statsd/running_stats.go
@@ -49,7 +49,7 @@ func (rs *RunningStats) AddValue(v float64) {
}
// These are used for the running mean and variance
- rs.n += 1
+ rs.n++
rs.ex += v - rs.k
rs.ex2 += (v - rs.k) * (v - rs.k)
@@ -99,7 +99,7 @@ func (rs *RunningStats) Count() int64 {
return rs.n
}
-func (rs *RunningStats) Percentile(n int) float64 {
+func (rs *RunningStats) Percentile(n float64) float64 {
if n > 100 {
n = 100
}
@@ -109,16 +109,16 @@ func (rs *RunningStats) Percentile(n int) float64 {
rs.sorted = true
}
- i := int(float64(len(rs.perc)) * float64(n) / float64(100))
+ i := float64(len(rs.perc)) * n / float64(100)
return rs.perc[clamp(i, 0, len(rs.perc)-1)]
}
-func clamp(i int, min int, max int) int {
- if i < min {
+func clamp(i float64, min int, max int) int {
+ if i < float64(min) {
return min
}
- if i > max {
+ if i > float64(max) {
return max
}
- return i
+ return int(i)
}
diff --git a/plugins/inputs/statsd/running_stats_test.go b/plugins/inputs/statsd/running_stats_test.go
index 4571f76d7a602..a52209c5665cb 100644
--- a/plugins/inputs/statsd/running_stats_test.go
+++ b/plugins/inputs/statsd/running_stats_test.go
@@ -26,6 +26,9 @@ func TestRunningStats_Single(t *testing.T) {
if rs.Percentile(100) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100))
}
+ if rs.Percentile(99.95) != 10.1 {
+ t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(99.95))
+ }
if rs.Percentile(90) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
}
@@ -67,6 +70,9 @@ func TestRunningStats_Duplicate(t *testing.T) {
if rs.Percentile(100) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100))
}
+ if rs.Percentile(99.95) != 10.1 {
+ t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(99.95))
+ }
if rs.Percentile(90) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
}
@@ -108,12 +114,21 @@ func TestRunningStats(t *testing.T) {
if rs.Percentile(100) != 45 {
t.Errorf("Expected %v, got %v", 45, rs.Percentile(100))
}
+ if rs.Percentile(99.98) != 45 {
+ t.Errorf("Expected %v, got %v", 45, rs.Percentile(99.98))
+ }
if rs.Percentile(90) != 32 {
t.Errorf("Expected %v, got %v", 32, rs.Percentile(90))
}
+ if rs.Percentile(50.1) != 11 {
+ t.Errorf("Expected %v, got %v", 11, rs.Percentile(50.1))
+ }
if rs.Percentile(50) != 11 {
t.Errorf("Expected %v, got %v", 11, rs.Percentile(50))
}
+ if rs.Percentile(49.9) != 10 {
+ t.Errorf("Expected %v, got %v", 10, rs.Percentile(49.9))
+ }
if rs.Percentile(0) != 5 {
t.Errorf("Expected %v, got %v", 5, rs.Percentile(0))
}
diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
index 8b5e15502d20f..9c5780d00a596 100644
--- a/plugins/inputs/statsd/statsd.go
+++ b/plugins/inputs/statsd/statsd.go
@@ -5,7 +5,6 @@ import (
"bytes"
"errors"
"fmt"
- "log"
"net"
"sort"
"strconv"
@@ -21,7 +20,7 @@ import (
)
const (
- // UDP packet limit, see
+ // UDP_MAX_PACKET_SIZE is the UDP packet limit, see
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
UDP_MAX_PACKET_SIZE int = 64 * 1024
@@ -32,15 +31,11 @@ const (
defaultSeparator = "_"
defaultAllowPendingMessage = 10000
MaxTCPConnections = 250
-)
-
-var dropwarn = "E! Error: statsd message queue full. " +
- "We have dropped %d messages so far. " +
- "You may want to increase allowed_pending_messages in the config\n"
-var malformedwarn = "E! Statsd over TCP has received %d malformed packets" +
- " thus far."
+ parserGoRoutines = 5
+)
+// Statsd allows the importing of statsd and dogstatsd data.
type Statsd struct {
// Protocol used on listener - udp or tcp
Protocol string `toml:"protocol"`
@@ -54,7 +49,7 @@ type Statsd struct {
// Percentiles specifies the percentiles that will be calculated for timing
// and histogram stats.
- Percentiles []int
+ Percentiles []internal.Number
PercentileLimit int
DeleteGauges bool
@@ -67,7 +62,12 @@ type Statsd struct {
MetricSeparator string
// This flag enables parsing of tags in the dogstatsd extension to the
// statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/)
- ParseDataDogTags bool
+ ParseDataDogTags bool // depreciated in 1.10; use datadog_extensions
+
+ // Parses extensions to statsd in the datadog statsd format
+ // currently supports metrics and datadog tags.
+ // http://docs.datadoghq.com/guides/dogstatsd/
+ DataDogExtensions bool `toml:"datadog_extensions"`
// UDPPacketSize is deprecated, it's only here for legacy support
// we now always create 1 max size buffer and then copy only what we need
@@ -91,7 +91,7 @@ type Statsd struct {
malformed int
// Channel for all incoming statsd packets
- in chan *bytes.Buffer
+ in chan input
done chan struct{}
// Cache gauges, counters & sets so they can be aggregated as they arrive
@@ -124,13 +124,25 @@ type Statsd struct {
MaxConnections selfstat.Stat
CurrentConnections selfstat.Stat
TotalConnections selfstat.Stat
- PacketsRecv selfstat.Stat
- BytesRecv selfstat.Stat
+ TCPPacketsRecv selfstat.Stat
+ TCPBytesRecv selfstat.Stat
+ UDPPacketsRecv selfstat.Stat
+ UDPPacketsDrop selfstat.Stat
+ UDPBytesRecv selfstat.Stat
+ ParseTimeNS selfstat.Stat
+
+ Log telegraf.Logger
// A pool of byte slices to handle parsing
bufPool sync.Pool
}
+type input struct {
+ *bytes.Buffer
+ time.Time
+ Addr string
+}
+
// One statsd metric, form is :||@
type metric struct {
name string
@@ -205,7 +217,7 @@ const sampleConfig = `
delete_timings = true
## Percentiles to calculate for timing & histogram stats
- percentiles = [90]
+ percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
## separator to use between elements of a statsd metric
metric_separator = "_"
@@ -214,6 +226,9 @@ const sampleConfig = `
## http://docs.datadoghq.com/guides/dogstatsd/
parse_data_dog_tags = false
+ ## Parses datadog extensions to the statsd format
+ datadog_extensions = false
+
## Statsd data translation templates, more info can be read here:
## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
# templates = [
@@ -239,12 +254,12 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
defer s.Unlock()
now := time.Now()
- for _, metric := range s.timings {
+ for _, m := range s.timings {
// Defining a template to parse field names for timers allows us to split
// out multiple fields per timer. In this case we prefix each stat with the
// field name and store these all in a single measurement.
fields := make(map[string]interface{})
- for fieldName, stats := range metric.fields {
+ for fieldName, stats := range m.fields {
var prefix string
if fieldName != defaultFieldName {
prefix = fieldName + "_"
@@ -256,46 +271,52 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
fields[prefix+"lower"] = stats.Lower()
fields[prefix+"count"] = stats.Count()
for _, percentile := range s.Percentiles {
- name := fmt.Sprintf("%s%v_percentile", prefix, percentile)
- fields[name] = stats.Percentile(percentile)
+ name := fmt.Sprintf("%s%v_percentile", prefix, percentile.Value)
+ fields[name] = stats.Percentile(percentile.Value)
}
}
- acc.AddFields(metric.name, fields, metric.tags, now)
+ acc.AddFields(m.name, fields, m.tags, now)
}
if s.DeleteTimings {
s.timings = make(map[string]cachedtimings)
}
- for _, metric := range s.gauges {
- acc.AddGauge(metric.name, metric.fields, metric.tags, now)
+ for _, m := range s.gauges {
+ acc.AddGauge(m.name, m.fields, m.tags, now)
}
if s.DeleteGauges {
s.gauges = make(map[string]cachedgauge)
}
- for _, metric := range s.counters {
- acc.AddCounter(metric.name, metric.fields, metric.tags, now)
+ for _, m := range s.counters {
+ acc.AddCounter(m.name, m.fields, m.tags, now)
}
if s.DeleteCounters {
s.counters = make(map[string]cachedcounter)
}
- for _, metric := range s.sets {
+ for _, m := range s.sets {
fields := make(map[string]interface{})
- for field, set := range metric.fields {
+ for field, set := range m.fields {
fields[field] = int64(len(set))
}
- acc.AddFields(metric.name, fields, metric.tags, now)
+ acc.AddFields(m.name, fields, m.tags, now)
}
if s.DeleteSets {
s.sets = make(map[string]cachedset)
}
-
return nil
}
-func (s *Statsd) Start(_ telegraf.Accumulator) error {
+func (s *Statsd) Start(ac telegraf.Accumulator) error {
+ if s.ParseDataDogTags {
+ s.DataDogExtensions = true
+ s.Log.Warn("'parse_data_dog_tags' config option is deprecated, please use 'datadog_extensions' instead")
+ }
+
+ s.acc = ac
+
// Make data structures
s.gauges = make(map[string]cachedgauge)
s.counters = make(map[string]cachedcounter)
@@ -312,10 +333,14 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
s.MaxConnections.Set(int64(s.MaxTCPConnections))
s.CurrentConnections = selfstat.Register("statsd", "tcp_current_connections", tags)
s.TotalConnections = selfstat.Register("statsd", "tcp_total_connections", tags)
- s.PacketsRecv = selfstat.Register("statsd", "tcp_packets_received", tags)
- s.BytesRecv = selfstat.Register("statsd", "tcp_bytes_received", tags)
-
- s.in = make(chan *bytes.Buffer, s.AllowedPendingMessages)
+ s.TCPPacketsRecv = selfstat.Register("statsd", "tcp_packets_received", tags)
+ s.TCPBytesRecv = selfstat.Register("statsd", "tcp_bytes_received", tags)
+ s.UDPPacketsRecv = selfstat.Register("statsd", "udp_packets_received", tags)
+ s.UDPPacketsDrop = selfstat.Register("statsd", "udp_packets_dropped", tags)
+ s.UDPBytesRecv = selfstat.Register("statsd", "udp_bytes_received", tags)
+ s.ParseTimeNS = selfstat.Register("statsd", "parse_time_ns", tags)
+
+ s.in = make(chan input, s.AllowedPendingMessages)
s.done = make(chan struct{})
s.accept = make(chan bool, s.MaxTCPConnections)
s.conns = make(map[string]*net.TCPConn)
@@ -329,8 +354,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
}
if s.ConvertNames {
- log.Printf("I! WARNING statsd: convert_names config option is deprecated," +
- " please use metric_separator instead")
+ s.Log.Warn("'convert_names' config option is deprecated, please use 'metric_separator' instead")
}
if s.MetricSeparator == "" {
@@ -348,7 +372,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
return err
}
- log.Println("I! Statsd UDP listener listening on: ", conn.LocalAddr().String())
+ s.Log.Infof("UDP listening on %q", conn.LocalAddr().String())
s.UDPlistener = conn
s.wg.Add(1)
@@ -366,7 +390,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
return err
}
- log.Println("I! TCP Statsd listening on: ", listener.Addr().String())
+ s.Log.Infof("TCP listening on %q", listener.Addr().String())
s.TCPlistener = listener
s.wg.Add(1)
@@ -376,13 +400,15 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
}()
}
- // Start the line parser
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- s.parser()
- }()
- log.Printf("I! Started the statsd service on %s\n", s.ServiceAddress)
+ for i := 1; i <= parserGoRoutines; i++ {
+ // Start the line parser
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ s.parser()
+ }()
+ }
+ s.Log.Infof("Started the statsd service on %q", s.ServiceAddress)
return nil
}
@@ -439,21 +465,31 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error {
case <-s.done:
return nil
default:
- n, _, err := conn.ReadFromUDP(buf)
- if err != nil && !strings.Contains(err.Error(), "closed network") {
- log.Printf("E! Error READ: %s\n", err.Error())
- continue
+ n, addr, err := conn.ReadFromUDP(buf)
+ if err != nil {
+ if !strings.Contains(err.Error(), "closed network") {
+ s.Log.Errorf("Error reading: %s", err.Error())
+ continue
+ }
+ return err
}
+ s.UDPPacketsRecv.Incr(1)
+ s.UDPBytesRecv.Incr(int64(n))
b := s.bufPool.Get().(*bytes.Buffer)
b.Reset()
b.Write(buf[:n])
-
select {
- case s.in <- b:
+ case s.in <- input{
+ Buffer: b,
+ Time: time.Now(),
+ Addr: addr.IP.String()}:
default:
+ s.UDPPacketsDrop.Incr(1)
s.drops++
if s.drops == 1 || s.AllowedPendingMessages == 0 || s.drops%s.AllowedPendingMessages == 0 {
- log.Printf(dropwarn, s.drops)
+ s.Log.Errorf("Statsd message queue full. "+
+ "We have dropped %d messages so far. "+
+ "You may want to increase allowed_pending_messages in the config", s.drops)
}
}
}
@@ -468,15 +504,22 @@ func (s *Statsd) parser() error {
select {
case <-s.done:
return nil
- case buf := <-s.in:
- lines := strings.Split(buf.String(), "\n")
- s.bufPool.Put(buf)
+ case in := <-s.in:
+ start := time.Now()
+ lines := strings.Split(in.Buffer.String(), "\n")
+ s.bufPool.Put(in.Buffer)
for _, line := range lines {
line = strings.TrimSpace(line)
- if line != "" {
+ switch {
+ case line == "":
+ case s.DataDogExtensions && strings.HasPrefix(line, "_e"):
+ s.parseEventMessage(in.Time, line, in.Addr)
+ default:
s.parseStatsdLine(line)
}
}
+ elapsed := time.Since(start)
+ s.ParseTimeNS.Set(elapsed.Nanoseconds())
}
}
}
@@ -488,7 +531,7 @@ func (s *Statsd) parseStatsdLine(line string) error {
defer s.Unlock()
lineTags := make(map[string]string)
- if s.ParseDataDogTags {
+ if s.DataDogExtensions {
recombinedSegments := make([]string, 0)
// datadog tags look like this:
// users.online:1|c|@0.5|#country:china,environment:production
@@ -499,24 +542,7 @@ func (s *Statsd) parseStatsdLine(line string) error {
for _, segment := range pipesplit {
if len(segment) > 0 && segment[0] == '#' {
// we have ourselves a tag; they are comma separated
- tagstr := segment[1:]
- tags := strings.Split(tagstr, ",")
- for _, tag := range tags {
- ts := strings.SplitN(tag, ":", 2)
- var k, v string
- switch len(ts) {
- case 1:
- // just a tag
- k = ts[0]
- v = ""
- case 2:
- k = ts[0]
- v = ts[1]
- }
- if k != "" {
- lineTags[k] = v
- }
- }
+ parseDataDogTags(lineTags, segment[1:])
} else {
recombinedSegments = append(recombinedSegments, segment)
}
@@ -527,8 +553,8 @@ func (s *Statsd) parseStatsdLine(line string) error {
// Validate splitting the line on ":"
bits := strings.Split(line, ":")
if len(bits) < 2 {
- log.Printf("E! Error: splitting ':', Unable to parse metric: %s\n", line)
- return errors.New("Error Parsing statsd line")
+ s.Log.Errorf("Splitting ':', unable to parse metric: %s", line)
+ return errors.New("error Parsing statsd line")
}
// Extract bucket name from individual metric bits
@@ -543,22 +569,22 @@ func (s *Statsd) parseStatsdLine(line string) error {
// Validate splitting the bit on "|"
pipesplit := strings.Split(bit, "|")
if len(pipesplit) < 2 {
- log.Printf("E! Error: splitting '|', Unable to parse metric: %s\n", line)
- return errors.New("Error Parsing statsd line")
+ s.Log.Errorf("Splitting '|', unable to parse metric: %s", line)
+ return errors.New("error parsing statsd line")
} else if len(pipesplit) > 2 {
sr := pipesplit[2]
- errmsg := "E! Error: parsing sample rate, %s, it must be in format like: " +
- "@0.1, @0.5, etc. Ignoring sample rate for line: %s\n"
+
if strings.Contains(sr, "@") && len(sr) > 1 {
samplerate, err := strconv.ParseFloat(sr[1:], 64)
if err != nil {
- log.Printf(errmsg, err.Error(), line)
+ s.Log.Errorf("Parsing sample rate: %s", err.Error())
} else {
// sample rate successfully parsed
m.samplerate = samplerate
}
} else {
- log.Printf(errmsg, "", line)
+ s.Log.Debugf("Sample rate must be in format like: "+
+ "@0.1, @0.5, etc. Ignoring sample rate for line: %s", line)
}
}
@@ -567,15 +593,15 @@ func (s *Statsd) parseStatsdLine(line string) error {
case "g", "c", "s", "ms", "h":
m.mtype = pipesplit[1]
default:
- log.Printf("E! Error: Statsd Metric type %s unsupported", pipesplit[1])
- return errors.New("Error Parsing statsd line")
+ s.Log.Errorf("Metric type %q unsupported", pipesplit[1])
+ return errors.New("error parsing statsd line")
}
// Parse the value
if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") {
if m.mtype != "g" && m.mtype != "c" {
- log.Printf("E! Error: +- values are only supported for gauges & counters: %s\n", line)
- return errors.New("Error Parsing statsd line")
+ s.Log.Errorf("+- values are only supported for gauges & counters, unable to parse metric: %s", line)
+ return errors.New("error parsing statsd line")
}
m.additive = true
}
@@ -584,8 +610,8 @@ func (s *Statsd) parseStatsdLine(line string) error {
case "g", "ms", "h":
v, err := strconv.ParseFloat(pipesplit[0], 64)
if err != nil {
- log.Printf("E! Error: parsing value to float64: %s\n", line)
- return errors.New("Error Parsing statsd line")
+ s.Log.Errorf("Parsing value to float64, unable to parse metric: %s", line)
+ return errors.New("error parsing statsd line")
}
m.floatvalue = v
case "c":
@@ -594,8 +620,8 @@ func (s *Statsd) parseStatsdLine(line string) error {
if err != nil {
v2, err2 := strconv.ParseFloat(pipesplit[0], 64)
if err2 != nil {
- log.Printf("E! Error: parsing value to int64: %s\n", line)
- return errors.New("Error Parsing statsd line")
+ s.Log.Errorf("Parsing value to int64, unable to parse metric: %s", line)
+ return errors.New("error parsing statsd line")
}
v = int64(v2)
}
@@ -622,7 +648,6 @@ func (s *Statsd) parseStatsdLine(line string) error {
case "h":
m.tags["metric_type"] = "histogram"
}
-
if len(lineTags) > 0 {
for k, v := range lineTags {
m.tags[k] = v
@@ -808,6 +833,11 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
s.CurrentConnections.Incr(-1)
}()
+ var remoteIP string
+ if addr, ok := conn.RemoteAddr().(*net.TCPAddr); ok {
+ remoteIP = addr.IP.String()
+ }
+
var n int
scanner := bufio.NewScanner(conn)
for {
@@ -822,8 +852,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
if n == 0 {
continue
}
- s.BytesRecv.Incr(int64(n))
- s.PacketsRecv.Incr(1)
+ s.TCPBytesRecv.Incr(int64(n))
+ s.TCPPacketsRecv.Incr(1)
b := s.bufPool.Get().(*bytes.Buffer)
b.Reset()
@@ -831,11 +861,13 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
b.WriteByte('\n')
select {
- case s.in <- b:
+ case s.in <- input{Buffer: b, Time: time.Now(), Addr: remoteIP}:
default:
s.drops++
if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 {
- log.Printf(dropwarn, s.drops)
+ s.Log.Errorf("Statsd message queue full. "+
+ "We have dropped %d messages so far. "+
+ "You may want to increase allowed_pending_messages in the config", s.drops)
}
}
}
@@ -845,9 +877,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
// refuser refuses a TCP connection
func (s *Statsd) refuser(conn *net.TCPConn) {
conn.Close()
- log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr())
- log.Printf("I! WARNING: Maximum TCP Connections reached, you may want to" +
- " adjust max_tcp_connections")
+ s.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr())
+ s.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections")
}
// forget a TCP connection
@@ -866,7 +897,7 @@ func (s *Statsd) remember(id string, conn *net.TCPConn) {
func (s *Statsd) Stop() {
s.Lock()
- log.Println("I! Stopping the statsd service")
+ s.Log.Infof("Stopping the statsd service")
close(s.done)
if s.isUDP() {
s.UDPlistener.Close()
@@ -892,7 +923,7 @@ func (s *Statsd) Stop() {
s.Lock()
close(s.in)
- log.Println("I! Stopped Statsd listener service on ", s.ServiceAddress)
+ s.Log.Infof("Stopped listener service on %q", s.ServiceAddress)
s.Unlock()
}
diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go
index 1e50c8341f7d1..f76681134a094 100644
--- a/plugins/inputs/statsd/statsd_test.go
+++ b/plugins/inputs/statsd/statsd_test.go
@@ -1,41 +1,30 @@
package statsd
import (
- "bytes"
- "errors"
"fmt"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"net"
+ "sync"
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)
const (
- testMsg = "test.tcp.msg:100|c"
+ testMsg = "test.tcp.msg:100|c"
+ producerThreads = 10
)
-func newTestTcpListener() (*Statsd, chan *bytes.Buffer) {
- in := make(chan *bytes.Buffer, 1500)
- listener := &Statsd{
- Protocol: "tcp",
- ServiceAddress: "localhost:8125",
- AllowedPendingMessages: 10000,
- MaxTCPConnections: 250,
- in: in,
- done: make(chan struct{}),
- }
- return listener, in
-}
-
func NewTestStatsd() *Statsd {
- s := Statsd{}
+ s := Statsd{Log: testutil.Logger{}}
// Make data structures
s.done = make(chan struct{})
- s.in = make(chan *bytes.Buffer, s.AllowedPendingMessages)
+ s.in = make(chan input, s.AllowedPendingMessages)
s.gauges = make(map[string]cachedgauge)
s.counters = make(map[string]cachedcounter)
s.sets = make(map[string]cachedset)
@@ -46,9 +35,10 @@ func NewTestStatsd() *Statsd {
return &s
}
-// Test that MaxTCPConections is respected
+// Test that MaxTCPConnections is respected
func TestConcurrentConns(t *testing.T) {
listener := Statsd{
+ Log: testutil.Logger{},
Protocol: "tcp",
ServiceAddress: "localhost:8125",
AllowedPendingMessages: 10000,
@@ -76,9 +66,10 @@ func TestConcurrentConns(t *testing.T) {
assert.Zero(t, acc.NFields())
}
-// Test that MaxTCPConections is respected when max==1
+// Test that MaxTCPConnections is respected when max==1
func TestConcurrentConns1(t *testing.T) {
listener := Statsd{
+ Log: testutil.Logger{},
Protocol: "tcp",
ServiceAddress: "localhost:8125",
AllowedPendingMessages: 10000,
@@ -104,9 +95,10 @@ func TestConcurrentConns1(t *testing.T) {
assert.Zero(t, acc.NFields())
}
-// Test that MaxTCPConections is respected
+// Test that MaxTCPConnections is respected
func TestCloseConcurrentConns(t *testing.T) {
listener := Statsd{
+ Log: testutil.Logger{},
Protocol: "tcp",
ServiceAddress: "localhost:8125",
AllowedPendingMessages: 10000,
@@ -128,6 +120,7 @@ func TestCloseConcurrentConns(t *testing.T) {
// benchmark how long it takes to accept & process 100,000 metrics:
func BenchmarkUDP(b *testing.B) {
listener := Statsd{
+ Log: testutil.Logger{},
Protocol: "udp",
ServiceAddress: "localhost:8125",
AllowedPendingMessages: 250000,
@@ -146,18 +139,34 @@ func BenchmarkUDP(b *testing.B) {
if err != nil {
panic(err)
}
- for i := 0; i < 250000; i++ {
- fmt.Fprintf(conn, testMsg)
+
+ var wg sync.WaitGroup
+ for i := 1; i <= producerThreads; i++ {
+ wg.Add(1)
+ go sendRequests(conn, &wg)
}
+ wg.Wait()
+
// wait for 250,000 metrics to get added to accumulator
- time.Sleep(time.Millisecond)
+ for len(listener.in) > 0 {
+ fmt.Printf("Left in buffer: %v \n", len(listener.in))
+ time.Sleep(time.Millisecond)
+ }
listener.Stop()
}
}
+func sendRequests(conn net.Conn, wg *sync.WaitGroup) {
+ defer wg.Done()
+ for i := 0; i < 25000; i++ {
+ fmt.Fprintf(conn, testMsg)
+ }
+}
+
// benchmark how long it takes to accept & process 100,000 metrics:
func BenchmarkTCP(b *testing.B) {
listener := Statsd{
+ Log: testutil.Logger{},
Protocol: "tcp",
ServiceAddress: "localhost:8125",
AllowedPendingMessages: 250000,
@@ -177,11 +186,16 @@ func BenchmarkTCP(b *testing.B) {
if err != nil {
panic(err)
}
- for i := 0; i < 250000; i++ {
- fmt.Fprintf(conn, testMsg)
+ var wg sync.WaitGroup
+ for i := 1; i <= producerThreads; i++ {
+ wg.Add(1)
+ go sendRequests(conn, &wg)
}
+ wg.Wait()
// wait for 250,000 metrics to get added to accumulator
- time.Sleep(time.Millisecond)
+ for len(listener.in) > 0 {
+ time.Sleep(time.Millisecond)
+ }
listener.Stop()
}
}
@@ -189,7 +203,7 @@ func BenchmarkTCP(b *testing.B) {
// Valid lines should be parsed and their values should be cached
func TestParse_ValidLines(t *testing.T) {
s := NewTestStatsd()
- valid_lines := []string{
+ validLines := []string{
"valid:45|c",
"valid:45|s",
"valid:45|g",
@@ -197,7 +211,7 @@ func TestParse_ValidLines(t *testing.T) {
"valid.timer:45|h",
}
- for _, line := range valid_lines {
+ for _, line := range validLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
@@ -210,7 +224,7 @@ func TestParse_Gauges(t *testing.T) {
s := NewTestStatsd()
// Test that gauge +- values work
- valid_lines := []string{
+ validLines := []string{
"plus.minus:100|g",
"plus.minus:-10|g",
"plus.minus:+30|g",
@@ -228,7 +242,7 @@ func TestParse_Gauges(t *testing.T) {
"scientific.notation.minus:4.7E-5|g",
}
- for _, line := range valid_lines {
+ for _, line := range validLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
@@ -274,7 +288,7 @@ func TestParse_Gauges(t *testing.T) {
}
for _, test := range validations {
- err := test_validate_gauge(test.name, test.value, s.gauges)
+ err := testValidateGauge(test.name, test.value, s.gauges)
if err != nil {
t.Error(err.Error())
}
@@ -286,7 +300,7 @@ func TestParse_Sets(t *testing.T) {
s := NewTestStatsd()
// Test that sets work
- valid_lines := []string{
+ validLines := []string{
"unique.user.ids:100|s",
"unique.user.ids:100|s",
"unique.user.ids:100|s",
@@ -306,7 +320,7 @@ func TestParse_Sets(t *testing.T) {
"string.sets:bar|s",
}
- for _, line := range valid_lines {
+ for _, line := range validLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
@@ -336,7 +350,7 @@ func TestParse_Sets(t *testing.T) {
}
for _, test := range validations {
- err := test_validate_set(test.name, test.value, s.sets)
+ err := testValidateSet(test.name, test.value, s.sets)
if err != nil {
t.Error(err.Error())
}
@@ -348,7 +362,7 @@ func TestParse_Counters(t *testing.T) {
s := NewTestStatsd()
// Test that counters work
- valid_lines := []string{
+ validLines := []string{
"small.inc:1|c",
"big.inc:100|c",
"big.inc:1|c",
@@ -363,7 +377,7 @@ func TestParse_Counters(t *testing.T) {
"negative.test:-5|c",
}
- for _, line := range valid_lines {
+ for _, line := range validLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
@@ -401,7 +415,7 @@ func TestParse_Counters(t *testing.T) {
}
for _, test := range validations {
- err := test_validate_counter(test.name, test.value, s.counters)
+ err := testValidateCounter(test.name, test.value, s.counters)
if err != nil {
t.Error(err.Error())
}
@@ -411,11 +425,11 @@ func TestParse_Counters(t *testing.T) {
// Tests low-level functionality of timings
func TestParse_Timings(t *testing.T) {
s := NewTestStatsd()
- s.Percentiles = []int{90}
+ s.Percentiles = []internal.Number{{Value: 90.0}}
acc := &testutil.Accumulator{}
// Test that counters work
- valid_lines := []string{
+ validLines := []string{
"test.timing:1|ms",
"test.timing:11|ms",
"test.timing:1|ms",
@@ -423,7 +437,7 @@ func TestParse_Timings(t *testing.T) {
"test.timing:1|ms",
}
- for _, line := range valid_lines {
+ for _, line := range validLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
@@ -464,7 +478,7 @@ func TestParseScientificNotation(t *testing.T) {
// Invalid lines should return an error
func TestParse_InvalidLines(t *testing.T) {
s := NewTestStatsd()
- invalid_lines := []string{
+ invalidLines := []string{
"i.dont.have.a.pipe:45g",
"i.dont.have.a.colon45|c",
"invalid.metric.type:45|e",
@@ -475,7 +489,7 @@ func TestParse_InvalidLines(t *testing.T) {
"invalid.value:d11|c",
"invalid.value:1d1|c",
}
- for _, line := range invalid_lines {
+ for _, line := range invalidLines {
err := s.parseStatsdLine(line)
if err == nil {
t.Errorf("Parsing line %s should have resulted in an error\n", line)
@@ -486,21 +500,21 @@ func TestParse_InvalidLines(t *testing.T) {
// Invalid sample rates should be ignored and not applied
func TestParse_InvalidSampleRate(t *testing.T) {
s := NewTestStatsd()
- invalid_lines := []string{
+ invalidLines := []string{
"invalid.sample.rate:45|c|0.1",
"invalid.sample.rate.2:45|c|@foo",
"invalid.sample.rate:45|g|@0.1",
"invalid.sample.rate:45|s|@0.1",
}
- for _, line := range invalid_lines {
+ for _, line := range invalidLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}
- counter_validations := []struct {
+ counterValidations := []struct {
name string
value int64
cache map[string]cachedcounter
@@ -517,19 +531,19 @@ func TestParse_InvalidSampleRate(t *testing.T) {
},
}
- for _, test := range counter_validations {
- err := test_validate_counter(test.name, test.value, test.cache)
+ for _, test := range counterValidations {
+ err := testValidateCounter(test.name, test.value, test.cache)
if err != nil {
t.Error(err.Error())
}
}
- err := test_validate_gauge("invalid_sample_rate", 45, s.gauges)
+ err := testValidateGauge("invalid_sample_rate", 45, s.gauges)
if err != nil {
t.Error(err.Error())
}
- err = test_validate_set("invalid_sample_rate", 1, s.sets)
+ err = testValidateSet("invalid_sample_rate", 1, s.sets)
if err != nil {
t.Error(err.Error())
}
@@ -538,12 +552,12 @@ func TestParse_InvalidSampleRate(t *testing.T) {
// Names should be parsed like . -> _
func TestParse_DefaultNameParsing(t *testing.T) {
s := NewTestStatsd()
- valid_lines := []string{
+ validLines := []string{
"valid:1|c",
"valid.foo-bar:11|c",
}
- for _, line := range valid_lines {
+ for _, line := range validLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
@@ -565,7 +579,7 @@ func TestParse_DefaultNameParsing(t *testing.T) {
}
for _, test := range validations {
- err := test_validate_counter(test.name, test.value, s.counters)
+ err := testValidateCounter(test.name, test.value, s.counters)
if err != nil {
t.Error(err.Error())
}
@@ -607,7 +621,7 @@ func TestParse_Template(t *testing.T) {
// Validate counters
for _, test := range validations {
- err := test_validate_counter(test.name, test.value, s.counters)
+ err := testValidateCounter(test.name, test.value, s.counters)
if err != nil {
t.Error(err.Error())
}
@@ -649,7 +663,7 @@ func TestParse_TemplateFilter(t *testing.T) {
// Validate counters
for _, test := range validations {
- err := test_validate_counter(test.name, test.value, s.counters)
+ err := testValidateCounter(test.name, test.value, s.counters)
if err != nil {
t.Error(err.Error())
}
@@ -687,7 +701,7 @@ func TestParse_TemplateSpecificity(t *testing.T) {
// Validate counters
for _, test := range validations {
- err := test_validate_counter(test.name, test.value, s.counters)
+ err := testValidateCounter(test.name, test.value, s.counters)
if err != nil {
t.Error(err.Error())
}
@@ -723,7 +737,7 @@ func TestParse_TemplateFields(t *testing.T) {
}
}
- counter_tests := []struct {
+ counterTests := []struct {
name string
value int64
field string
@@ -745,14 +759,14 @@ func TestParse_TemplateFields(t *testing.T) {
},
}
// Validate counters
- for _, test := range counter_tests {
- err := test_validate_counter(test.name, test.value, s.counters, test.field)
+ for _, test := range counterTests {
+ err := testValidateCounter(test.name, test.value, s.counters, test.field)
if err != nil {
t.Error(err.Error())
}
}
- gauge_tests := []struct {
+ gaugeTests := []struct {
name string
value float64
field string
@@ -769,14 +783,14 @@ func TestParse_TemplateFields(t *testing.T) {
},
}
// Validate gauges
- for _, test := range gauge_tests {
- err := test_validate_gauge(test.name, test.value, s.gauges, test.field)
+ for _, test := range gaugeTests {
+ err := testValidateGauge(test.name, test.value, s.gauges, test.field)
if err != nil {
t.Error(err.Error())
}
}
- set_tests := []struct {
+ setTests := []struct {
name string
value int64
field string
@@ -793,8 +807,8 @@ func TestParse_TemplateFields(t *testing.T) {
},
}
// Validate sets
- for _, test := range set_tests {
- err := test_validate_set(test.name, test.value, s.sets, test.field)
+ for _, test := range setTests {
+ err := testValidateSet(test.name, test.value, s.sets, test.field)
if err != nil {
t.Error(err.Error())
}
@@ -861,83 +875,125 @@ func TestParse_Tags(t *testing.T) {
}
}
-// Test that DataDog tags are parsed
func TestParse_DataDogTags(t *testing.T) {
- s := NewTestStatsd()
- s.ParseDataDogTags = true
-
- lines := []string{
- "my_counter:1|c|#host:localhost,environment:prod,endpoint:/:tenant?/oauth/ro",
- "my_gauge:10.1|g|#live",
- "my_set:1|s|#host:localhost",
- "my_timer:3|ms|@0.1|#live,host:localhost",
- }
-
- testTags := map[string]map[string]string{
- "my_counter": {
- "host": "localhost",
- "environment": "prod",
- "endpoint": "/:tenant?/oauth/ro",
+ tests := []struct {
+ name string
+ line string
+ expected []telegraf.Metric
+ }{
+ {
+ name: "counter",
+ line: "my_counter:1|c|#host:localhost,environment:prod,endpoint:/:tenant?/oauth/ro",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "my_counter",
+ map[string]string{
+ "endpoint": "/:tenant?/oauth/ro",
+ "environment": "prod",
+ "host": "localhost",
+ "metric_type": "counter",
+ },
+ map[string]interface{}{
+ "value": 1,
+ },
+ time.Now(),
+ telegraf.Counter,
+ ),
+ },
},
-
- "my_gauge": {
- "live": "",
+ {
+ name: "gauge",
+ line: "my_gauge:10.1|g|#live",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "my_gauge",
+ map[string]string{
+ "live": "true",
+ "metric_type": "gauge",
+ },
+ map[string]interface{}{
+ "value": 10.1,
+ },
+ time.Now(),
+ telegraf.Gauge,
+ ),
+ },
},
-
- "my_set": {
- "host": "localhost",
+ {
+ name: "set",
+ line: "my_set:1|s|#host:localhost",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "my_set",
+ map[string]string{
+ "host": "localhost",
+ "metric_type": "set",
+ },
+ map[string]interface{}{
+ "value": 1,
+ },
+ time.Now(),
+ ),
+ },
},
-
- "my_timer": {
- "live": "",
- "host": "localhost",
+ {
+ name: "timer",
+ line: "my_timer:3|ms|@0.1|#live,host:localhost",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "my_timer",
+ map[string]string{
+ "host": "localhost",
+ "live": "true",
+ "metric_type": "timing",
+ },
+ map[string]interface{}{
+ "count": 10,
+ "lower": float64(3),
+ "mean": float64(3),
+ "stddev": float64(0),
+ "sum": float64(30),
+ "upper": float64(3),
+ },
+ time.Now(),
+ ),
+ },
+ },
+ {
+ name: "empty tag set",
+ line: "cpu:42|c|#",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "metric_type": "counter",
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Now(),
+ telegraf.Counter,
+ ),
+ },
},
}
- for _, line := range lines {
- err := s.parseStatsdLine(line)
- if err != nil {
- t.Errorf("Parsing line %s should not have resulted in an error\n", line)
- }
- }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var acc testutil.Accumulator
- sourceTags := map[string]map[string]string{
- "my_gauge": tagsForItem(s.gauges),
- "my_counter": tagsForItem(s.counters),
- "my_set": tagsForItem(s.sets),
- "my_timer": tagsForItem(s.timings),
- }
+ s := NewTestStatsd()
+ s.DataDogExtensions = true
- for statName, tags := range testTags {
- for k, v := range tags {
- otherValue := sourceTags[statName][k]
- if sourceTags[statName][k] != v {
- t.Errorf("Error with %s, tag %s: %s != %s", statName, k, v, otherValue)
- }
- }
- }
-}
+ err := s.parseStatsdLine(tt.line)
+ require.NoError(t, err)
+ err = s.Gather(&acc)
+ require.NoError(t, err)
-func tagsForItem(m interface{}) map[string]string {
- switch m.(type) {
- case map[string]cachedcounter:
- for _, v := range m.(map[string]cachedcounter) {
- return v.tags
- }
- case map[string]cachedgauge:
- for _, v := range m.(map[string]cachedgauge) {
- return v.tags
- }
- case map[string]cachedset:
- for _, v := range m.(map[string]cachedset) {
- return v.tags
- }
- case map[string]cachedtimings:
- for _, v := range m.(map[string]cachedtimings) {
- return v.tags
- }
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(),
+ testutil.SortMetrics(), testutil.IgnoreTime())
+ })
}
- return nil
}
// Test that statsd buckets are parsed to measurement names properly
@@ -945,8 +1001,8 @@ func TestParseName(t *testing.T) {
s := NewTestStatsd()
tests := []struct {
- in_name string
- out_name string
+ inName string
+ outName string
}{
{
"foobar",
@@ -963,9 +1019,9 @@ func TestParseName(t *testing.T) {
}
for _, test := range tests {
- name, _, _ := s.parseName(test.in_name)
- if name != test.out_name {
- t.Errorf("Expected: %s, got %s", test.out_name, name)
+ name, _, _ := s.parseName(test.inName)
+ if name != test.outName {
+ t.Errorf("Expected: %s, got %s", test.outName, name)
}
}
@@ -973,8 +1029,8 @@ func TestParseName(t *testing.T) {
s.MetricSeparator = "."
tests = []struct {
- in_name string
- out_name string
+ inName string
+ outName string
}{
{
"foobar",
@@ -991,9 +1047,9 @@ func TestParseName(t *testing.T) {
}
for _, test := range tests {
- name, _, _ := s.parseName(test.in_name)
- if name != test.out_name {
- t.Errorf("Expected: %s, got %s", test.out_name, name)
+ name, _, _ := s.parseName(test.inName)
+ if name != test.outName {
+ t.Errorf("Expected: %s, got %s", test.outName, name)
}
}
}
@@ -1004,12 +1060,12 @@ func TestParse_MeasurementsWithSameName(t *testing.T) {
s := NewTestStatsd()
// Test that counters work
- valid_lines := []string{
+ validLines := []string{
"test.counter,host=localhost:1|c",
"test.counter,host=localhost,region=west:1|c",
}
- for _, line := range valid_lines {
+ for _, line := range validLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
@@ -1024,7 +1080,7 @@ func TestParse_MeasurementsWithSameName(t *testing.T) {
// Test that measurements with multiple bits, are treated as different outputs
// but are equal to their single-measurement representation
func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
- single_lines := []string{
+ singleLines := []string{
"valid.multiple:0|ms|@0.1",
"valid.multiple:0|ms|",
"valid.multiple:1|ms",
@@ -1050,7 +1106,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
"valid.multiple.mixed:1|g",
}
- multiple_lines := []string{
+ multipleLines := []string{
"valid.multiple:0|ms|@0.1:0|ms|:1|ms",
"valid.multiple.duplicate:1|c:1|c:2|c:1|c",
"valid.multiple.duplicate:1|h:1|h:2|h:1|h",
@@ -1059,28 +1115,28 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
"valid.multiple.mixed:1|c:1|ms:2|s:1|g",
}
- s_single := NewTestStatsd()
- s_multiple := NewTestStatsd()
+ sSingle := NewTestStatsd()
+ sMultiple := NewTestStatsd()
- for _, line := range single_lines {
- err := s_single.parseStatsdLine(line)
+ for _, line := range singleLines {
+ err := sSingle.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}
- for _, line := range multiple_lines {
- err := s_multiple.parseStatsdLine(line)
+ for _, line := range multipleLines {
+ err := sMultiple.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}
- if len(s_single.timings) != 3 {
- t.Errorf("Expected 3 measurement, found %d", len(s_single.timings))
+ if len(sSingle.timings) != 3 {
+ t.Errorf("Expected 3 measurement, found %d", len(sSingle.timings))
}
- if cachedtiming, ok := s_single.timings["metric_type=timingvalid_multiple"]; !ok {
+ if cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"]; !ok {
t.Errorf("Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found")
} else {
if cachedtiming.name != "valid_multiple" {
@@ -1100,63 +1156,63 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
}
}
- // test if s_single and s_multiple did compute the same stats for valid.multiple.duplicate
- if err := test_validate_set("valid_multiple_duplicate", 2, s_single.sets); err != nil {
+ // test if sSingle and sMultiple did compute the same stats for valid.multiple.duplicate
+ if err := testValidateSet("valid_multiple_duplicate", 2, sSingle.sets); err != nil {
t.Error(err.Error())
}
- if err := test_validate_set("valid_multiple_duplicate", 2, s_multiple.sets); err != nil {
+ if err := testValidateSet("valid_multiple_duplicate", 2, sMultiple.sets); err != nil {
t.Error(err.Error())
}
- if err := test_validate_counter("valid_multiple_duplicate", 5, s_single.counters); err != nil {
+ if err := testValidateCounter("valid_multiple_duplicate", 5, sSingle.counters); err != nil {
t.Error(err.Error())
}
- if err := test_validate_counter("valid_multiple_duplicate", 5, s_multiple.counters); err != nil {
+ if err := testValidateCounter("valid_multiple_duplicate", 5, sMultiple.counters); err != nil {
t.Error(err.Error())
}
- if err := test_validate_gauge("valid_multiple_duplicate", 1, s_single.gauges); err != nil {
+ if err := testValidateGauge("valid_multiple_duplicate", 1, sSingle.gauges); err != nil {
t.Error(err.Error())
}
- if err := test_validate_gauge("valid_multiple_duplicate", 1, s_multiple.gauges); err != nil {
+ if err := testValidateGauge("valid_multiple_duplicate", 1, sMultiple.gauges); err != nil {
t.Error(err.Error())
}
- // test if s_single and s_multiple did compute the same stats for valid.multiple.mixed
- if err := test_validate_set("valid_multiple_mixed", 1, s_single.sets); err != nil {
+ // test if sSingle and sMultiple did compute the same stats for valid.multiple.mixed
+ if err := testValidateSet("valid_multiple_mixed", 1, sSingle.sets); err != nil {
t.Error(err.Error())
}
- if err := test_validate_set("valid_multiple_mixed", 1, s_multiple.sets); err != nil {
+ if err := testValidateSet("valid_multiple_mixed", 1, sMultiple.sets); err != nil {
t.Error(err.Error())
}
- if err := test_validate_counter("valid_multiple_mixed", 1, s_single.counters); err != nil {
+ if err := testValidateCounter("valid_multiple_mixed", 1, sSingle.counters); err != nil {
t.Error(err.Error())
}
- if err := test_validate_counter("valid_multiple_mixed", 1, s_multiple.counters); err != nil {
+ if err := testValidateCounter("valid_multiple_mixed", 1, sMultiple.counters); err != nil {
t.Error(err.Error())
}
- if err := test_validate_gauge("valid_multiple_mixed", 1, s_single.gauges); err != nil {
+ if err := testValidateGauge("valid_multiple_mixed", 1, sSingle.gauges); err != nil {
t.Error(err.Error())
}
- if err := test_validate_gauge("valid_multiple_mixed", 1, s_multiple.gauges); err != nil {
+ if err := testValidateGauge("valid_multiple_mixed", 1, sMultiple.gauges); err != nil {
t.Error(err.Error())
}
}
// Tests low-level functionality of timings when multiple fields is enabled
// and a measurement template has been defined which can parse field names
-func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) {
+func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) {
s := NewTestStatsd()
s.Templates = []string{"measurement.field"}
- s.Percentiles = []int{90}
+ s.Percentiles = []internal.Number{{Value: 90.0}}
acc := &testutil.Accumulator{}
validLines := []string{
@@ -1204,10 +1260,10 @@ func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) {
// Tests low-level functionality of timings when multiple fields is enabled
// but a measurement template hasn't been defined so we can't parse field names
// In this case the behaviour should be the same as normal behaviour
-func TestParse_Timings_MultipleFieldsWithoutTemplate(t *testing.T) {
+func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) {
s := NewTestStatsd()
s.Templates = []string{}
- s.Percentiles = []int{90}
+ s.Percentiles = []internal.Number{{Value: 90.0}}
acc := &testutil.Accumulator{}
validLines := []string{
@@ -1420,14 +1476,14 @@ func TestParse_Gauges_Delete(t *testing.T) {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
- err = test_validate_gauge("current_users", 100, s.gauges)
+ err = testValidateGauge("current_users", 100, s.gauges)
if err != nil {
t.Error(err.Error())
}
s.Gather(fakeacc)
- err = test_validate_gauge("current_users", 100, s.gauges)
+ err = testValidateGauge("current_users", 100, s.gauges)
if err == nil {
t.Error("current_users_gauge metric should have been deleted")
}
@@ -1446,14 +1502,14 @@ func TestParse_Sets_Delete(t *testing.T) {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
- err = test_validate_set("unique_user_ids", 1, s.sets)
+ err = testValidateSet("unique_user_ids", 1, s.sets)
if err != nil {
t.Error(err.Error())
}
s.Gather(fakeacc)
- err = test_validate_set("unique_user_ids", 1, s.sets)
+ err = testValidateSet("unique_user_ids", 1, s.sets)
if err == nil {
t.Error("unique_user_ids_set metric should have been deleted")
}
@@ -1472,14 +1528,14 @@ func TestParse_Counters_Delete(t *testing.T) {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
- err = test_validate_counter("total_users", 100, s.counters)
+ err = testValidateCounter("total_users", 100, s.counters)
if err != nil {
t.Error(err.Error())
}
s.Gather(fakeacc)
- err = test_validate_counter("total_users", 100, s.counters)
+ err = testValidateCounter("total_users", 100, s.counters)
if err == nil {
t.Error("total_users_counter metric should have been deleted")
}
@@ -1504,8 +1560,7 @@ func TestParseKeyValue(t *testing.T) {
}
// Test utility functions
-
-func test_validate_set(
+func testValidateSet(
name string,
value int64,
cache map[string]cachedset,
@@ -1527,17 +1582,16 @@ func test_validate_set(
}
}
if !found {
- return errors.New(fmt.Sprintf("Test Error: Metric name %s not found\n", name))
+ return fmt.Errorf("test Error: Metric name %s not found", name)
}
if value != int64(len(metric.fields[f])) {
- return errors.New(fmt.Sprintf("Measurement: %s, expected %d, actual %d\n",
- name, value, len(metric.fields[f])))
+ return fmt.Errorf("measurement: %s, expected %d, actual %d", name, value, len(metric.fields[f]))
}
return nil
}
-func test_validate_counter(
+func testValidateCounter(
name string,
valueExpected int64,
cache map[string]cachedcounter,
@@ -1559,17 +1613,16 @@ func test_validate_counter(
}
}
if !found {
- return errors.New(fmt.Sprintf("Test Error: Metric name %s not found\n", name))
+ return fmt.Errorf("test Error: Metric name %s not found", name)
}
if valueExpected != valueActual {
- return errors.New(fmt.Sprintf("Measurement: %s, expected %d, actual %d\n",
- name, valueExpected, valueActual))
+ return fmt.Errorf("measurement: %s, expected %d, actual %d", name, valueExpected, valueActual)
}
return nil
}
-func test_validate_gauge(
+func testValidateGauge(
name string,
valueExpected float64,
cache map[string]cachedgauge,
@@ -1591,12 +1644,104 @@ func test_validate_gauge(
}
}
if !found {
- return errors.New(fmt.Sprintf("Test Error: Metric name %s not found\n", name))
+ return fmt.Errorf("test Error: Metric name %s not found", name)
}
if valueExpected != valueActual {
- return errors.New(fmt.Sprintf("Measurement: %s, expected %f, actual %f\n",
- name, valueExpected, valueActual))
+ return fmt.Errorf("Measurement: %s, expected %f, actual %f", name, valueExpected, valueActual)
}
return nil
}
+
+func TestTCP(t *testing.T) {
+ statsd := Statsd{
+ Log: testutil.Logger{},
+ Protocol: "tcp",
+ ServiceAddress: "localhost:0",
+ AllowedPendingMessages: 10000,
+ MaxTCPConnections: 2,
+ }
+ var acc testutil.Accumulator
+ require.NoError(t, statsd.Start(&acc))
+ defer statsd.Stop()
+
+ addr := statsd.TCPlistener.Addr().String()
+
+ conn, err := net.Dial("tcp", addr)
+ _, err = conn.Write([]byte("cpu.time_idle:42|c\n"))
+ require.NoError(t, err)
+ err = conn.Close()
+ require.NoError(t, err)
+
+ for {
+ err = statsd.Gather(&acc)
+ require.NoError(t, err)
+
+ if len(acc.Metrics) > 0 {
+ break
+ }
+ }
+
+ testutil.RequireMetricsEqual(t,
+ []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu_time_idle",
+ map[string]string{
+ "metric_type": "counter",
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Now(),
+ telegraf.Counter,
+ ),
+ },
+ acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime(),
+ )
+}
+
+func TestUdp(t *testing.T) {
+ statsd := Statsd{
+ Log: testutil.Logger{},
+ Protocol: "udp",
+ ServiceAddress: "localhost:8125",
+ AllowedPendingMessages: 250000,
+ }
+ var acc testutil.Accumulator
+ require.NoError(t, statsd.Start(&acc))
+ defer statsd.Stop()
+
+ conn, err := net.Dial("udp", "127.0.0.1:8125")
+ _, err = conn.Write([]byte("cpu.time_idle:42|c\n"))
+ require.NoError(t, err)
+ err = conn.Close()
+ require.NoError(t, err)
+
+ for {
+ err = statsd.Gather(&acc)
+ require.NoError(t, err)
+
+ if len(acc.Metrics) > 0 {
+ break
+ }
+ }
+
+ testutil.RequireMetricsEqual(t,
+ []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu_time_idle",
+ map[string]string{
+ "metric_type": "counter",
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Now(),
+ telegraf.Counter,
+ ),
+ },
+ acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime(),
+ )
+}
diff --git a/plugins/inputs/suricata/README.md b/plugins/inputs/suricata/README.md
new file mode 100644
index 0000000000000..18b26298e7af4
--- /dev/null
+++ b/plugins/inputs/suricata/README.md
@@ -0,0 +1,140 @@
+# Suricata Input Plugin
+
+This plugin reports internal performance counters of the Suricata IDS/IPS
+engine, such as captured traffic volume, memory usage, uptime, flow counters,
+and much more. It provides a socket for the Suricata log output to write JSON
+stats output to, and processes the incoming data to fit Telegraf's format.
+
+### Configuration
+
+```toml
+[[inputs.suricata]]
+ ## Data sink for Suricata stats log.
+ # This is expected to be a filename of a
+ # unix socket to be created for listening.
+ source = "/var/run/suricata-stats.sock"
+
+ # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
+ # becomes "detect_alert" when delimiter is "_".
+ delimiter = "_"
+```
+
+### Metrics
+
+Fields in the 'suricata' measurement follow the JSON format used by Suricata's
+stats output.
+See http://suricata.readthedocs.io/en/latest/performance/statistics.html for
+more information.
+
+All fields are numeric.
+- suricata
+ - tags:
+ - thread: `Global` for global statistics (if enabled), thread IDs (e.g. `W#03-enp0s31f6`) for thread-specific statistics
+ - fields:
+ - app_layer_flow_dcerpc_udp
+ - app_layer_flow_dns_tcp
+ - app_layer_flow_dns_udp
+ - app_layer_flow_enip_udp
+ - app_layer_flow_failed_tcp
+ - app_layer_flow_failed_udp
+ - app_layer_flow_http
+ - app_layer_flow_ssh
+ - app_layer_flow_tls
+ - app_layer_tx_dns_tcp
+ - app_layer_tx_dns_udp
+ - app_layer_tx_enip_udp
+ - app_layer_tx_http
+ - app_layer_tx_smtp
+ - capture_kernel_drops
+ - capture_kernel_packets
+ - decoder_avg_pkt_size
+ - decoder_bytes
+ - decoder_ethernet
+ - decoder_gre
+ - decoder_icmpv4
+ - decoder_icmpv4_ipv4_unknown_ver
+ - decoder_icmpv6
+ - decoder_invalid
+ - decoder_ipv4
+ - decoder_ipv6
+ - decoder_max_pkt_size
+ - decoder_pkts
+ - decoder_tcp
+ - decoder_tcp_hlen_too_small
+ - decoder_tcp_invalid_optlen
+ - decoder_teredo
+ - decoder_udp
+ - decoder_vlan
+ - detect_alert
+ - dns_memcap_global
+ - dns_memuse
+ - flow_memuse
+ - flow_mgr_closed_pruned
+ - flow_mgr_est_pruned
+ - flow_mgr_flows_checked
+ - flow_mgr_flows_notimeout
+ - flow_mgr_flows_removed
+ - flow_mgr_flows_timeout
+ - flow_mgr_flows_timeout_inuse
+ - flow_mgr_new_pruned
+ - flow_mgr_rows_checked
+ - flow_mgr_rows_empty
+ - flow_mgr_rows_maxlen
+ - flow_mgr_rows_skipped
+ - flow_spare
+ - flow_tcp_reuse
+ - http_memuse
+ - tcp_memuse
+ - tcp_pseudo
+ - tcp_reassembly_gap
+ - tcp_reassembly_memuse
+ - tcp_rst
+ - tcp_sessions
+ - tcp_syn
+ - tcp_synack
+ - ...
+
+
+#### Suricata configuration
+
+Suricata needs to deliver the 'stats' event type to a given unix socket for
+this plugin to pick up. This can be done, for example, by creating an additional
+output in the Suricata configuration file:
+
+```yaml
+- eve-log:
+ enabled: yes
+ filetype: unix_stream
+ filename: /tmp/suricata-stats.sock
+ types:
+ - stats:
+ threads: yes
+```
+
+#### FreeBSD tuning
+
+
+Under FreeBSD it is necessary to increase the localhost buffer space to at least 16384, default is 8192
+otherwise messages from Suricata are truncated as they exceed the default available buffer space,
+consequently no statistics are processed by the plugin.
+
+```text
+sysctl -w net.local.stream.recvspace=16384
+sysctl -w net.local.stream.sendspace=16384
+```
+
+
+### Example Output
+
+```text
+suricata,host=myhost,thread=FM#01 flow_mgr_rows_empty=0,flow_mgr_rows_checked=65536,flow_mgr_closed_pruned=0,flow_emerg_mode_over=0,flow_mgr_flows_timeout_inuse=0,flow_mgr_rows_skipped=65535,flow_mgr_bypassed_pruned=0,flow_mgr_flows_removed=0,flow_mgr_est_pruned=0,flow_mgr_flows_notimeout=1,flow_mgr_flows_checked=1,flow_mgr_rows_busy=0,flow_spare=10000,flow_mgr_rows_maxlen=1,flow_mgr_new_pruned=0,flow_emerg_mode_entered=0,flow_tcp_reuse=0,flow_mgr_flows_timeout=0 1568368562545197545
+suricata,host=myhost,thread=W#04-wlp4s0 decoder_ltnull_pkt_too_small=0,decoder_ipraw_invalid_ip_version=0,defrag_ipv4_reassembled=0,tcp_no_flow=0,app_layer_flow_tls=1,decoder_udp=25,defrag_ipv6_fragments=0,defrag_ipv4_fragments=0,decoder_tcp=59,decoder_vlan=0,decoder_pkts=84,decoder_vlan_qinq=0,decoder_avg_pkt_size=574,flow_memcap=0,defrag_max_frag_hits=0,tcp_ssn_memcap_drop=0,capture_kernel_packets=84,app_layer_flow_dcerpc_udp=0,app_layer_tx_dns_tcp=0,tcp_rst=0,decoder_icmpv4=0,app_layer_tx_tls=0,decoder_ipv4=84,decoder_erspan=0,decoder_ltnull_unsupported_type=0,decoder_invalid=0,app_layer_flow_ssh=0,capture_kernel_drops=0,app_layer_flow_ftp=0,app_layer_tx_http=0,tcp_pseudo_failed=0,defrag_ipv6_reassembled=0,defrag_ipv6_timeouts=0,tcp_pseudo=0,tcp_sessions=1,decoder_ethernet=84,decoder_raw=0,decoder_sctp=0,app_layer_flow_dns_udp=1,decoder_gre=0,app_layer_flow_http=0,app_layer_flow_imap=0,tcp_segment_memcap_drop=0,detect_alert=0,app_layer_flow_failed_tcp=0,decoder_teredo=0,decoder_mpls=0,decoder_ppp=0,decoder_max_pkt_size=1422,decoder_ipv6=0,tcp_reassembly_gap=0,app_layer_flow_dcerpc_tcp=0,decoder_ipv4_in_ipv6=0,tcp_stream_depth_reached=0,app_layer_flow_dns_tcp=0,app_layer_flow_smtp=0,tcp_syn=1,decoder_sll=0,tcp_invalid_checksum=0,app_layer_tx_dns_udp=1,decoder_bytes=48258,defrag_ipv4_timeouts=0,app_layer_flow_msn=0,decoder_pppoe=0,decoder_null=0,app_layer_flow_failed_udp=3,app_layer_tx_smtp=0,decoder_icmpv6=0,decoder_ipv6_in_ipv6=0,tcp_synack=1,app_layer_flow_smb=0,decoder_dce_pkt_too_small=0 1568368562545174807
+suricata,host=myhost,thread=W#01-wlp4s0 tcp_synack=0,app_layer_flow_imap=0,decoder_ipv4_in_ipv6=0,decoder_max_pkt_size=684,decoder_gre=0,defrag_ipv4_timeouts=0,tcp_invalid_checksum=0,decoder_ipv4=53,flow_memcap=0,app_layer_tx_http=0,app_layer_tx_smtp=0,decoder_null=0,tcp_no_flow=0,app_layer_tx_tls=0,app_layer_flow_ssh=0,app_layer_flow_smtp=0,decoder_pppoe=0,decoder_teredo=0,decoder_ipraw_invalid_ip_version=0,decoder_ltnull_pkt_too_small=0,tcp_rst=0,decoder_ppp=0,decoder_ipv6=29,app_layer_flow_dns_udp=3,decoder_vlan=0,app_layer_flow_dcerpc_tcp=0,tcp_syn=0,defrag_ipv4_fragments=0,defrag_ipv6_timeouts=0,decoder_raw=0,defrag_ipv6_reassembled=0,tcp_reassembly_gap=0,tcp_sessions=0,decoder_udp=44,tcp_segment_memcap_drop=0,app_layer_tx_dns_udp=3,app_layer_flow_tls=0,decoder_tcp=37,defrag_ipv4_reassembled=0,app_layer_flow_failed_udp=6,app_layer_flow_ftp=0,decoder_icmpv6=1,tcp_stream_depth_reached=0,capture_kernel_drops=0,decoder_sll=0,decoder_bytes=15883,decoder_ethernet=91,tcp_pseudo=0,app_layer_flow_http=0,decoder_sctp=0,decoder_pkts=91,decoder_avg_pkt_size=174,decoder_erspan=0,app_layer_flow_msn=0,app_layer_flow_smb=0,capture_kernel_packets=91,decoder_icmpv4=0,decoder_ipv6_in_ipv6=0,tcp_ssn_memcap_drop=0,decoder_vlan_qinq=0,decoder_ltnull_unsupported_type=0,decoder_invalid=0,defrag_max_frag_hits=0,tcp_pseudo_failed=0,detect_alert=0,app_layer_tx_dns_tcp=0,app_layer_flow_failed_tcp=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_dns_tcp=0,defrag_ipv6_fragments=0,decoder_mpls=0,decoder_dce_pkt_too_small=0 1568368562545148438
+suricata,host=myhost flow_memuse=7094464,tcp_memuse=3276800,tcp_reassembly_memuse=12332832,dns_memuse=0,dns_memcap_state=0,dns_memcap_global=0,http_memuse=0,http_memcap=0 1568368562545144569
+suricata,host=myhost,thread=W#07-wlp4s0 app_layer_tx_http=0,app_layer_tx_dns_tcp=0,decoder_vlan=0,decoder_pppoe=0,decoder_sll=0,decoder_tcp=0,flow_memcap=0,app_layer_flow_msn=0,tcp_no_flow=0,tcp_rst=0,tcp_segment_memcap_drop=0,tcp_sessions=0,detect_alert=0,defrag_ipv6_reassembled=0,decoder_ipraw_invalid_ip_version=0,decoder_erspan=0,decoder_icmpv4=0,app_layer_tx_dns_udp=2,decoder_ltnull_pkt_too_small=0,decoder_bytes=1998,decoder_ipv6=1,defrag_ipv4_fragments=0,defrag_ipv6_fragments=0,app_layer_tx_smtp=0,decoder_ltnull_unsupported_type=0,decoder_max_pkt_size=342,app_layer_flow_ftp=0,decoder_ipv6_in_ipv6=0,defrag_ipv4_reassembled=0,defrag_ipv6_timeouts=0,app_layer_flow_dns_tcp=0,decoder_avg_pkt_size=181,defrag_ipv4_timeouts=0,tcp_stream_depth_reached=0,decoder_mpls=0,app_layer_flow_dns_udp=2,tcp_ssn_memcap_drop=0,app_layer_flow_dcerpc_tcp=0,app_layer_flow_failed_udp=2,app_layer_flow_smb=0,app_layer_flow_failed_tcp=0,decoder_invalid=0,decoder_null=0,decoder_gre=0,decoder_ethernet=11,app_layer_flow_ssh=0,defrag_max_frag_hits=0,capture_kernel_drops=0,tcp_pseudo_failed=0,app_layer_flow_smtp=0,decoder_udp=10,decoder_sctp=0,decoder_teredo=0,decoder_icmpv6=1,tcp_pseudo=0,tcp_synack=0,app_layer_tx_tls=0,app_layer_flow_imap=0,capture_kernel_packets=11,decoder_pkts=11,decoder_raw=0,decoder_ppp=0,tcp_syn=0,tcp_invalid_checksum=0,app_layer_flow_tls=0,decoder_ipv4_in_ipv6=0,app_layer_flow_http=0,decoder_dce_pkt_too_small=0,decoder_ipv4=10,decoder_vlan_qinq=0,tcp_reassembly_gap=0,app_layer_flow_dcerpc_udp=0 1568368562545110847
+suricata,host=myhost,thread=W#06-wlp4s0 app_layer_tx_smtp=0,decoder_ipv6_in_ipv6=0,decoder_dce_pkt_too_small=0,tcp_segment_memcap_drop=0,tcp_sessions=1,decoder_ppp=0,tcp_pseudo_failed=0,app_layer_tx_dns_tcp=0,decoder_invalid=0,defrag_ipv4_timeouts=0,app_layer_flow_smb=0,app_layer_flow_ssh=0,decoder_bytes=19407,decoder_null=0,app_layer_flow_tls=1,decoder_avg_pkt_size=473,decoder_pkts=41,decoder_pppoe=0,decoder_tcp=32,defrag_ipv4_reassembled=0,tcp_reassembly_gap=0,decoder_raw=0,flow_memcap=0,defrag_ipv6_timeouts=0,app_layer_flow_smtp=0,app_layer_tx_http=0,decoder_sll=0,decoder_udp=8,decoder_ltnull_pkt_too_small=0,decoder_ltnull_unsupported_type=0,decoder_ipv4_in_ipv6=0,decoder_vlan=0,decoder_max_pkt_size=1422,tcp_no_flow=0,app_layer_flow_failed_tcp=0,app_layer_flow_dns_tcp=0,app_layer_flow_ftp=0,decoder_icmpv4=0,defrag_max_frag_hits=0,tcp_rst=0,app_layer_flow_msn=0,app_layer_flow_failed_udp=2,app_layer_flow_dns_udp=0,app_layer_flow_dcerpc_udp=0,decoder_ipv4=39,decoder_ethernet=41,defrag_ipv6_reassembled=0,tcp_ssn_memcap_drop=0,app_layer_tx_tls=0,decoder_gre=0,decoder_vlan_qinq=0,tcp_pseudo=0,app_layer_flow_imap=0,app_layer_flow_dcerpc_tcp=0,defrag_ipv4_fragments=0,defrag_ipv6_fragments=0,tcp_synack=1,app_layer_flow_http=0,app_layer_tx_dns_udp=0,capture_kernel_packets=41,decoder_ipv6=2,tcp_invalid_checksum=0,tcp_stream_depth_reached=0,decoder_ipraw_invalid_ip_version=0,decoder_icmpv6=1,tcp_syn=1,detect_alert=0,capture_kernel_drops=0,decoder_teredo=0,decoder_erspan=0,decoder_sctp=0,decoder_mpls=0 1568368562545084670
+suricata,host=myhost,thread=W#02-wlp4s0 decoder_tcp=53,tcp_rst=3,tcp_reassembly_gap=0,defrag_ipv6_timeouts=0,tcp_ssn_memcap_drop=0,app_layer_flow_dcerpc_tcp=0,decoder_max_pkt_size=1422,decoder_ipv6_in_ipv6=0,tcp_no_flow=0,app_layer_flow_ftp=0,app_layer_flow_ssh=0,decoder_pkts=82,decoder_sctp=0,tcp_invalid_checksum=0,app_layer_flow_dns_tcp=0,decoder_ipraw_invalid_ip_version=0,decoder_bytes=26441,decoder_erspan=0,tcp_pseudo_failed=0,tcp_syn=1,app_layer_tx_http=0,app_layer_tx_smtp=0,decoder_teredo=0,decoder_ipv4=80,defrag_ipv4_fragments=0,tcp_stream_depth_reached=0,app_layer_flow_smb=0,capture_kernel_packets=82,decoder_null=0,decoder_ltnull_pkt_too_small=0,decoder_ppp=0,decoder_icmpv6=1,app_layer_flow_dns_udp=2,app_layer_flow_http=0,app_layer_tx_dns_udp=3,decoder_mpls=0,decoder_sll=0,defrag_ipv4_reassembled=0,tcp_segment_memcap_drop=0,app_layer_flow_imap=0,decoder_ltnull_unsupported_type=0,decoder_icmpv4=0,decoder_raw=0,defrag_ipv4_timeouts=0,app_layer_flow_failed_udp=8,decoder_gre=0,capture_kernel_drops=0,defrag_ipv6_reassembled=0,tcp_pseudo=0,app_layer_flow_tls=1,decoder_avg_pkt_size=322,decoder_dce_pkt_too_small=0,decoder_ethernet=82,defrag_ipv6_fragments=0,tcp_sessions=1,tcp_synack=1,app_layer_tx_dns_tcp=0,decoder_vlan=0,flow_memcap=0,decoder_vlan_qinq=0,decoder_udp=28,decoder_invalid=0,detect_alert=0,app_layer_flow_failed_tcp=0,app_layer_tx_tls=0,decoder_pppoe=0,decoder_ipv6=2,decoder_ipv4_in_ipv6=0,defrag_max_frag_hits=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_smtp=0,app_layer_flow_msn=0 1568368562545061864
+suricata,host=myhost,thread=W#08-wlp4s0 decoder_dce_pkt_too_small=0,app_layer_tx_dns_tcp=0,decoder_pkts=58,decoder_ppp=0,decoder_raw=0,decoder_ipv4_in_ipv6=0,decoder_max_pkt_size=1392,tcp_invalid_checksum=0,tcp_syn=0,decoder_ipv4=51,decoder_ipv6_in_ipv6=0,decoder_tcp=0,decoder_ltnull_pkt_too_small=0,flow_memcap=0,decoder_udp=58,tcp_ssn_memcap_drop=0,tcp_pseudo=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_dns_udp=5,app_layer_tx_http=0,capture_kernel_drops=0,decoder_vlan=0,tcp_segment_memcap_drop=0,app_layer_flow_ftp=0,app_layer_flow_imap=0,app_layer_flow_http=0,app_layer_flow_tls=0,decoder_icmpv4=0,decoder_sctp=0,defrag_ipv4_timeouts=0,tcp_reassembly_gap=0,detect_alert=0,decoder_ethernet=58,tcp_pseudo_failed=0,decoder_teredo=0,defrag_ipv4_reassembled=0,tcp_sessions=0,app_layer_flow_msn=0,decoder_ipraw_invalid_ip_version=0,tcp_no_flow=0,app_layer_flow_dns_tcp=0,decoder_null=0,defrag_ipv4_fragments=0,app_layer_flow_dcerpc_tcp=0,app_layer_flow_failed_udp=8,app_layer_tx_tls=0,decoder_bytes=15800,decoder_ipv6=7,tcp_stream_depth_reached=0,decoder_invalid=0,decoder_ltnull_unsupported_type=0,app_layer_tx_dns_udp=6,decoder_pppoe=0,decoder_avg_pkt_size=272,decoder_erspan=0,defrag_ipv6_timeouts=0,app_layer_flow_failed_tcp=0,decoder_gre=0,decoder_sll=0,defrag_max_frag_hits=0,app_layer_flow_ssh=0,capture_kernel_packets=58,decoder_mpls=0,decoder_vlan_qinq=0,tcp_rst=0,app_layer_flow_smb=0,app_layer_tx_smtp=0,decoder_icmpv6=0,defrag_ipv6_fragments=0,defrag_ipv6_reassembled=0,tcp_synack=0,app_layer_flow_smtp=0 1568368562545035575
+suricata,host=myhost,thread=W#05-wlp4s0 tcp_reassembly_gap=0,capture_kernel_drops=0,decoder_ltnull_unsupported_type=0,tcp_sessions=0,tcp_stream_depth_reached=0,tcp_pseudo_failed=0,app_layer_flow_failed_tcp=0,app_layer_tx_dns_tcp=0,decoder_null=0,decoder_dce_pkt_too_small=0,decoder_udp=7,tcp_rst=3,app_layer_flow_dns_tcp=0,decoder_invalid=0,defrag_ipv4_reassembled=0,tcp_synack=0,app_layer_flow_ftp=0,decoder_bytes=3117,decoder_pppoe=0,app_layer_flow_dcerpc_tcp=0,app_layer_flow_smb=0,decoder_ipv6_in_ipv6=0,decoder_ipraw_invalid_ip_version=0,app_layer_flow_imap=0,app_layer_tx_dns_udp=2,decoder_ppp=0,decoder_ipv4=21,decoder_tcp=14,flow_memcap=0,tcp_syn=0,tcp_invalid_checksum=0,decoder_teredo=0,decoder_ltnull_pkt_too_small=0,defrag_max_frag_hits=0,app_layer_tx_tls=0,decoder_pkts=24,decoder_sll=0,defrag_ipv6_fragments=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_smtp=0,decoder_icmpv6=3,defrag_ipv6_timeouts=0,decoder_ipv6=3,decoder_raw=0,defrag_ipv6_reassembled=0,tcp_no_flow=0,detect_alert=0,app_layer_flow_tls=0,decoder_ethernet=24,decoder_vlan=0,decoder_icmpv4=0,decoder_ipv4_in_ipv6=0,app_layer_flow_failed_udp=1,decoder_mpls=0,decoder_max_pkt_size=653,decoder_sctp=0,defrag_ipv4_timeouts=0,tcp_ssn_memcap_drop=0,app_layer_flow_dns_udp=1,app_layer_tx_smtp=0,capture_kernel_packets=24,decoder_vlan_qinq=0,decoder_gre=0,app_layer_flow_ssh=0,app_layer_flow_msn=0,defrag_ipv4_fragments=0,app_layer_flow_http=0,tcp_segment_memcap_drop=0,tcp_pseudo=0,app_layer_tx_http=0,decoder_erspan=0,decoder_avg_pkt_size=129 1568368562545009684
+suricata,host=myhost,thread=W#03-wlp4s0 app_layer_flow_failed_tcp=0,decoder_teredo=0,decoder_ipv6_in_ipv6=0,tcp_pseudo_failed=0,tcp_stream_depth_reached=0,tcp_syn=0,decoder_gre=0,tcp_segment_memcap_drop=0,tcp_ssn_memcap_drop=0,app_layer_tx_smtp=0,decoder_raw=0,decoder_ltnull_pkt_too_small=0,tcp_sessions=0,tcp_reassembly_gap=0,app_layer_flow_ssh=0,app_layer_flow_imap=0,decoder_ipv4=463,decoder_ethernet=463,capture_kernel_packets=463,decoder_pppoe=0,defrag_ipv4_reassembled=0,app_layer_flow_tls=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_dns_udp=0,decoder_vlan=0,decoder_ipraw_invalid_ip_version=0,decoder_mpls=0,tcp_no_flow=0,decoder_avg_pkt_size=445,decoder_udp=432,flow_memcap=0,app_layer_tx_dns_udp=0,app_layer_flow_msn=0,app_layer_flow_http=0,app_layer_flow_dcerpc_tcp=0,decoder_ipv6=0,decoder_ipv4_in_ipv6=0,defrag_ipv4_timeouts=0,defrag_ipv4_fragments=0,defrag_ipv6_timeouts=0,decoder_sctp=0,defrag_ipv6_fragments=0,app_layer_flow_dns_tcp=0,app_layer_tx_tls=0,defrag_max_frag_hits=0,decoder_bytes=206345,decoder_vlan_qinq=0,decoder_invalid=0,decoder_ppp=0,tcp_rst=0,detect_alert=0,capture_kernel_drops=0,app_layer_flow_failed_udp=4,decoder_null=0,decoder_icmpv4=0,decoder_icmpv6=0,decoder_ltnull_unsupported_type=0,defrag_ipv6_reassembled=0,tcp_invalid_checksum=0,tcp_synack=0,decoder_tcp=31,tcp_pseudo=0,app_layer_flow_smb=0,app_layer_flow_smtp=0,decoder_max_pkt_size=1463,decoder_dce_pkt_too_small=0,app_layer_tx_http=0,decoder_pkts=463,decoder_sll=0,app_layer_flow_ftp=0,app_layer_tx_dns_tcp=0,decoder_erspan=0 1568368562544966078
+```
diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go
new file mode 100644
index 0000000000000..17c0b571510b0
--- /dev/null
+++ b/plugins/inputs/suricata/suricata.go
@@ -0,0 +1,229 @@
+package suricata
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "sync"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+const (
+ // InBufSize is the input buffer size for JSON received via socket.
+ // Set to 10MB, as depending on the number of threads the output might be
+ // large.
+ InBufSize = 10 * 1024 * 1024
+)
+
+// Suricata is a Telegraf input plugin for Suricata runtime statistics.
+type Suricata struct {
+ Source string `toml:"source"`
+ Delimiter string `toml:"delimiter"`
+
+ inputListener *net.UnixListener
+ cancel context.CancelFunc
+
+ Log telegraf.Logger `toml:"-"`
+
+ wg sync.WaitGroup
+}
+
+// Description returns the plugin description.
+func (s *Suricata) Description() string {
+ return "Suricata stats plugin"
+}
+
+const sampleConfig = `
+ ## Data sink for Suricata stats log
+ # This is expected to be a filename of a
+ # unix socket to be created for listening.
+ source = "/var/run/suricata-stats.sock"
+
+ # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
+ # becomes "detect_alert" when delimiter is "_".
+ delimiter = "_"
+`
+
+// SampleConfig returns a sample TOML section to illustrate configuration
+// options.
+func (s *Suricata) SampleConfig() string {
+ return sampleConfig
+}
+
+// Start initiates background collection of JSON data from the socket
+// provided to Suricata.
+func (s *Suricata) Start(acc telegraf.Accumulator) error {
+ var err error
+ s.inputListener, err = net.ListenUnix("unix", &net.UnixAddr{
+ Name: s.Source,
+ Net: "unix",
+ })
+ if err != nil {
+ return err
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ s.cancel = cancel
+ s.inputListener.SetUnlinkOnClose(true)
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ go s.handleServerConnection(ctx, acc)
+ }()
+ return nil
+}
+
+// Stop causes the plugin to cease collecting JSON data from the socket provided
+// to Suricata.
+func (s *Suricata) Stop() {
+ s.inputListener.Close()
+ if s.cancel != nil {
+ s.cancel()
+ }
+ s.wg.Wait()
+}
+
+func (s *Suricata) readInput(ctx context.Context, acc telegraf.Accumulator, conn net.Conn) error {
+ reader := bufio.NewReaderSize(conn, InBufSize)
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ line, rerr := reader.ReadBytes('\n')
+ if rerr != nil {
+ return rerr
+ } else if len(line) > 0 {
+ s.parse(acc, line)
+ }
+ }
+ }
+}
+
+func (s *Suricata) handleServerConnection(ctx context.Context, acc telegraf.Accumulator) {
+ var err error
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ var conn net.Conn
+ conn, err = s.inputListener.Accept()
+ if err != nil {
+ if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
+ acc.AddError(err)
+ }
+ continue
+ }
+ err = s.readInput(ctx, acc, conn)
+ // we want to handle EOF as an opportunity to wait for a new
+ // connection -- this could, for example, happen when Suricata is
+ // restarted while Telegraf is running.
+ if err != io.EOF {
+ acc.AddError(err)
+ return
+ }
+ }
+ }
+}
+
+func flexFlatten(outmap map[string]interface{}, field string, v interface{}, delimiter string) error {
+ switch t := v.(type) {
+ case map[string]interface{}:
+ for k, v := range t {
+ var err error
+ if field == "" {
+ err = flexFlatten(outmap, k, v, delimiter)
+ } else {
+ err = flexFlatten(outmap, fmt.Sprintf("%s%s%s", field, delimiter, k), v, delimiter)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ case float64:
+ outmap[field] = v.(float64)
+ default:
+ return fmt.Errorf("Unsupported type %T encountered", t)
+ }
+ return nil
+}
+
+func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) {
+ // initial parsing
+ var result map[string]interface{}
+ err := json.Unmarshal([]byte(sjson), &result)
+ if err != nil {
+ acc.AddError(err)
+ return
+ }
+
+ // check for presence of relevant stats
+ if _, ok := result["stats"]; !ok {
+ s.Log.Debug("Input does not contain necessary 'stats' sub-object")
+ return
+ }
+
+ if _, ok := result["stats"].(map[string]interface{}); !ok {
+ s.Log.Debug("The 'stats' sub-object does not have required structure")
+ return
+ }
+
+ fields := make(map[string](map[string]interface{}))
+ totalmap := make(map[string]interface{})
+ for k, v := range result["stats"].(map[string]interface{}) {
+ if k == "threads" {
+ if v, ok := v.(map[string]interface{}); ok {
+ for k, t := range v {
+ outmap := make(map[string]interface{})
+ if threadStruct, ok := t.(map[string]interface{}); ok {
+ err = flexFlatten(outmap, "", threadStruct, s.Delimiter)
+ if err != nil {
+ s.Log.Debug(err)
+ // we skip this thread as something did not parse correctly
+ continue
+ }
+ fields[k] = outmap
+ }
+ }
+ } else {
+ s.Log.Debug("The 'threads' sub-object does not have required structure")
+ }
+ } else {
+ err = flexFlatten(totalmap, k, v, s.Delimiter)
+ if err != nil {
+ s.Log.Debug(err.Error())
+ // we skip this subitem as something did not parse correctly
+ }
+ }
+ }
+ fields["total"] = totalmap
+
+ for k := range fields {
+ if k == "Global" {
+ acc.AddFields("suricata", fields[k], nil)
+ } else {
+ acc.AddFields("suricata", fields[k], map[string]string{"thread": k})
+ }
+ }
+}
+
+// Gather measures and submits one full set of telemetry to Telegraf.
+// Not used here, submission is completely input-driven.
+func (s *Suricata) Gather(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func init() {
+ inputs.Add("suricata", func() telegraf.Input {
+ return &Suricata{
+ Source: "/var/run/suricata-stats.sock",
+ Delimiter: "_",
+ }
+ })
+}
diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go
new file mode 100644
index 0000000000000..9c9c2ddc3694c
--- /dev/null
+++ b/plugins/inputs/suricata/suricata_test.go
@@ -0,0 +1,282 @@
+package suricata
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"capture":{"kernel_packets":905344474,"kernel_drops":78355440,"kernel_packets_delta":2376742,"kernel_drops_delta":82049}}}`
+var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W#05-wlp4s0": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}`
+
+func TestSuricataLarge(t *testing.T) {
+ dir, err := ioutil.TempDir("", "test")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+ tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63()))
+
+ s := Suricata{
+ Source: tmpfn,
+ Delimiter: ".",
+ Log: testutil.Logger{
+ Name: "inputs.suricata",
+ },
+ }
+ acc := testutil.Accumulator{}
+ require.NoError(t, s.Start(&acc))
+ defer s.Stop()
+
+ data, err := ioutil.ReadFile("testdata/test1.json")
+ require.NoError(t, err)
+
+ c, err := net.Dial("unix", tmpfn)
+ require.NoError(t, err)
+ c.Write([]byte(data))
+ c.Write([]byte("\n"))
+ c.Close()
+
+ acc.Wait(1)
+}
+
+func TestSuricata(t *testing.T) {
+ dir, err := ioutil.TempDir("", "test")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+ tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63()))
+
+ s := Suricata{
+ Source: tmpfn,
+ Delimiter: ".",
+ Log: testutil.Logger{
+ Name: "inputs.suricata",
+ },
+ }
+ acc := testutil.Accumulator{}
+ require.NoError(t, s.Start(&acc))
+ defer s.Stop()
+
+ c, err := net.Dial("unix", tmpfn)
+ require.NoError(t, err)
+ c.Write([]byte(ex2))
+ c.Write([]byte("\n"))
+ c.Close()
+
+ acc.Wait(1)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "suricata",
+ map[string]string{
+ "thread": "total",
+ },
+ map[string]interface{}{
+ "capture.kernel_packets": float64(905344474),
+ "capture.kernel_drops": float64(78355440),
+ "capture.kernel_packets_delta": float64(2376742),
+ "capture.kernel_drops_delta": float64(82049),
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+}
+
+func TestThreadStats(t *testing.T) {
+ dir, err := ioutil.TempDir("", "test")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+ tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63()))
+
+ s := Suricata{
+ Source: tmpfn,
+ Delimiter: ".",
+ Log: testutil.Logger{
+ Name: "inputs.suricata",
+ },
+ }
+
+ acc := testutil.Accumulator{}
+ require.NoError(t, s.Start(&acc))
+ defer s.Stop()
+
+ c, err := net.Dial("unix", tmpfn)
+ require.NoError(t, err)
+ c.Write([]byte(""))
+ c.Write([]byte("\n"))
+ c.Write([]byte("foobard}\n"))
+ c.Write([]byte(ex3))
+ c.Write([]byte("\n"))
+ c.Close()
+ acc.Wait(1)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "suricata",
+ map[string]string{
+ "thread": "W#05-wlp4s0",
+ },
+ map[string]interface{}{
+ "capture.kernel_packets": float64(905344474),
+ "capture.kernel_drops": float64(78355440),
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+}
+
+func TestSuricataInvalid(t *testing.T) {
+ dir, err := ioutil.TempDir("", "test")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+ tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63()))
+
+ s := Suricata{
+ Source: tmpfn,
+ Log: testutil.Logger{
+ Name: "inputs.suricata",
+ },
+ }
+ acc := testutil.Accumulator{}
+ acc.SetDebug(true)
+
+ require.NoError(t, s.Start(&acc))
+ defer s.Stop()
+
+ c, err := net.Dial("unix", tmpfn)
+ require.NoError(t, err)
+ c.Write([]byte("sfjiowef"))
+ c.Write([]byte("\n"))
+ c.Close()
+
+ acc.WaitError(1)
+}
+
+func TestSuricataInvalidPath(t *testing.T) {
+ tmpfn := fmt.Sprintf("/t%d/X", rand.Int63())
+ s := Suricata{
+ Source: tmpfn,
+ Log: testutil.Logger{
+ Name: "inputs.suricata",
+ },
+ }
+
+ acc := testutil.Accumulator{}
+ require.Error(t, s.Start(&acc))
+}
+
+func TestSuricataTooLongLine(t *testing.T) {
+ dir, err := ioutil.TempDir("", "test")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+ tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63()))
+
+ s := Suricata{
+ Source: tmpfn,
+ Log: testutil.Logger{
+ Name: "inputs.suricata",
+ },
+ }
+ acc := testutil.Accumulator{}
+
+ require.NoError(t, s.Start(&acc))
+ defer s.Stop()
+
+ c, err := net.Dial("unix", tmpfn)
+ require.NoError(t, err)
+ c.Write([]byte(strings.Repeat("X", 20000000)))
+ c.Write([]byte("\n"))
+ c.Close()
+
+ acc.WaitError(1)
+
+}
+
+func TestSuricataEmptyJSON(t *testing.T) {
+ dir, err := ioutil.TempDir("", "test")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+ tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63()))
+
+ s := Suricata{
+ Source: tmpfn,
+ Log: testutil.Logger{
+ Name: "inputs.suricata",
+ },
+ }
+ acc := testutil.Accumulator{}
+ require.NoError(t, s.Start(&acc))
+ defer s.Stop()
+
+ c, err := net.Dial("unix", tmpfn)
+ if err != nil {
+ log.Println(err)
+
+ }
+ c.Write([]byte("\n"))
+ c.Close()
+
+ acc.WaitError(1)
+}
+
+func TestSuricataDisconnectSocket(t *testing.T) {
+ dir, err := ioutil.TempDir("", "test")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+ tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63()))
+
+ s := Suricata{
+ Source: tmpfn,
+ Log: testutil.Logger{
+ Name: "inputs.suricata",
+ },
+ }
+ acc := testutil.Accumulator{}
+
+ require.NoError(t, s.Start(&acc))
+ defer s.Stop()
+
+ c, err := net.Dial("unix", tmpfn)
+ require.NoError(t, err)
+ c.Write([]byte(ex2))
+ c.Write([]byte("\n"))
+ c.Close()
+
+ c, err = net.Dial("unix", tmpfn)
+ require.NoError(t, err)
+ c.Write([]byte(ex3))
+ c.Write([]byte("\n"))
+ c.Close()
+
+ acc.Wait(2)
+}
+
+func TestSuricataStartStop(t *testing.T) {
+ dir, err := ioutil.TempDir("", "test")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+ tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63()))
+
+ s := Suricata{
+ Source: tmpfn,
+ Log: testutil.Logger{
+ Name: "inputs.suricata",
+ },
+ }
+ acc := testutil.Accumulator{}
+ require.NoError(t, s.Start(&acc))
+ s.Stop()
+}
diff --git a/plugins/inputs/suricata/suricata_testutil.go b/plugins/inputs/suricata/suricata_testutil.go
new file mode 100644
index 0000000000000..55aa2bb9bae69
--- /dev/null
+++ b/plugins/inputs/suricata/suricata_testutil.go
@@ -0,0 +1,38 @@
+package suricata
+
+import (
+ "bytes"
+ "sync"
+)
+
+// A thread-safe Buffer wrapper to enable concurrent access to log output.
+type buffer struct {
+ b bytes.Buffer
+ m sync.Mutex
+}
+
+func (b *buffer) Read(p []byte) (n int, err error) {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.b.Read(p)
+}
+func (b *buffer) Write(p []byte) (n int, err error) {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.b.Write(p)
+}
+func (b *buffer) String() string {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.b.String()
+}
+func (b *buffer) Reset() {
+ b.m.Lock()
+ defer b.m.Unlock()
+ b.b.Reset()
+}
+func (b *buffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.b.Bytes()
+}
diff --git a/plugins/inputs/suricata/testdata/test1.json b/plugins/inputs/suricata/testdata/test1.json
new file mode 100644
index 0000000000000..31208c4d11919
--- /dev/null
+++ b/plugins/inputs/suricata/testdata/test1.json
@@ -0,0 +1 @@
+{ "timestamp": "2019-08-08T16:26:33.000244+0200", "event_type": "stats", "stats": { "uptime": 15, "capture": { "kernel_packets": 135, "kernel_packets_delta": 74, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 141, "pkts_delta": 63, "bytes": 26018, "bytes_delta": 13415, "invalid": 0, "invalid_delta": 0, "ipv4": 132, "ipv4_delta": 58, "ipv6": 4, "ipv6_delta": 2, "ethernet": 141, "ethernet_delta": 63, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 79, "tcp_delta": 35, "udp": 53, "udp_delta": 23, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 4, "icmpv6_delta": 2, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 184, "avg_pkt_size_delta": 23, "max_pkt_size": 1422, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0, "spare": 10000, "spare_delta": 0, "emerg_mode_entered": 0, "emerg_mode_entered_delta": 0, "emerg_mode_over": 0, "emerg_mode_over_delta": 0, "tcp_reuse": 0, "tcp_reuse_delta": 0, "memuse": 7083520, "memuse_delta": 4608 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 1, "sessions_delta": 1, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 1, "syn_delta": 1, "synack": 1, "synack_delta": 1, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0, "memuse": 3276800, "memuse_delta": 0, "reassembly_memuse": 12332832, "reassembly_memuse_delta": 0 }, "detect": { "alert": 2, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 1, "tls_delta": 1, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 5, "dns_udp_delta": 2, "failed_udp": 12, "failed_udp_delta": 6 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 12, "dns_udp_delta": 2 } }, "flow_mgr": { "closed_pruned": 0, "closed_pruned_delta": 0, "new_pruned": 0, "new_pruned_delta": 0, "est_pruned": 0, "est_pruned_delta": 0, "bypassed_pruned": 0, "bypassed_pruned_delta": 0, "flows_checked": 1, "flows_checked_delta": 1, "flows_notimeout": 1, "flows_notimeout_delta": 1, "flows_timeout": 0, "flows_timeout_delta": 0, "flows_timeout_inuse": 0, "flows_timeout_inuse_delta": 0, "flows_removed": 0, "flows_removed_delta": 0, "rows_checked": 65536, "rows_checked_delta": 0, "rows_skipped": 65535, "rows_skipped_delta": -1, "rows_empty": 0, "rows_empty_delta": 0, "rows_busy": 0, "rows_busy_delta": 0, "rows_maxlen": 1, "rows_maxlen_delta": 1 }, "dns": { "memuse": 1402, "memuse_delta": 595, "memcap_state": 0, "memcap_state_delta": 0, "memcap_global": 0, "memcap_global_delta": 0 }, "http": { "memuse": 0, "memuse_delta": 0, "memcap": 0, "memcap_delta": 0 }, "threads": { "W#01-wlp4s0": { "capture": { "kernel_packets": 25, "kernel_packets_delta": 22, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 25, "pkts_delta": 22, "bytes": 7026, "bytes_delta": 6828, "invalid": 0, "invalid_delta": 0, "ipv4": 19, "ipv4_delta": 19, "ipv6": 1, "ipv6_delta": 0, "ethernet": 25, "ethernet_delta": 22, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 17, "tcp_delta": 17, "udp": 2, "udp_delta": 2, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 281, "avg_pkt_size_delta": 215, "max_pkt_size": 1422, "max_pkt_size_delta": 1336, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 1, "sessions_delta": 1, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 1, "syn_delta": 1, "synack": 1, "synack_delta": 1, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 1, "tls_delta": 1, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 1 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#02-wlp4s0": { "capture": { "kernel_packets": 32, "kernel_packets_delta": 21, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 32, "pkts_delta": 19, "bytes": 5378, "bytes_delta": 3085, "invalid": 0, "invalid_delta": 0, "ipv4": 32, "ipv4_delta": 19, "ipv6": 0, "ipv6_delta": 0, "ethernet": 32, "ethernet_delta": 19, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 25, "tcp_delta": 12, "udp": 7, "udp_delta": 7, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 168, "avg_pkt_size_delta": -8, "max_pkt_size": 626, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 2, "failed_udp_delta": 2 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#03-wlp4s0": { "capture": { "kernel_packets": 44, "kernel_packets_delta": 9, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 45, "pkts_delta": 9, "bytes": 9392, "bytes_delta": 1718, "invalid": 0, "invalid_delta": 0, "ipv4": 45, "ipv4_delta": 9, "ipv6": 0, "ipv6_delta": 0, "ethernet": 45, "ethernet_delta": 9, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 33, "tcp_delta": 2, "udp": 12, "udp_delta": 7, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 208, "avg_pkt_size_delta": -5, "max_pkt_size": 1422, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 1, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 5, "failed_udp_delta": 2 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#04-wlp4s0": { "capture": { "kernel_packets": 4, "kernel_packets_delta": 0, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 10, "pkts_delta": 0, "bytes": 740, "bytes_delta": 0, "invalid": 0, "invalid_delta": 0, "ipv4": 10, "ipv4_delta": 0, "ipv6": 0, "ipv6_delta": 0, "ethernet": 10, "ethernet_delta": 0, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 0, "tcp_delta": 0, "udp": 10, "udp_delta": 0, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 74, "avg_pkt_size_delta": 0, "max_pkt_size": 86, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 1, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 1, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 4, "dns_udp_delta": 0 } } }, "W#05-wlp4s0": { "capture": { "kernel_packets": 14, "kernel_packets_delta": 11, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 14, "pkts_delta": 4, "bytes": 1723, "bytes_delta": 797, "invalid": 0, "invalid_delta": 0, "ipv4": 13, "ipv4_delta": 3, "ipv6": 1, "ipv6_delta": 1, "ethernet": 14, "ethernet_delta": 4, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 2, "tcp_delta": 2, "udp": 11, "udp_delta": 1, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 1, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 123, "avg_pkt_size_delta": 31, "max_pkt_size": 478, "max_pkt_size_delta": 299, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 1, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 4, "dns_udp_delta": 0 } } }, "W#06-wlp4s0": { "capture": { "kernel_packets": 11, "kernel_packets_delta": 8, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 11, "pkts_delta": 6, "bytes": 1254, "bytes_delta": 696, "invalid": 0, "invalid_delta": 0, "ipv4": 10, "ipv4_delta": 6, "ipv6": 1, "ipv6_delta": 0, "ethernet": 11, "ethernet_delta": 6, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 2, "tcp_delta": 2, "udp": 8, "udp_delta": 4, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 114, "avg_pkt_size_delta": 3, "max_pkt_size": 215, "max_pkt_size_delta": 62, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 2, "dns_udp_delta": 1, "failed_udp": 1, "failed_udp_delta": 1 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 3, "dns_udp_delta": 1 } } }, "W#07-wlp4s0": { "capture": { "kernel_packets": 1, "kernel_packets_delta": 0, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 1, "pkts_delta": 0, "bytes": 214, "bytes_delta": 0, "invalid": 0, "invalid_delta": 0, "ipv4": 1, "ipv4_delta": 0, "ipv6": 0, "ipv6_delta": 0, "ethernet": 1, "ethernet_delta": 0, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 0, "tcp_delta": 0, "udp": 1, "udp_delta": 0, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 214, "avg_pkt_size_delta": 0, "max_pkt_size": 214, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#08-wlp4s0": { "capture": { "kernel_packets": 4, "kernel_packets_delta": 3, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 3, "pkts_delta": 3, "bytes": 291, "bytes_delta": 291, "invalid": 0, "invalid_delta": 0, "ipv4": 2, "ipv4_delta": 2, "ipv6": 1, "ipv6_delta": 1, "ethernet": 3, "ethernet_delta": 3, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 0, "tcp_delta": 0, "udp": 2, "udp_delta": 2, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 1, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 97, "avg_pkt_size_delta": 97, "max_pkt_size": 134, "max_pkt_size_delta": 134, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 1, "dns_udp_delta": 1, "failed_udp": 0, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 1, "dns_udp_delta": 1 } } }, "FM#01": { "flow_mgr": { "closed_pruned": 0, "closed_pruned_delta": 0, "new_pruned": 0, "new_pruned_delta": 0, "est_pruned": 0, "est_pruned_delta": 0, "bypassed_pruned": 0, "bypassed_pruned_delta": 0, "flows_checked": 1, "flows_checked_delta": 1, "flows_notimeout": 1, "flows_notimeout_delta": 1, "flows_timeout": 0, "flows_timeout_delta": 0, "flows_timeout_inuse": 0, "flows_timeout_inuse_delta": 0, "flows_removed": 0, "flows_removed_delta": 0, "rows_checked": 65536, "rows_checked_delta": 0, "rows_skipped": 65535, "rows_skipped_delta": -1, "rows_empty": 0, "rows_empty_delta": 0, "rows_busy": 0, "rows_busy_delta": 0, "rows_maxlen": 1, "rows_maxlen_delta": 1 }, "flow": { "spare": 10000, "spare_delta": 0, "emerg_mode_entered": 0, "emerg_mode_entered_delta": 0, "emerg_mode_over": 0, "emerg_mode_over_delta": 0, "tcp_reuse": 0, "tcp_reuse_delta": 0 } }, "Global": { "tcp": { "memuse": 3276800, "memuse_delta": 0, "reassembly_memuse": 12332832, "reassembly_memuse_delta": 0 }, "dns": { "memuse": 1402, "memuse_delta": 595, "memcap_state": 0, "memcap_state_delta": 0, "memcap_global": 0, "memcap_global_delta": 0 }, "http": { "memuse": 0, "memuse_delta": 0, "memcap": 0, "memcap_delta": 0 }, "flow": { "memuse": 7083520, "memuse_delta": 4608 } } } }}
\ No newline at end of file
diff --git a/plugins/inputs/synproxy/README.md b/plugins/inputs/synproxy/README.md
new file mode 100644
index 0000000000000..efb8203515c69
--- /dev/null
+++ b/plugins/inputs/synproxy/README.md
@@ -0,0 +1,49 @@
+# Synproxy Input Plugin
+
+The synproxy plugin gathers the synproxy counters. Synproxy is a Linux netfilter module used for SYN attack mitigation.
+The use of synproxy is documented in `man iptables-extensions` under the SYNPROXY section.
+
+
+### Configuration
+
+The synproxy plugin does not need any configuration
+
+```toml
+[[inputs.synproxy]]
+ # no configuration
+```
+
+### Metrics
+
+The following synproxy counters are gathered
+
+- synproxy
+ - fields:
+ - cookie_invalid (uint32, packets, counter) - Invalid cookies
+ - cookie_retrans (uint32, packets, counter) - Cookies retransmitted
+ - cookie_valid (uint32, packets, counter) - Valid cookies
+ - entries (uint32, packets, counter) - Entries
+ - syn_received (uint32, packets, counter) - SYN received
+ - conn_reopened (uint32, packets, counter) - Connections reopened
+
+### Sample Queries
+
+Get the number of packets per 5 minutes for the measurement in the last hour from InfluxDB:
+```sql
+SELECT difference(last("cookie_invalid")) AS "cookie_invalid", difference(last("cookie_retrans")) AS "cookie_retrans", difference(last("cookie_valid")) AS "cookie_valid", difference(last("entries")) AS "entries", difference(last("syn_received")) AS "syn_received", difference(last("conn_reopened")) AS "conn_reopened" FROM synproxy WHERE time > NOW() - 1h GROUP BY time(5m) FILL(null);
+```
+
+### Troubleshooting
+
+Execute the following CLI command in Linux to test the synproxy counters:
+```sh
+cat /proc/net/stat/synproxy
+```
+
+### Example Output
+
+This section shows example output in Line Protocol format.
+
+```
+synproxy,host=Filter-GW01,rack=filter-node1 conn_reopened=0i,cookie_invalid=235i,cookie_retrans=0i,cookie_valid=8814i,entries=0i,syn_received=8742i 1549550634000000000
+```
diff --git a/plugins/inputs/synproxy/synproxy.go b/plugins/inputs/synproxy/synproxy.go
new file mode 100644
index 0000000000000..6a5b2b3239ed9
--- /dev/null
+++ b/plugins/inputs/synproxy/synproxy.go
@@ -0,0 +1,40 @@
+package synproxy
+
+import (
+ "os"
+ "path"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type Synproxy struct {
+ Log telegraf.Logger `toml:"-"`
+
+ // Synproxy stats filename (proc filesystem)
+ statFile string
+}
+
+func (k *Synproxy) Description() string {
+ return "Get synproxy counter statistics from procfs"
+}
+
+func (k *Synproxy) SampleConfig() string {
+ return ""
+}
+
+func getHostProc() string {
+ procPath := "/proc"
+ if os.Getenv("HOST_PROC") != "" {
+ procPath = os.Getenv("HOST_PROC")
+ }
+ return procPath
+}
+
+func init() {
+ inputs.Add("synproxy", func() telegraf.Input {
+ return &Synproxy{
+ statFile: path.Join(getHostProc(), "/net/stat/synproxy"),
+ }
+ })
+}
diff --git a/plugins/inputs/synproxy/synproxy_linux.go b/plugins/inputs/synproxy/synproxy_linux.go
new file mode 100644
index 0000000000000..bcc9729384282
--- /dev/null
+++ b/plugins/inputs/synproxy/synproxy_linux.go
@@ -0,0 +1,90 @@
+// +build linux
+
+package synproxy
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/influxdata/telegraf"
+)
+
+func (k *Synproxy) Gather(acc telegraf.Accumulator) error {
+ data, err := k.getSynproxyStat()
+ if err != nil {
+ return err
+ }
+
+ acc.AddCounter("synproxy", data, map[string]string{})
+ return nil
+}
+
+func inSlice(haystack []string, needle string) bool {
+ for _, val := range haystack {
+ if needle == val {
+ return true
+ }
+ }
+ return false
+}
+
+func (k *Synproxy) getSynproxyStat() (map[string]interface{}, error) {
+ var hname []string
+ counters := []string{"entries", "syn_received", "cookie_invalid", "cookie_valid", "cookie_retrans", "conn_reopened"}
+ fields := make(map[string]interface{})
+
+ // Open synproxy file in proc filesystem
+ file, err := os.Open(k.statFile)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ // Initialise expected fields
+ for _, val := range counters {
+ fields[val] = uint32(0)
+ }
+
+ scanner := bufio.NewScanner(file)
+ // Read header row
+ if scanner.Scan() {
+ line := scanner.Text()
+ // Parse fields separated by whitespace
+ dataFields := strings.Fields(line)
+ for _, val := range dataFields {
+ if !inSlice(counters, val) {
+ val = ""
+ }
+ hname = append(hname, val)
+ }
+ }
+ if len(hname) == 0 {
+ return nil, fmt.Errorf("invalid data")
+ }
+ // Read data rows
+ for scanner.Scan() {
+ line := scanner.Text()
+ // Parse fields separated by whitespace
+ dataFields := strings.Fields(line)
+ // If number of data fields do not match number of header fields
+ if len(dataFields) != len(hname) {
+ return nil, fmt.Errorf("invalid number of columns in data, expected %d found %d", len(hname),
+ len(dataFields))
+ }
+ for i, val := range dataFields {
+ // Convert from hexstring to int32
+ x, err := strconv.ParseUint(val, 16, 32)
+ // If field is not a valid hexstring
+ if err != nil {
+ return nil, fmt.Errorf("invalid value '%s' found", val)
+ }
+ if hname[i] != "" {
+ fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x)
+ }
+ }
+ }
+ return fields, nil
+}
diff --git a/plugins/inputs/synproxy/synproxy_notlinux.go b/plugins/inputs/synproxy/synproxy_notlinux.go
new file mode 100644
index 0000000000000..71a223644d8ed
--- /dev/null
+++ b/plugins/inputs/synproxy/synproxy_notlinux.go
@@ -0,0 +1,23 @@
+// +build !linux
+
+package synproxy
+
+import (
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+func (k *Synproxy) Init() error {
+ k.Log.Warn("Current platform is not supported")
+ return nil
+}
+
+func (k *Synproxy) Gather(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func init() {
+ inputs.Add("synproxy", func() telegraf.Input {
+ return &Synproxy{}
+ })
+}
diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go
new file mode 100644
index 0000000000000..83d752ff16f8c
--- /dev/null
+++ b/plugins/inputs/synproxy/synproxy_test.go
@@ -0,0 +1,169 @@
+// +build linux
+
+package synproxy
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSynproxyFileNormal(t *testing.T) {
+ testSynproxyFileData(t, synproxyFileNormal, synproxyResultNormal)
+}
+
+func TestSynproxyFileOverflow(t *testing.T) {
+ testSynproxyFileData(t, synproxyFileOverflow, synproxyResultOverflow)
+}
+
+func TestSynproxyFileExtended(t *testing.T) {
+ testSynproxyFileData(t, synproxyFileExtended, synproxyResultNormal)
+}
+
+func TestSynproxyFileAltered(t *testing.T) {
+ testSynproxyFileData(t, synproxyFileAltered, synproxyResultNormal)
+}
+
+func TestSynproxyFileHeaderMismatch(t *testing.T) {
+ tmpfile := makeFakeSynproxyFile([]byte(synproxyFileHeaderMismatch))
+ defer os.Remove(tmpfile)
+
+ k := Synproxy{
+ statFile: tmpfile,
+ }
+
+ acc := testutil.Accumulator{}
+ err := k.Gather(&acc)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "invalid number of columns in data")
+}
+
+func TestSynproxyFileInvalidHex(t *testing.T) {
+ tmpfile := makeFakeSynproxyFile([]byte(synproxyFileInvalidHex))
+ defer os.Remove(tmpfile)
+
+ k := Synproxy{
+ statFile: tmpfile,
+ }
+
+ acc := testutil.Accumulator{}
+ err := k.Gather(&acc)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "invalid value")
+}
+
+func TestNoSynproxyFile(t *testing.T) {
+ tmpfile := makeFakeSynproxyFile([]byte(synproxyFileNormal))
+ // Remove file to generate "no such file" error
+ os.Remove(tmpfile)
+
+ k := Synproxy{
+ statFile: tmpfile,
+ }
+
+ acc := testutil.Accumulator{}
+ err := k.Gather(&acc)
+ assert.Error(t, err)
+}
+
+// Valid Synproxy file
+const synproxyFileNormal = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened
+00000000 00007a88 00002af7 00007995 00000000 00000000
+00000000 0000892c 000015e3 00008852 00000000 00000000
+00000000 00007a80 00002ccc 0000796a 00000000 00000000
+00000000 000079f7 00002bf5 0000790a 00000000 00000000
+00000000 00007a08 00002c9a 00007901 00000000 00000000
+00000000 00007cfc 00002b36 000078fd 00000000 00000000
+00000000 000079c2 00002c2b 000078d6 00000000 00000000
+00000000 0000798a 00002ba8 000078a0 00000000 00000000`
+
+const synproxyFileOverflow = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened
+00000000 80000001 e0000000 80000001 00000000 00000000
+00000000 80000003 f0000009 80000003 00000000 00000000`
+
+const synproxyFileHeaderMismatch = `entries syn_received cookie_invalid cookie_valid cookie_retrans
+00000000 00000002 00000000 00000002 00000000 00000000
+00000000 00000004 00000015 00000004 00000000 00000000
+00000000 00000003 00000000 00000003 00000000 00000000
+00000000 00000002 00000000 00000002 00000000 00000000
+00000000 00000003 00000009 00000003 00000000 00000000
+00000000 00000003 00000009 00000003 00000000 00000000
+00000000 00000001 00000000 00000001 00000000 00000000
+00000000 00000003 00000009 00000003 00000000 00000000`
+
+const synproxyFileInvalidHex = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened
+entries 00000002 00000000 00000002 00000000 00000000
+00000000 00000003 00000009 00000003 00000000 00000000`
+
+const synproxyFileExtended = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened new_counter
+00000000 00007a88 00002af7 00007995 00000000 00000000 00000000
+00000000 0000892c 000015e3 00008852 00000000 00000000 00000000
+00000000 00007a80 00002ccc 0000796a 00000000 00000000 00000000
+00000000 000079f7 00002bf5 0000790a 00000000 00000000 00000000
+00000000 00007a08 00002c9a 00007901 00000000 00000000 00000000
+00000000 00007cfc 00002b36 000078fd 00000000 00000000 00000000
+00000000 000079c2 00002c2b 000078d6 00000000 00000000 00000000
+00000000 0000798a 00002ba8 000078a0 00000000 00000000 00000000`
+
+const synproxyFileAltered = `entries cookie_invalid cookie_valid syn_received conn_reopened
+00000000 00002af7 00007995 00007a88 00000000
+00000000 000015e3 00008852 0000892c 00000000
+00000000 00002ccc 0000796a 00007a80 00000000
+00000000 00002bf5 0000790a 000079f7 00000000
+00000000 00002c9a 00007901 00007a08 00000000
+00000000 00002b36 000078fd 00007cfc 00000000
+00000000 00002c2b 000078d6 000079c2 00000000
+00000000 00002ba8 000078a0 0000798a 00000000`
+
+var synproxyResultNormal = map[string]interface{}{
+ "entries": uint32(0x00000000),
+ "syn_received": uint32(0x0003e27b),
+ "cookie_invalid": uint32(0x0001493e),
+ "cookie_valid": uint32(0x0003d7cf),
+ "cookie_retrans": uint32(0x00000000),
+ "conn_reopened": uint32(0x00000000),
+}
+
+var synproxyResultOverflow = map[string]interface{}{
+ "entries": uint32(0x00000000),
+ "syn_received": uint32(0x00000004),
+ "cookie_invalid": uint32(0xd0000009),
+ "cookie_valid": uint32(0x00000004),
+ "cookie_retrans": uint32(0x00000000),
+ "conn_reopened": uint32(0x00000000),
+}
+
+func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string]interface{}) {
+ tmpfile := makeFakeSynproxyFile([]byte(fileData))
+ defer os.Remove(tmpfile)
+
+ k := Synproxy{
+ statFile: tmpfile,
+ }
+
+ acc := testutil.Accumulator{}
+ err := k.Gather(&acc)
+ assert.NoError(t, err)
+
+ acc.AssertContainsFields(t, "synproxy", telegrafData)
+}
+
+func makeFakeSynproxyFile(content []byte) string {
+ tmpfile, err := ioutil.TempFile("", "synproxy_test")
+ if err != nil {
+ panic(err)
+ }
+
+ if _, err := tmpfile.Write(content); err != nil {
+ panic(err)
+ }
+ if err := tmpfile.Close(); err != nil {
+ panic(err)
+ }
+
+ return tmpfile.Name()
+}
diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md
index 8183d2c9046c9..32c5f2717b630 100644
--- a/plugins/inputs/syslog/README.md
+++ b/plugins/inputs/syslog/README.md
@@ -1,9 +1,10 @@
# Syslog Input Plugin
The syslog plugin listens for syslog messages transmitted over
-[UDP](https://tools.ietf.org/html/rfc5426) or
-[TCP](https://tools.ietf.org/html/rfc6587) or
-[TLS](https://tools.ietf.org/html/rfc5425), with or without the octet counting framing.
+a Unix Domain socket,
+[UDP](https://tools.ietf.org/html/rfc5426),
+[TCP](https://tools.ietf.org/html/rfc6587), or
+[TLS](https://tools.ietf.org/html/rfc5425); with or without the octet counting framing.
Syslog messages should be formatted according to
[RFC 5424](https://tools.ietf.org/html/rfc5424).
@@ -12,10 +13,12 @@ Syslog messages should be formatted according to
```toml
[[inputs.syslog]]
- ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
## Protocol, address and port to host the syslog receiver.
## If no host is specified, then localhost is used.
## If no port is specified, 6514 is used (RFC5425#section-4.1).
+ ## ex: server = "tcp://localhost:6514"
+ ## server = "udp://:6514"
+ ## server = "unix:///var/run/telegraf-syslog.sock"
server = "tcp://:6514"
## TLS Config
@@ -44,7 +47,7 @@ Syslog messages should be formatted according to
## Must be one of "octect-counting", "non-transparent".
# framing = "octet-counting"
- ## The trailer to be expected in case of non-trasparent framing (default = "LF").
+ ## The trailer to be expected in case of non-transparent framing (default = "LF").
## Must be one of "LF", or "NUL".
# trailer = "LF"
diff --git a/plugins/inputs/syslog/commons_test.go b/plugins/inputs/syslog/commons_test.go
index f55d080a13fba..10f2ddf511d22 100644
--- a/plugins/inputs/syslog/commons_test.go
+++ b/plugins/inputs/syslog/commons_test.go
@@ -1,9 +1,12 @@
package syslog
import (
+ "time"
+
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
+ framing "github.com/influxdata/telegraf/internal/syslog"
"github.com/influxdata/telegraf/testutil"
- "time"
)
var (
@@ -13,16 +16,16 @@ var (
type testCasePacket struct {
name string
data []byte
- wantBestEffort *testutil.Metric
- wantStrict *testutil.Metric
+ wantBestEffort telegraf.Metric
+ wantStrict telegraf.Metric
werr bool
}
type testCaseStream struct {
name string
data []byte
- wantBestEffort []testutil.Metric
- wantStrict []testutil.Metric
+ wantBestEffort []telegraf.Metric
+ wantStrict []telegraf.Metric
werr int // how many errors we expect in the strict mode?
}
@@ -37,7 +40,7 @@ func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog {
}
}
-func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool, f Framing) *Syslog {
+func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool, f framing.Framing) *Syslog {
d := &internal.Duration{
Duration: defaultReadTimeout,
}
diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go
index 1dea841448d7c..d0352c6ae1c7f 100644
--- a/plugins/inputs/syslog/nontransparent_test.go
+++ b/plugins/inputs/syslog/nontransparent_test.go
@@ -9,8 +9,9 @@ import (
"testing"
"time"
- "github.com/google/go-cmp/cmp"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
+ framing "github.com/influxdata/telegraf/internal/syslog"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@@ -20,10 +21,16 @@ func getTestCasesForNonTransparent() []testCaseStream {
{
name: "1st/avg/ok",
data: []byte(`<29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`),
- wantStrict: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantStrict: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "notice",
+ "facility": "daemon",
+ "hostname": "web1",
+ "appname": "someservice",
+ },
+ map[string]interface{}{
"version": uint16(1),
"timestamp": time.Unix(1456029177, 0).UnixNano(),
"procid": "2341",
@@ -35,19 +42,19 @@ func getTestCasesForNonTransparent() []testCaseStream {
"severity_code": 5,
"facility_code": 3,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ },
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "notice",
"facility": "daemon",
"hostname": "web1",
"appname": "someservice",
},
- Time: defaultTime,
- },
- },
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(1),
"timestamp": time.Unix(1456029177, 0).UnixNano(),
"procid": "2341",
@@ -59,75 +66,69 @@ func getTestCasesForNonTransparent() []testCaseStream {
"severity_code": 5,
"facility_code": 3,
},
- Tags: map[string]string{
- "severity": "notice",
- "facility": "daemon",
- "hostname": "web1",
- "appname": "someservice",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
werr: 1,
},
{
name: "1st/min/ok//2nd/min/ok",
data: []byte("<1>2 - - - - - -\n<4>11 - - - - - -\n"),
- wantStrict: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantStrict: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(2),
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
+ defaultTime,
+ ),
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "warning",
"facility": "kern",
},
- Time: defaultTime,
- },
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(11),
"severity_code": 4,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "warning",
+ defaultTime.Add(time.Nanosecond),
+ ),
+ },
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
"facility": "kern",
},
- Time: defaultTime.Add(time.Nanosecond),
- },
- },
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(2),
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
+ defaultTime,
+ ),
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "warning",
"facility": "kern",
},
- Time: defaultTime,
- },
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(11),
"severity_code": 4,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "warning",
- "facility": "kern",
- },
- Time: defaultTime.Add(time.Nanosecond),
- },
+ defaultTime.Add(time.Nanosecond),
+ ),
},
},
}
@@ -138,7 +139,7 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan
for _, tc := range getTestCasesForNonTransparent() {
t.Run(tc.name, func(t *testing.T) {
// Creation of a strict mode receiver
- receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, NonTransparent)
+ receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, framing.NonTransparent)
require.NotNil(t, receiver)
if wantTLS {
receiver.ServerConfig = *pki.TLSServerConfig()
@@ -185,13 +186,7 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan
if len(acc.Errors) != tc.werr {
t.Fatalf("Got unexpected errors. want error = %v, errors = %v\n", tc.werr, acc.Errors)
}
- var got []testutil.Metric
- for _, metric := range acc.Metrics {
- got = append(got, *metric)
- }
- if !cmp.Equal(tc.wantStrict, got) {
- t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantStrict, got))
- }
+ testutil.RequireMetricsEqual(t, tc.wantStrict, acc.GetTelegrafMetrics())
})
}
}
@@ -200,7 +195,7 @@ func testBestEffortNonTransparent(t *testing.T, protocol string, address string,
for _, tc := range getTestCasesForNonTransparent() {
t.Run(tc.name, func(t *testing.T) {
// Creation of a best effort mode receiver
- receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, NonTransparent)
+ receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, framing.NonTransparent)
require.NotNil(t, receiver)
if wantTLS {
receiver.ServerConfig = *pki.TLSServerConfig()
@@ -239,14 +234,7 @@ func testBestEffortNonTransparent(t *testing.T, protocol string, address string,
acc.Wait(len(tc.wantBestEffort))
}
- // Verify
- var got []testutil.Metric
- for _, metric := range acc.Metrics {
- got = append(got, *metric)
- }
- if !cmp.Equal(tc.wantBestEffort, got) {
- t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantBestEffort, got))
- }
+ testutil.RequireMetricsEqual(t, tc.wantStrict, acc.GetTelegrafMetrics())
})
}
}
diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go
index c61805131f594..210b64dbe11c8 100644
--- a/plugins/inputs/syslog/octetcounting_test.go
+++ b/plugins/inputs/syslog/octetcounting_test.go
@@ -10,8 +10,9 @@ import (
"testing"
"time"
- "github.com/google/go-cmp/cmp"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
+ framing "github.com/influxdata/telegraf/internal/syslog"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@@ -21,10 +22,16 @@ func getTestCasesForOctetCounting() []testCaseStream {
{
name: "1st/avg/ok",
data: []byte(`188 <29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`),
- wantStrict: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantStrict: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "notice",
+ "facility": "daemon",
+ "hostname": "web1",
+ "appname": "someservice",
+ },
+ map[string]interface{}{
"version": uint16(1),
"timestamp": time.Unix(1456029177, 0).UnixNano(),
"procid": "2341",
@@ -36,19 +43,19 @@ func getTestCasesForOctetCounting() []testCaseStream {
"severity_code": 5,
"facility_code": 3,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ },
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "notice",
"facility": "daemon",
"hostname": "web1",
"appname": "someservice",
},
- Time: defaultTime,
- },
- },
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(1),
"timestamp": time.Unix(1456029177, 0).UnixNano(),
"procid": "2341",
@@ -60,236 +67,236 @@ func getTestCasesForOctetCounting() []testCaseStream {
"severity_code": 5,
"facility_code": 3,
},
- Tags: map[string]string{
- "severity": "notice",
- "facility": "daemon",
- "hostname": "web1",
- "appname": "someservice",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
},
{
name: "1st/min/ok//2nd/min/ok",
data: []byte("16 <1>2 - - - - - -17 <4>11 - - - - - -"),
- wantStrict: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantStrict: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(2),
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
+ defaultTime,
+ ),
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "warning",
"facility": "kern",
},
- Time: defaultTime,
- },
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(11),
"severity_code": 4,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "warning",
+ defaultTime.Add(time.Nanosecond),
+ ),
+ },
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
"facility": "kern",
},
- Time: defaultTime.Add(time.Nanosecond),
- },
- },
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(2),
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
+ defaultTime,
+ ),
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "warning",
"facility": "kern",
},
- Time: defaultTime,
- },
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(11),
"severity_code": 4,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "warning",
- "facility": "kern",
- },
- Time: defaultTime.Add(time.Nanosecond),
- },
+ defaultTime.Add(time.Nanosecond),
+ ),
},
},
{
name: "1st/utf8/ok",
data: []byte("23 <1>1 - - - - - - hellø"),
- wantStrict: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantStrict: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(1),
"message": "hellø",
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ },
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "alert",
"facility": "kern",
},
- Time: defaultTime,
- },
- },
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(1),
"message": "hellø",
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
},
{
name: "1st/nl/ok", // newline
data: []byte("28 <1>3 - - - - - - hello\nworld"),
- wantStrict: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantStrict: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(3),
"message": "hello\nworld",
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ },
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "alert",
"facility": "kern",
},
- Time: defaultTime,
- },
- },
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(3),
"message": "hello\nworld",
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
},
{
name: "1st/uf/ko", // underflow (msglen less than provided octets)
data: []byte("16 <1>2"),
wantStrict: nil,
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(2),
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
werr: 1,
},
{
name: "1st/min/ok",
data: []byte("16 <1>1 - - - - - -"),
- wantStrict: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantStrict: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(1),
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ },
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "alert",
"facility": "kern",
},
- Time: defaultTime,
- },
- },
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(1),
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
},
{
name: "1st/uf/mf", // The first "underflow" message breaks also the second one
data: []byte("16 <1>217 <11>1 - - - - - -"),
wantStrict: nil,
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(217),
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
werr: 1,
},
// {
- // name: "1st/of/ko", // overflow (msglen greather then max allowed octets)
+ // name: "1st/of/ko", // overflow (msglen greater than max allowed octets)
// data: []byte(fmt.Sprintf("8193 <%d>%d %s %s %s %s %s 12 %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)),
// want: []testutil.Metric{},
// },
{
name: "1st/max/ok",
data: []byte(fmt.Sprintf("8192 <%d>%d %s %s %s %s %s - %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)),
- wantStrict: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantStrict: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "debug",
+ "facility": "local7",
+ "hostname": maxH,
+ "appname": maxA,
+ },
+ map[string]interface{}{
"version": maxV,
"timestamp": time.Unix(1514764799, 999999000).UnixNano(),
"message": message7681,
@@ -298,19 +305,19 @@ func getTestCasesForOctetCounting() []testCaseStream {
"facility_code": 23,
"severity_code": 7,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ },
+ wantBestEffort: []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "debug",
"facility": "local7",
"hostname": maxH,
"appname": maxA,
},
- Time: defaultTime,
- },
- },
- wantBestEffort: []testutil.Metric{
- {
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": maxV,
"timestamp": time.Unix(1514764799, 999999000).UnixNano(),
"message": message7681,
@@ -319,14 +326,8 @@ func getTestCasesForOctetCounting() []testCaseStream {
"facility_code": 23,
"severity_code": 7,
},
- Tags: map[string]string{
- "severity": "debug",
- "facility": "local7",
- "hostname": maxH,
- "appname": maxA,
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
},
}
@@ -338,7 +339,7 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want
for _, tc := range getTestCasesForOctetCounting() {
t.Run(tc.name, func(t *testing.T) {
// Creation of a strict mode receiver
- receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, OctetCounting)
+ receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, framing.OctetCounting)
require.NotNil(t, receiver)
if wantTLS {
receiver.ServerConfig = *pki.TLSServerConfig()
@@ -385,13 +386,7 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want
if len(acc.Errors) != tc.werr {
t.Fatalf("Got unexpected errors. want error = %v, errors = %v\n", tc.werr, acc.Errors)
}
- var got []testutil.Metric
- for _, metric := range acc.Metrics {
- got = append(got, *metric)
- }
- if !cmp.Equal(tc.wantStrict, got) {
- t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantStrict, got))
- }
+ testutil.RequireMetricsEqual(t, tc.wantStrict, acc.GetTelegrafMetrics())
})
}
}
@@ -400,7 +395,7 @@ func testBestEffortOctetCounting(t *testing.T, protocol string, address string,
for _, tc := range getTestCasesForOctetCounting() {
t.Run(tc.name, func(t *testing.T) {
// Creation of a best effort mode receiver
- receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, OctetCounting)
+ receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, framing.OctetCounting)
require.NotNil(t, receiver)
if wantTLS {
receiver.ServerConfig = *pki.TLSServerConfig()
@@ -439,14 +434,7 @@ func testBestEffortOctetCounting(t *testing.T, protocol string, address string,
acc.Wait(len(tc.wantBestEffort))
}
- // Verify
- var got []testutil.Metric
- for _, metric := range acc.Metrics {
- got = append(got, *metric)
- }
- if !cmp.Equal(tc.wantBestEffort, got) {
- t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantBestEffort, got))
- }
+ testutil.RequireMetricsEqual(t, tc.wantBestEffort, acc.GetTelegrafMetrics())
})
}
}
diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go
index ba856b0ac2bc8..31007bad928a3 100644
--- a/plugins/inputs/syslog/rfc5426_test.go
+++ b/plugins/inputs/syslog/rfc5426_test.go
@@ -10,88 +10,89 @@ import (
"testing"
"time"
- "github.com/google/go-cmp/cmp"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func getTestCasesForRFC5426() []testCasePacket {
testCases := []testCasePacket{
- {
- name: "empty",
- data: []byte(""),
- werr: true,
- },
{
name: "complete",
data: []byte("<1>1 - - - - - - A"),
- wantBestEffort: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantBestEffort: testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(1),
"message": "A",
"facility_code": 0,
"severity_code": 1,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ wantStrict: testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "alert",
"facility": "kern",
},
- Time: defaultTime,
- },
- wantStrict: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(1),
"message": "A",
"facility_code": 0,
"severity_code": 1,
},
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
{
name: "one/per/packet",
data: []byte("<1>3 - - - - - - A<1>4 - - - - - - B"),
- wantBestEffort: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantBestEffort: testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(3),
"message": "A<1>4 - - - - - - B",
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ wantStrict: testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "alert",
"facility": "kern",
},
- Time: defaultTime,
- },
- wantStrict: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(3),
"message": "A<1>4 - - - - - - B",
"severity_code": 1,
"facility_code": 0,
},
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
{
name: "average",
data: []byte(`<29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`),
- wantBestEffort: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantBestEffort: testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "notice",
+ "facility": "daemon",
+ "hostname": "web1",
+ "appname": "someservice",
+ },
+ map[string]interface{}{
"version": uint16(1),
"timestamp": time.Unix(1456029177, 0).UnixNano(),
"procid": "2341",
@@ -103,17 +104,17 @@ func getTestCasesForRFC5426() []testCasePacket {
"severity_code": 5,
"facility_code": 3,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ wantStrict: testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "notice",
"facility": "daemon",
"hostname": "web1",
"appname": "someservice",
},
- Time: defaultTime,
- },
- wantStrict: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(1),
"timestamp": time.Unix(1456029177, 0).UnixNano(),
"procid": "2341",
@@ -125,21 +126,21 @@ func getTestCasesForRFC5426() []testCasePacket {
"severity_code": 5,
"facility_code": 3,
},
- Tags: map[string]string{
- "severity": "notice",
- "facility": "daemon",
- "hostname": "web1",
- "appname": "someservice",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
{
name: "max",
data: []byte(fmt.Sprintf("<%d>%d %s %s %s %s %s - %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)),
- wantBestEffort: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantBestEffort: testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "debug",
+ "facility": "local7",
+ "hostname": maxH,
+ "appname": maxA,
+ },
+ map[string]interface{}{
"version": maxV,
"timestamp": time.Unix(1514764799, 999999000).UnixNano(),
"message": message7681,
@@ -148,17 +149,17 @@ func getTestCasesForRFC5426() []testCasePacket {
"severity_code": 7,
"facility_code": 23,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ wantStrict: testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "debug",
"facility": "local7",
"hostname": maxH,
"appname": maxA,
},
- Time: defaultTime,
- },
- wantStrict: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": maxV,
"timestamp": time.Unix(1514764799, 999999000).UnixNano(),
"message": message7681,
@@ -167,64 +168,58 @@ func getTestCasesForRFC5426() []testCasePacket {
"severity_code": 7,
"facility_code": 23,
},
- Tags: map[string]string{
- "severity": "debug",
- "facility": "local7",
- "hostname": maxH,
- "appname": maxA,
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
{
name: "minimal/incomplete",
data: []byte("<1>2"),
- wantBestEffort: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantBestEffort: testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(2),
"facility_code": 0,
"severity_code": 1,
},
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
werr: true,
},
{
name: "trim message",
data: []byte("<1>1 - - - - - - \tA\n"),
- wantBestEffort: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ wantBestEffort: testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
"version": uint16(1),
"message": "\tA",
"facility_code": 0,
"severity_code": 1,
},
- Tags: map[string]string{
+ defaultTime,
+ ),
+ wantStrict: testutil.MustMetric(
+ "syslog",
+ map[string]string{
"severity": "alert",
"facility": "kern",
},
- Time: defaultTime,
- },
- wantStrict: &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
+ map[string]interface{}{
"version": uint16(1),
"message": "\tA",
"facility_code": 0,
"severity_code": 1,
},
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: defaultTime,
- },
+ defaultTime,
+ ),
},
}
@@ -240,14 +235,10 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool)
require.NoError(t, receiver.Start(acc))
defer receiver.Stop()
- // Clear
- acc.ClearMetrics()
- acc.Errors = make([]error, 0)
-
// Connect
conn, err := net.Dial(protocol, address)
require.NotNil(t, conn)
- require.Nil(t, err)
+ require.NoError(t, err)
// Write
_, err = conn.Write(tc.data)
@@ -269,19 +260,17 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool)
}
// Compare
- var got *testutil.Metric
- var want *testutil.Metric
+ var got telegraf.Metric
+ var want telegraf.Metric
if len(acc.Metrics) > 0 {
- got = acc.Metrics[0]
+ got = acc.GetTelegrafMetrics()[0]
}
if bestEffort {
want = tc.wantBestEffort
} else {
want = tc.wantStrict
}
- if !cmp.Equal(want, got) {
- t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, got))
- }
+ testutil.RequireMetricEqual(t, want, got)
})
}
}
@@ -337,7 +326,7 @@ func TestTimeIncrement_udp(t *testing.T) {
conn, err := net.Dial("udp", address)
require.NotNil(t, conn)
defer conn.Close()
- require.Nil(t, err)
+ require.NoError(t, err)
// Write
_, e := conn.Write([]byte("<1>1 - - - - - -"))
@@ -346,23 +335,22 @@ func TestTimeIncrement_udp(t *testing.T) {
// Wait
acc.Wait(1)
- want := &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
- "version": uint16(1),
- "facility_code": 0,
- "severity_code": 1,
- },
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: getNow(),
- }
-
- if !cmp.Equal(want, acc.Metrics[0]) {
- t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, acc.Metrics[0]))
+ want := []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
+ "version": uint16(1),
+ "facility_code": 0,
+ "severity_code": 1,
+ },
+ getNow(),
+ ),
}
+ testutil.RequireMetricsEqual(t, want, acc.GetTelegrafMetrics())
// New one with different time
atomic.StoreInt64(&i, atomic.LoadInt64(&i)+1)
@@ -377,23 +365,22 @@ func TestTimeIncrement_udp(t *testing.T) {
// Wait
acc.Wait(1)
- want = &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
- "version": uint16(1),
- "facility_code": 0,
- "severity_code": 1,
- },
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: getNow(),
- }
-
- if !cmp.Equal(want, acc.Metrics[0]) {
- t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, acc.Metrics[0]))
+ want = []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
+ "version": uint16(1),
+ "facility_code": 0,
+ "severity_code": 1,
+ },
+ getNow(),
+ ),
}
+ testutil.RequireMetricsEqual(t, want, acc.GetTelegrafMetrics())
// New one with same time as previous one
@@ -407,21 +394,20 @@ func TestTimeIncrement_udp(t *testing.T) {
// Wait
acc.Wait(1)
- want = &testutil.Metric{
- Measurement: "syslog",
- Fields: map[string]interface{}{
- "version": uint16(1),
- "facility_code": 0,
- "severity_code": 1,
- },
- Tags: map[string]string{
- "severity": "alert",
- "facility": "kern",
- },
- Time: getNow().Add(time.Nanosecond),
- }
-
- if !cmp.Equal(want, acc.Metrics[0]) {
- t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, acc.Metrics[0]))
+ want = []telegraf.Metric{
+ testutil.MustMetric(
+ "syslog",
+ map[string]string{
+ "severity": "alert",
+ "facility": "kern",
+ },
+ map[string]interface{}{
+ "version": uint16(1),
+ "facility_code": 0,
+ "severity_code": 1,
+ },
+ getNow().Add(time.Nanosecond),
+ ),
}
+ testutil.RequireMetricsEqual(t, want, acc.GetTelegrafMetrics())
}
diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go
index 51d2ee4557e3c..6b3615a3e80ce 100644
--- a/plugins/inputs/syslog/syslog.go
+++ b/plugins/inputs/syslog/syslog.go
@@ -12,13 +12,14 @@ import (
"time"
"unicode"
- "github.com/influxdata/go-syslog"
- "github.com/influxdata/go-syslog/nontransparent"
- "github.com/influxdata/go-syslog/octetcounting"
- "github.com/influxdata/go-syslog/rfc5424"
+ "github.com/influxdata/go-syslog/v2"
+ "github.com/influxdata/go-syslog/v2/nontransparent"
+ "github.com/influxdata/go-syslog/v2/octetcounting"
+ "github.com/influxdata/go-syslog/v2/rfc5424"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- tlsConfig "github.com/influxdata/telegraf/internal/tls"
+ framing "github.com/influxdata/telegraf/internal/syslog"
+ tlsConfig "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -32,7 +33,7 @@ type Syslog struct {
KeepAlivePeriod *internal.Duration
MaxConnections int
ReadTimeout *internal.Duration
- Framing Framing
+ Framing framing.Framing
Trailer nontransparent.TrailerType
BestEffort bool
Separator string `toml:"sdparam_separator"`
@@ -83,10 +84,10 @@ var sampleConfig = `
## The framing technique with which it is expected that messages are transported (default = "octet-counting").
## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
## or the non-transparent framing technique (RFC6587#section-3.4.2).
- ## Must be one of "octect-counting", "non-transparent".
+ ## Must be one of "octet-counting", "non-transparent".
# framing = "octet-counting"
- ## The trailer to be expected in case of non-trasparent framing (default = "LF").
+ ## The trailer to be expected in case of non-transparent framing (default = "LF").
## Must be one of "LF", or "NUL".
# trailer = "LF"
@@ -312,8 +313,8 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) {
opts = append(opts, syslog.WithBestEffort())
}
- // Select the parser to use depeding on transport framing
- if s.Framing == OctetCounting {
+ // Select the parser to use depending on transport framing
+ if s.Framing == framing.OctetCounting {
// Octet counting transparent framing
p = octetcounting.NewParser(opts...)
} else {
@@ -445,7 +446,7 @@ func init() {
ReadTimeout: &internal.Duration{
Duration: defaultReadTimeout,
},
- Framing: OctetCounting,
+ Framing: framing.OctetCounting,
Trailer: nontransparent.LF,
Separator: "_",
}
diff --git a/plugins/inputs/sysstat/README.md b/plugins/inputs/sysstat/README.md
index d8e0e95d84f9f..9775c1a305c95 100644
--- a/plugins/inputs/sysstat/README.md
+++ b/plugins/inputs/sysstat/README.md
@@ -16,18 +16,15 @@ the created binary data file with the `sadf` utility.
## On Debian and Arch Linux the default path is /usr/lib/sa/sadc whereas
## on RHEL and CentOS the default path is /usr/lib64/sa/sadc
sadc_path = "/usr/lib/sa/sadc" # required
- #
- #
+
## Path to the sadf command, if it is not in PATH
# sadf_path = "/usr/bin/sadf"
- #
- #
+
## Activities is a list of activities, that are passed as argument to the
## sadc collector utility (e.g: DISK, SNMP etc...)
## The more activities that are added, the more data is collected.
# activities = ["DISK"]
- #
- #
+
## Group metrics to measurements.
##
## If group is false each metric will be prefixed with a description
@@ -35,8 +32,7 @@ the created binary data file with the `sadf` utility.
##
## If Group is true, corresponding metrics are grouped to a single measurement.
# group = true
- #
- #
+
## Options for the sadf command. The values on the left represent the sadf options and
## the values on the right their description (wich are used for grouping and prefixing metrics).
##
@@ -58,8 +54,7 @@ the created binary data file with the `sadf` utility.
-w = "task"
# -H = "hugepages" # only available for newer linux distributions
# "-I ALL" = "interrupts" # requires INT activity
- #
- #
+
## Device tags can be used to add additional tags for devices. For example the configuration below
## adds a tag vg with value rootvg for all metrics with sda devices.
# [[inputs.sysstat.device_tags.sda]]
diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go
index 42ce89550aff6..9f530024b52d8 100644
--- a/plugins/inputs/sysstat/sysstat.go
+++ b/plugins/inputs/sysstat/sysstat.go
@@ -7,7 +7,6 @@ import (
"encoding/csv"
"fmt"
"io"
- "log"
"os"
"os/exec"
"path"
@@ -67,6 +66,8 @@ type Sysstat struct {
DeviceTags map[string][]map[string]string `toml:"device_tags"`
tmpFile string
interval int
+
+ Log telegraf.Logger
}
func (*Sysstat) Description() string {
@@ -81,18 +82,15 @@ var sampleConfig = `
## Arch: /usr/lib/sa/sadc
## RHEL/CentOS: /usr/lib64/sa/sadc
sadc_path = "/usr/lib/sa/sadc" # required
- #
- #
+
## Path to the sadf command, if it is not in PATH
# sadf_path = "/usr/bin/sadf"
- #
- #
+
## Activities is a list of activities, that are passed as argument to the
## sadc collector utility (e.g: DISK, SNMP etc...)
## The more activities that are added, the more data is collected.
# activities = ["DISK"]
- #
- #
+
## Group metrics to measurements.
##
## If group is false each metric will be prefixed with a description
@@ -100,8 +98,7 @@ var sampleConfig = `
##
## If Group is true, corresponding metrics are grouped to a single measurement.
# group = true
- #
- #
+
## Options for the sadf command. The values on the left represent the sadf
## options and the values on the right their description (which are used for
## grouping and prefixing metrics).
@@ -125,8 +122,7 @@ var sampleConfig = `
-w = "task"
# -H = "hugepages" # only available for newer linux distributions
# "-I ALL" = "interrupts" # requires INT activity
- #
- #
+
## Device tags can be used to add additional tags for devices.
## For example the configuration below adds a tag vg with value rootvg for
## all metrics with sda devices.
@@ -196,7 +192,7 @@ func (s *Sysstat) collect() error {
out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval))
if err != nil {
if err := os.Remove(s.tmpFile); err != nil {
- log.Printf("E! failed to remove tmp file after %s command: %s", strings.Join(cmd.Args, " "), err)
+ s.Log.Errorf("Failed to remove tmp file after %q command: %s", strings.Join(cmd.Args, " "), err.Error())
}
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
}
@@ -335,6 +331,7 @@ func (s *Sysstat) sadfOptions(activityOption string) []string {
// escape removes % and / chars in field names
func escape(dirty string) string {
var fieldEscaper = strings.NewReplacer(
+ `%%`, "pct_",
`%`, "pct_",
`/`, "_per_",
)
diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go
index 876e6d2c80169..4aecfaacc2a15 100644
--- a/plugins/inputs/sysstat/sysstat_test.go
+++ b/plugins/inputs/sysstat/sysstat_test.go
@@ -13,6 +13,7 @@ import (
)
var s = Sysstat{
+ Log: testutil.Logger{},
interval: 10,
Sadc: "/usr/lib/sa/sadc",
Sadf: "/usr/bin/sadf",
@@ -225,6 +226,10 @@ func TestEscape(t *testing.T) {
"%util",
"pct_util",
},
+ {
+ "%%util",
+ "pct_util",
+ },
{
"bread/s",
"bread_per_s",
diff --git a/plugins/inputs/system/README.md b/plugins/inputs/system/README.md
index efaa8a17fa5f6..8b16c1de08d25 100644
--- a/plugins/inputs/system/README.md
+++ b/plugins/inputs/system/README.md
@@ -3,6 +3,8 @@
The system plugin gathers general stats on system load, uptime,
and number of users logged in. It is similar to the unix `uptime` command.
+Number of CPUs is obtained from the /proc/cpuinfo file.
+
### Configuration:
```toml
@@ -13,7 +15,7 @@ and number of users logged in. It is similar to the unix `uptime` command.
#### Permissions:
The `n_users` field requires read access to `/var/run/utmp`, and may require
-the `telegraf` user to be added to the `utmp` group on some systems.
+the `telegraf` user to be added to the `utmp` group on some systems. If this file does not exist `n_users` will be skipped.
### Metrics:
diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go
index 256aca0596040..824dbe446d5be 100644
--- a/plugins/inputs/system/ps.go
+++ b/plugins/inputs/system/ps.go
@@ -171,7 +171,14 @@ func (s *SystemPS) SwapStat() (*mem.SwapMemoryStat, error) {
}
func (s *SystemPS) Temperature() ([]host.TemperatureStat, error) {
- return host.SensorsTemperatures()
+ temp, err := host.SensorsTemperatures()
+ if err != nil {
+ _, ok := err.(*host.Warnings)
+ if !ok {
+ return temp, err
+ }
+ }
+ return temp, nil
}
func (s *SystemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) {
diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go
index 5c68870bbc1a2..32747cca20314 100644
--- a/plugins/inputs/system/system.go
+++ b/plugins/inputs/system/system.go
@@ -5,47 +5,56 @@ import (
"bytes"
"fmt"
"os"
- "runtime"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/host"
"github.com/shirou/gopsutil/load"
)
-type SystemStats struct{}
+type SystemStats struct {
+ Log telegraf.Logger
+}
-func (_ *SystemStats) Description() string {
+func (*SystemStats) Description() string {
return "Read metrics about system load & uptime"
}
-func (_ *SystemStats) SampleConfig() string {
+func (*SystemStats) SampleConfig() string {
return `
## Uncomment to remove deprecated metrics.
# fielddrop = ["uptime_format"]
`
}
-func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
+func (s *SystemStats) Gather(acc telegraf.Accumulator) error {
loadavg, err := load.Avg()
if err != nil && !strings.Contains(err.Error(), "not implemented") {
return err
}
+ numCPUs, err := cpu.Counts(true)
+ if err != nil {
+ return err
+ }
+
fields := map[string]interface{}{
"load1": loadavg.Load1,
"load5": loadavg.Load5,
"load15": loadavg.Load15,
- "n_cpus": runtime.NumCPU(),
+ "n_cpus": numCPUs,
}
users, err := host.Users()
if err == nil {
fields["n_users"] = len(users)
- } else if !os.IsPermission(err) {
- return err
+ } else if os.IsNotExist(err) {
+ s.Log.Debugf("Reading users: %s", err.Error())
+ } else if os.IsPermission(err) {
+ s.Log.Debug(err.Error())
}
now := time.Now()
diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md
new file mode 100644
index 0000000000000..fc8306dee2da9
--- /dev/null
+++ b/plugins/inputs/systemd_units/README.md
@@ -0,0 +1,135 @@
+# systemd Units Input Plugin
+
+The systemd_units plugin gathers systemd unit status on Linux. It relies on
+`systemctl list-units --all --type=service` to collect data on service status.
+
+The results are tagged with the unit name and provide enumerated fields for
+loaded, active and running fields, indicating the unit health.
+
+This plugin is related to the [win_services module](/plugins/inputs/win_services/), which
+fulfills the same purpose on windows.
+
+In addition to services, this plugin can gather other unit types as well,
+see `systemctl list-units --all --type help` for possible options.
+
+### Configuration
+```toml
+[[inputs.systemd_units]]
+ ## Set timeout for systemctl execution
+ # timeout = "1s"
+ #
+ ## Filter for a specific unit type, default is "service", other possible
+ ## values are "socket", "target", "device", "mount", "automount", "swap",
+ ## "timer", "path", "slice" and "scope ":
+ # unittype = "service"
+```
+
+### Metrics
+- systemd_units:
+ - tags:
+ - name (string, unit name)
+ - load (string, load state)
+ - active (string, active state)
+ - sub (string, sub state)
+ - fields:
+ - load_code (int, see below)
+ - active_code (int, see below)
+ - sub_code (int, see below)
+
+#### Load
+
+enumeration of [unit_load_state_table](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L87)
+
+| Value | Meaning | Description |
+| ----- | ------- | ----------- |
+| 0 | loaded | unit is ~ |
+| 1 | stub | unit is ~ |
+| 2 | not-found | unit is ~ |
+| 3 | bad-setting | unit is ~ |
+| 4 | error | unit is ~ |
+| 5 | merged | unit is ~ |
+| 6 | masked | unit is ~ |
+
+#### Active
+
+enumeration of [unit_active_state_table](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L99)
+
+| Value | Meaning | Description |
+| ----- | ------- | ----------- |
+| 0 | active | unit is ~ |
+| 1 | reloading | unit is ~ |
+| 2 | inactive | unit is ~ |
+| 3 | failed | unit is ~ |
+| 4 | activating | unit is ~ |
+| 5 | deactivating | unit is ~ |
+
+#### Sub
+
+enumeration of sub states, see various [unittype_state_tables](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L163);
+duplicates were removed, tables are hex aligned to keep some space for future
+values
+
+| Value | Meaning | Description |
+| ----- | ------- | ----------- |
+| | | service_state_table start at 0x0000 |
+| 0x0000 | running | unit is ~ |
+| 0x0001 | dead | unit is ~ |
+| 0x0002 | start-pre | unit is ~ |
+| 0x0003 | start | unit is ~ |
+| 0x0004 | exited | unit is ~ |
+| 0x0005 | reload | unit is ~ |
+| 0x0006 | stop | unit is ~ |
+| 0x0007 | stop-watchdog | unit is ~ |
+| 0x0008 | stop-sigterm | unit is ~ |
+| 0x0009 | stop-sigkill | unit is ~ |
+| 0x000a | stop-post | unit is ~ |
+| 0x000b | final-sigterm | unit is ~ |
+| 0x000c | failed | unit is ~ |
+| 0x000d | auto-restart | unit is ~ |
+| | | service_state_table start at 0x0010 |
+| 0x0010 | waiting | unit is ~ |
+| | | service_state_table start at 0x0020 |
+| 0x0020 | tentative | unit is ~ |
+| 0x0021 | plugged | unit is ~ |
+| | | service_state_table start at 0x0030 |
+| 0x0030 | mounting | unit is ~ |
+| 0x0031 | mounting-done | unit is ~ |
+| 0x0032 | mounted | unit is ~ |
+| 0x0033 | remounting | unit is ~ |
+| 0x0034 | unmounting | unit is ~ |
+| 0x0035 | remounting-sigterm | unit is ~ |
+| 0x0036 | remounting-sigkill | unit is ~ |
+| 0x0037 | unmounting-sigterm | unit is ~ |
+| 0x0038 | unmounting-sigkill | unit is ~ |
+| | | service_state_table start at 0x0040 |
+| | | service_state_table start at 0x0050 |
+| 0x0050 | abandoned | unit is ~ |
+| | | service_state_table start at 0x0060 |
+| 0x0060 | active | unit is ~ |
+| | | service_state_table start at 0x0070 |
+| 0x0070 | start-chown | unit is ~ |
+| 0x0071 | start-post | unit is ~ |
+| 0x0072 | listening | unit is ~ |
+| 0x0073 | stop-pre | unit is ~ |
+| 0x0074 | stop-pre-sigterm | unit is ~ |
+| 0x0075 | stop-pre-sigkill | unit is ~ |
+| 0x0076 | final-sigkill | unit is ~ |
+| | | service_state_table start at 0x0080 |
+| 0x0080 | activating | unit is ~ |
+| 0x0081 | activating-done | unit is ~ |
+| 0x0082 | deactivating | unit is ~ |
+| 0x0083 | deactivating-sigterm | unit is ~ |
+| 0x0084 | deactivating-sigkill | unit is ~ |
+| | | service_state_table start at 0x0090 |
+| | | service_state_table start at 0x00a0 |
+| 0x00a0 | elapsed | unit is ~ |
+| | | |
+
+### Example Output
+
+```
+systemd_units,host=host1.example.com,name=dbus.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000
+systemd_units,host=host1.example.com,name=networking.service,load=loaded,active=failed,sub=failed load_code=0i,active_code=3i,sub_code=12i 1533730725000000000
+systemd_units,host=host1.example.com,name=ssh.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000
+...
+```
diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go
new file mode 100644
index 0000000000000..64caf03d007f3
--- /dev/null
+++ b/plugins/inputs/systemd_units/systemd_units_linux.go
@@ -0,0 +1,221 @@
+package systemd_units
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// SystemdUnits is a telegraf plugin to gather systemd unit status
+type SystemdUnits struct {
+ Timeout internal.Duration
+ UnitType string `toml:"unittype"`
+ systemctl systemctl
+}
+
+type systemctl func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error)
+
+const measurement = "systemd_units"
+
+// Below are mappings of systemd state tables as defined in
+// https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c
+// Duplicate strings are removed from this list.
+var load_map = map[string]int{
+ "loaded": 0,
+ "stub": 1,
+ "not-found": 2,
+ "bad-setting": 3,
+ "error": 4,
+ "merged": 5,
+ "masked": 6,
+}
+
+var active_map = map[string]int{
+ "active": 0,
+ "reloading": 1,
+ "inactive": 2,
+ "failed": 3,
+ "activating": 4,
+ "deactivating": 5,
+}
+
+var sub_map = map[string]int{
+ // service_state_table, offset 0x0000
+ "running": 0x0000,
+ "dead": 0x0001,
+ "start-pre": 0x0002,
+ "start": 0x0003,
+ "exited": 0x0004,
+ "reload": 0x0005,
+ "stop": 0x0006,
+ "stop-watchdog": 0x0007,
+ "stop-sigterm": 0x0008,
+ "stop-sigkill": 0x0009,
+ "stop-post": 0x000a,
+ "final-sigterm": 0x000b,
+ "failed": 0x000c,
+ "auto-restart": 0x000d,
+
+ // automount_state_table, offset 0x0010
+ "waiting": 0x0010,
+
+ // device_state_table, offset 0x0020
+ "tentative": 0x0020,
+ "plugged": 0x0021,
+
+ // mount_state_table, offset 0x0030
+ "mounting": 0x0030,
+ "mounting-done": 0x0031,
+ "mounted": 0x0032,
+ "remounting": 0x0033,
+ "unmounting": 0x0034,
+ "remounting-sigterm": 0x0035,
+ "remounting-sigkill": 0x0036,
+ "unmounting-sigterm": 0x0037,
+ "unmounting-sigkill": 0x0038,
+
+ // path_state_table, offset 0x0040
+
+ // scope_state_table, offset 0x0050
+ "abandoned": 0x0050,
+
+ // slice_state_table, offset 0x0060
+ "active": 0x0060,
+
+ // socket_state_table, offset 0x0070
+ "start-chown": 0x0070,
+ "start-post": 0x0071,
+ "listening": 0x0072,
+ "stop-pre": 0x0073,
+ "stop-pre-sigterm": 0x0074,
+ "stop-pre-sigkill": 0x0075,
+ "final-sigkill": 0x0076,
+
+ // swap_state_table, offset 0x0080
+ "activating": 0x0080,
+ "activating-done": 0x0081,
+ "deactivating": 0x0082,
+ "deactivating-sigterm": 0x0083,
+ "deactivating-sigkill": 0x0084,
+
+ // target_state_table, offset 0x0090
+
+ // timer_state_table, offset 0x00a0
+ "elapsed": 0x00a0,
+}
+
+var (
+ defaultTimeout = internal.Duration{Duration: time.Second}
+ defaultUnitType = "service"
+)
+
+// Description returns a short description of the plugin
+func (s *SystemdUnits) Description() string {
+ return "Gather systemd units state"
+}
+
+// SampleConfig returns sample configuration options.
+func (s *SystemdUnits) SampleConfig() string {
+ return `
+ ## Set timeout for systemctl execution
+ # timeout = "1s"
+ #
+ ## Filter for a specific unit type, default is "service", other possible
+ ## values are "socket", "target", "device", "mount", "automount", "swap",
+ ## "timer", "path", "slice" and "scope ":
+ # unittype = "service"
+`
+}
+
+// Gather parses systemctl outputs and adds counters to the Accumulator
+func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error {
+ out, err := s.systemctl(s.Timeout, s.UnitType)
+ if err != nil {
+ return err
+ }
+
+ scanner := bufio.NewScanner(out)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ data := strings.Fields(line)
+ if len(data) < 4 {
+ acc.AddError(fmt.Errorf("Error parsing line (expected at least 4 fields): %s", line))
+ continue
+ }
+ name := data[0]
+ load := data[1]
+ active := data[2]
+ sub := data[3]
+ tags := map[string]string{
+ "name": name,
+ "load": load,
+ "active": active,
+ "sub": sub,
+ }
+
+ var (
+ load_code int
+ active_code int
+ sub_code int
+ ok bool
+ )
+ if load_code, ok = load_map[load]; !ok {
+ acc.AddError(fmt.Errorf("Error parsing field 'load', value not in map: %s", load))
+ continue
+ }
+ if active_code, ok = active_map[active]; !ok {
+ acc.AddError(fmt.Errorf("Error parsing field 'active', value not in map: %s", active))
+ continue
+ }
+ if sub_code, ok = sub_map[sub]; !ok {
+ acc.AddError(fmt.Errorf("Error parsing field 'sub', value not in map: %s", sub))
+ continue
+ }
+ fields := map[string]interface{}{
+ "load_code": load_code,
+ "active_code": active_code,
+ "sub_code": sub_code,
+ }
+
+ acc.AddFields(measurement, fields, tags)
+ }
+
+ return nil
+}
+
+func setSystemctl(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) {
+ // is systemctl available ?
+ systemctlPath, err := exec.LookPath("systemctl")
+ if err != nil {
+ return nil, err
+ }
+
+ cmd := exec.Command(systemctlPath, "list-units", "--all", fmt.Sprintf("--type=%s", UnitType), "--no-legend")
+
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err = internal.RunTimeout(cmd, Timeout.Duration)
+ if err != nil {
+ return &out, fmt.Errorf("error running systemctl list-units --all --type=%s --no-legend: %s", UnitType, err)
+ }
+
+ return &out, nil
+}
+
+func init() {
+ inputs.Add("systemd_units", func() telegraf.Input {
+ return &SystemdUnits{
+ systemctl: setSystemctl,
+ Timeout: defaultTimeout,
+ UnitType: defaultUnitType,
+ }
+ })
+}
diff --git a/plugins/inputs/systemd_units/systemd_units_linux_test.go b/plugins/inputs/systemd_units/systemd_units_linux_test.go
new file mode 100644
index 0000000000000..f45922bb91af0
--- /dev/null
+++ b/plugins/inputs/systemd_units/systemd_units_linux_test.go
@@ -0,0 +1,100 @@
+package systemd_units
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func TestSystemdUnits(t *testing.T) {
+ tests := []struct {
+ name string
+ line string
+ tags map[string]string
+ fields map[string]interface{}
+ status int
+ err error
+ }{
+ {
+ name: "example loaded active running",
+ line: "example.service loaded active running example service description",
+ tags: map[string]string{"name": "example.service", "load": "loaded", "active": "active", "sub": "running"},
+ fields: map[string]interface{}{
+ "load_code": 0,
+ "active_code": 0,
+ "sub_code": 0,
+ },
+ },
+ {
+ name: "example loaded active exited",
+ line: "example.service loaded active exited example service description",
+ tags: map[string]string{"name": "example.service", "load": "loaded", "active": "active", "sub": "exited"},
+ fields: map[string]interface{}{
+ "load_code": 0,
+ "active_code": 0,
+ "sub_code": 4,
+ },
+ },
+ {
+ name: "example loaded failed failed",
+ line: "example.service loaded failed failed example service description",
+ tags: map[string]string{"name": "example.service", "load": "loaded", "active": "failed", "sub": "failed"},
+ fields: map[string]interface{}{
+ "load_code": 0,
+ "active_code": 3,
+ "sub_code": 12,
+ },
+ },
+ {
+ name: "example not-found inactive dead",
+ line: "example.service not-found inactive dead example service description",
+ tags: map[string]string{"name": "example.service", "load": "not-found", "active": "inactive", "sub": "dead"},
+ fields: map[string]interface{}{
+ "load_code": 2,
+ "active_code": 2,
+ "sub_code": 1,
+ },
+ },
+ {
+ name: "example unknown unknown unknown",
+ line: "example.service unknown unknown unknown example service description",
+ err: fmt.Errorf("Error parsing field 'load', value not in map: %s", "unknown"),
+ },
+ {
+ name: "example too few fields",
+ line: "example.service loaded fai",
+ err: fmt.Errorf("Error parsing line (expected at least 4 fields): %s", "example.service loaded fai"),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ systemd_units := &SystemdUnits{
+ systemctl: func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) {
+ return bytes.NewBufferString(tt.line), nil
+ },
+ }
+ acc := new(testutil.Accumulator)
+ err := acc.GatherError(systemd_units.Gather)
+ if !reflect.DeepEqual(tt.err, err) {
+ t.Errorf("%s: expected error '%#v' got '%#v'", tt.name, tt.err, err)
+ }
+ if len(acc.Metrics) > 0 {
+ m := acc.Metrics[0]
+ if !reflect.DeepEqual(m.Measurement, measurement) {
+ t.Errorf("%s: expected measurement '%#v' got '%#v'\n", tt.name, measurement, m.Measurement)
+ }
+ if !reflect.DeepEqual(m.Tags, tt.tags) {
+ t.Errorf("%s: expected tags\n%#v got\n%#v\n", tt.name, tt.tags, m.Tags)
+ }
+ if !reflect.DeepEqual(m.Fields, tt.fields) {
+ t.Errorf("%s: expected fields\n%#v got\n%#v\n", tt.name, tt.fields, m.Fields)
+ }
+ }
+ })
+ }
+}
diff --git a/plugins/inputs/systemd_units/systemd_units_notlinux.go b/plugins/inputs/systemd_units/systemd_units_notlinux.go
new file mode 100644
index 0000000000000..f53cea3de6eba
--- /dev/null
+++ b/plugins/inputs/systemd_units/systemd_units_notlinux.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package systemd_units
diff --git a/plugins/inputs/tail/README.md b/plugins/inputs/tail/README.md
index 27cb6418eb987..1be8a5e93a42b 100644
--- a/plugins/inputs/tail/README.md
+++ b/plugins/inputs/tail/README.md
@@ -19,12 +19,11 @@ see http://man7.org/linux/man-pages/man1/tail.1.html for more details.
The plugin expects messages in one of the
[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
-### Configuration:
+### Configuration
```toml
-# Stream a log file, like the tail -f command
[[inputs.tail]]
- ## files to tail.
+ ## File names or a pattern to tail.
## These accept standard unix glob matching rules, but with the addition of
## ** as a "super asterisk". ie:
## "/var/log/**.log" -> recursively find all .log files in /var/log
@@ -34,14 +33,30 @@ The plugin expects messages in one of the
## See https://github.com/gobwas/glob for more examples
##
files = ["/var/mymetrics.out"]
+
## Read file from beginning.
- from_beginning = false
+ # from_beginning = false
+
## Whether file is a named pipe
- pipe = false
+ # pipe = false
## Method used to watch for file updates. Can be either "inotify" or "poll".
# watch_method = "inotify"
+ ## Maximum lines of the file to process that have not yet be written by the
+ ## output. For best throughput set based on the number of metrics on each
+ ## line and the size of the output's metric_batch_size.
+ # max_undelivered_lines = 1000
+
+ ## Character encoding to use when interpreting the file contents. Invalid
+ ## characters are replaced using the unicode replacement character. When set
+ ## to the empty string the data is not decoded to text.
+ ## ex: character_encoding = "utf-8"
+ ## character_encoding = "utf-16le"
+ ## character_encoding = "utf-16be"
+ ## character_encoding = ""
+ # character_encoding = ""
+
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -49,7 +64,7 @@ The plugin expects messages in one of the
data_format = "influx"
```
-### Metrics:
+### Metrics
Metrics are produced according to the `data_format` option. Additionally a
tag labeled `path` is added to the metric containing the filename being tailed.
diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go
index bdfa2de444031..70dc09e980e03 100644
--- a/plugins/inputs/tail/tail.go
+++ b/plugins/inputs/tail/tail.go
@@ -3,45 +3,72 @@
package tail
import (
- "fmt"
- "log"
+ "context"
+ "errors"
+ "io"
"strings"
"sync"
+ "github.com/dimchansky/utfbom"
"github.com/influxdata/tail"
-
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/globpath"
+ "github.com/influxdata/telegraf/plugins/common/encoding"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/plugins/parsers/csv"
)
const (
- defaultWatchMethod = "inotify"
+ defaultWatchMethod = "inotify"
+ defaultMaxUndeliveredLines = 1000
)
-type Tail struct {
- Files []string
- FromBeginning bool
- Pipe bool
- WatchMethod string
+var (
+ offsets = make(map[string]int64)
+ offsetsMutex = new(sync.Mutex)
+)
+type empty struct{}
+type semaphore chan empty
+
+type Tail struct {
+ Files []string `toml:"files"`
+ FromBeginning bool `toml:"from_beginning"`
+ Pipe bool `toml:"pipe"`
+ WatchMethod string `toml:"watch_method"`
+ MaxUndeliveredLines int `toml:"max_undelivered_lines"`
+ CharacterEncoding string `toml:"character_encoding"`
+
+ Log telegraf.Logger `toml:"-"`
tailers map[string]*tail.Tail
+ offsets map[string]int64
parserFunc parsers.ParserFunc
wg sync.WaitGroup
- acc telegraf.Accumulator
-
- sync.Mutex
+ ctx context.Context
+ cancel context.CancelFunc
+ acc telegraf.TrackingAccumulator
+ sem semaphore
+ decoder *encoding.Decoder
}
func NewTail() *Tail {
+ offsetsMutex.Lock()
+ offsetsCopy := make(map[string]int64, len(offsets))
+ for k, v := range offsets {
+ offsetsCopy[k] = v
+ }
+ offsetsMutex.Unlock()
+
return &Tail{
- FromBeginning: false,
+ FromBeginning: false,
+ MaxUndeliveredLines: 1000,
+ offsets: offsetsCopy,
}
}
const sampleConfig = `
- ## files to tail.
+ ## File names or a pattern to tail.
## These accept standard unix glob matching rules, but with the addition of
## ** as a "super asterisk". ie:
## "/var/log/**.log" -> recursively find all .log files in /var/log
@@ -51,14 +78,30 @@ const sampleConfig = `
## See https://github.com/gobwas/glob for more examples
##
files = ["/var/mymetrics.out"]
+
## Read file from beginning.
- from_beginning = false
+ # from_beginning = false
+
## Whether file is a named pipe
- pipe = false
+ # pipe = false
## Method used to watch for file updates. Can be either "inotify" or "poll".
# watch_method = "inotify"
+ ## Maximum lines of the file to process that have not yet be written by the
+ ## output. For best throughput set based on the number of metrics on each
+ ## line and the size of the output's metric_batch_size.
+ # max_undelivered_lines = 1000
+
+ ## Character encoding to use when interpreting the file contents. Invalid
+ ## characters are replaced using the unicode replacement character. When set
+ ## to the empty string the data is not decoded to text.
+ ## ex: character_encoding = "utf-8"
+ ## character_encoding = "utf-16le"
+ ## character_encoding = "utf-16be"
+ ## character_encoding = ""
+ # character_encoding = ""
+
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -71,35 +114,57 @@ func (t *Tail) SampleConfig() string {
}
func (t *Tail) Description() string {
- return "Stream a log file, like the tail -f command"
+ return "Parse the new lines appended to a file"
}
-func (t *Tail) Gather(acc telegraf.Accumulator) error {
- t.Lock()
- defer t.Unlock()
+func (t *Tail) Init() error {
+ if t.MaxUndeliveredLines == 0 {
+ return errors.New("max_undelivered_lines must be positive")
+ }
+ t.sem = make(semaphore, t.MaxUndeliveredLines)
+
+ var err error
+ t.decoder, err = encoding.NewDecoder(t.CharacterEncoding)
+ return err
+}
+func (t *Tail) Gather(acc telegraf.Accumulator) error {
return t.tailNewFiles(true)
}
func (t *Tail) Start(acc telegraf.Accumulator) error {
- t.Lock()
- defer t.Unlock()
+ t.acc = acc.WithTracking(t.MaxUndeliveredLines)
+
+ t.ctx, t.cancel = context.WithCancel(context.Background())
+
+ t.wg.Add(1)
+ go func() {
+ defer t.wg.Done()
+ for {
+ select {
+ case <-t.ctx.Done():
+ return
+ case <-t.acc.Delivered():
+ <-t.sem
+ }
+ }
+ }()
- t.acc = acc
t.tailers = make(map[string]*tail.Tail)
- return t.tailNewFiles(t.FromBeginning)
+ err := t.tailNewFiles(t.FromBeginning)
+
+ // clear offsets
+ t.offsets = make(map[string]int64)
+ // assumption that once Start is called, all parallel plugins have already been initialized
+ offsetsMutex.Lock()
+ offsets = make(map[string]int64)
+ offsetsMutex.Unlock()
+
+ return err
}
func (t *Tail) tailNewFiles(fromBeginning bool) error {
- var seek *tail.SeekInfo
- if !t.Pipe && !fromBeginning {
- seek = &tail.SeekInfo{
- Whence: 2,
- Offset: 0,
- }
- }
-
var poll bool
if t.WatchMethod == "poll" {
poll = true
@@ -109,7 +174,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
for _, filepath := range t.Files {
g, err := globpath.Compile(filepath)
if err != nil {
- t.acc.AddError(fmt.Errorf("E! Error Glob %s failed to compile, %s", filepath, err))
+ t.Log.Errorf("Glob %q failed to compile: %s", filepath, err.Error())
}
for _, file := range g.Match() {
if _, ok := t.tailers[file]; ok {
@@ -117,6 +182,22 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
continue
}
+ var seek *tail.SeekInfo
+ if !t.Pipe && !fromBeginning {
+ if offset, ok := t.offsets[file]; ok {
+ t.Log.Debugf("Using offset %d for %q", offset, file)
+ seek = &tail.SeekInfo{
+ Whence: 0,
+ Offset: offset,
+ }
+ } else {
+ seek = &tail.SeekInfo{
+ Whence: 2,
+ Offset: 0,
+ }
+ }
+ }
+
tailer, err := tail.TailFile(file,
tail.Config{
ReOpen: true,
@@ -126,97 +207,127 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
Poll: poll,
Pipe: t.Pipe,
Logger: tail.DiscardingLogger,
+ OpenReaderFunc: func(rd io.Reader) io.Reader {
+ r, _ := utfbom.Skip(t.decoder.Reader(rd))
+ return r
+ },
})
if err != nil {
- t.acc.AddError(err)
+ t.Log.Debugf("Failed to open file (%s): %v", file, err)
continue
}
- log.Printf("D! [inputs.tail] tail added for file: %v", file)
+ t.Log.Debugf("Tail added for %q", file)
parser, err := t.parserFunc()
if err != nil {
- t.acc.AddError(fmt.Errorf("error creating parser: %v", err))
+ t.Log.Errorf("Creating parser: %s", err.Error())
+ continue
}
// create a goroutine for each "tailer"
t.wg.Add(1)
- go t.receiver(parser, tailer)
+ go func() {
+ defer t.wg.Done()
+ t.receiver(parser, tailer)
+
+ t.Log.Debugf("Tail removed for %q", tailer.Filename)
+
+ if err := tailer.Err(); err != nil {
+ t.Log.Errorf("Tailing %q: %s", tailer.Filename, err.Error())
+ }
+ }()
t.tailers[tailer.Filename] = tailer
}
}
return nil
}
-// this is launched as a goroutine to continuously watch a tailed logfile
+// ParseLine parses a line of text.
+func parseLine(parser parsers.Parser, line string, firstLine bool) ([]telegraf.Metric, error) {
+ switch parser.(type) {
+ case *csv.Parser:
+ // The csv parser parses headers in Parse and skips them in ParseLine.
+ // As a temporary solution call Parse only when getting the first
+ // line from the file.
+ if firstLine {
+ return parser.Parse([]byte(line))
+ } else {
+ m, err := parser.ParseLine(line)
+ if err != nil {
+ return nil, err
+ }
+
+ if m != nil {
+ return []telegraf.Metric{m}, nil
+ }
+ return []telegraf.Metric{}, nil
+ }
+ default:
+ return parser.Parse([]byte(line))
+ }
+}
+
+// Receiver is launched as a goroutine to continuously watch a tailed logfile
// for changes, parse any incoming msgs, and add to the accumulator.
func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) {
- defer t.wg.Done()
-
var firstLine = true
- var metrics []telegraf.Metric
- var m telegraf.Metric
- var err error
- var line *tail.Line
- for line = range tailer.Lines {
+ for line := range tailer.Lines {
if line.Err != nil {
- t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n",
- tailer.Filename, err))
+ t.Log.Errorf("Tailing %q: %s", tailer.Filename, line.Err.Error())
continue
}
// Fix up files with Windows line endings.
text := strings.TrimRight(line.Text, "\r")
- if firstLine {
- metrics, err = parser.Parse([]byte(text))
- if err == nil {
- if len(metrics) == 0 {
- firstLine = false
- continue
- } else {
- m = metrics[0]
- }
- }
- firstLine = false
- } else {
- m, err = parser.ParseLine(text)
+ metrics, err := parseLine(parser, text, firstLine)
+ if err != nil {
+ t.Log.Errorf("Malformed log line in %q: [%q]: %s",
+ tailer.Filename, line.Text, err.Error())
+ continue
}
+ firstLine = false
- if err == nil {
- if m != nil {
- tags := m.Tags()
- tags["path"] = tailer.Filename
- t.acc.AddFields(m.Name(), m.Fields(), tags, m.Time())
- }
- } else {
- t.acc.AddError(fmt.Errorf("E! Malformed log line in %s: [%s], Error: %s\n",
- tailer.Filename, line.Text, err))
+ for _, metric := range metrics {
+ metric.AddTag("path", tailer.Filename)
}
- }
- log.Printf("D! [inputs.tail] tail removed for file: %v", tailer.Filename)
-
- if err := tailer.Err(); err != nil {
- t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n",
- tailer.Filename, err))
+ // Block until plugin is stopping or room is available to add metrics.
+ select {
+ case <-t.ctx.Done():
+ return
+ case t.sem <- empty{}:
+ t.acc.AddTrackingMetricGroup(metrics)
+ }
}
}
func (t *Tail) Stop() {
- t.Lock()
- defer t.Unlock()
-
for _, tailer := range t.tailers {
+ if !t.Pipe && !t.FromBeginning {
+ // store offset for resume
+ offset, err := tailer.Tell()
+ if err == nil {
+ t.Log.Debugf("Recording offset %d for %q", offset, tailer.Filename)
+ } else {
+ t.Log.Errorf("Recording offset for %q: %s", tailer.Filename, err.Error())
+ }
+ }
err := tailer.Stop()
if err != nil {
- t.acc.AddError(fmt.Errorf("E! Error stopping tail on file %s\n", tailer.Filename))
+ t.Log.Errorf("Stopping tail on %q: %s", tailer.Filename, err.Error())
}
}
- for _, tailer := range t.tailers {
- tailer.Cleanup()
- }
+ t.cancel()
t.wg.Wait()
+
+ // persist offsets
+ offsetsMutex.Lock()
+ for k, v := range t.offsets {
+ offsets[k] = v
+ }
+ offsetsMutex.Unlock()
}
func (t *Tail) SetParserFunc(fn parsers.ParserFunc) {
diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go
index 06db2c17234b1..38a7f22780a52 100644
--- a/plugins/inputs/tail/tail_test.go
+++ b/plugins/inputs/tail/tail_test.go
@@ -1,135 +1,383 @@
package tail
import (
+ "bytes"
"io/ioutil"
+ "log"
"os"
- "runtime"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/plugins/parsers/csv"
+ "github.com/influxdata/telegraf/plugins/parsers/influx"
+ "github.com/influxdata/telegraf/plugins/parsers/json"
"github.com/influxdata/telegraf/testutil"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-func TestTailFromBeginning(t *testing.T) {
- if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" {
- t.Skip("Skipping CI testing due to race conditions")
- }
-
+func TestTailBadLine(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
- _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n")
+
+ _, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n")
require.NoError(t, err)
+ // Write good metric so we can detect when processing is complete
+ _, err = tmpfile.WriteString("cpu usage_idle=100\n")
+ require.NoError(t, err)
+
+ tmpfile.Close()
+
+ buf := &bytes.Buffer{}
+ log.SetOutput(buf)
+
tt := NewTail()
+ tt.Log = testutil.Logger{}
tt.FromBeginning = true
tt.Files = []string{tmpfile.Name()}
tt.SetParserFunc(parsers.NewInfluxParser)
- defer tt.Stop()
- defer tmpfile.Close()
+
+ err = tt.Init()
+ require.NoError(t, err)
acc := testutil.Accumulator{}
require.NoError(t, tt.Start(&acc))
+
require.NoError(t, acc.GatherError(tt.Gather))
acc.Wait(1)
- acc.AssertContainsTaggedFields(t, "cpu",
- map[string]interface{}{
- "usage_idle": float64(100),
- },
- map[string]string{
- "mytag": "foo",
- "path": tmpfile.Name(),
- })
-}
-func TestTailFromEnd(t *testing.T) {
- if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" {
- t.Skip("Skipping CI testing due to race conditions")
- }
+ tt.Stop()
+ assert.Contains(t, buf.String(), "Malformed log line")
+}
+func TestTailDosLineendings(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
- _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n")
+ _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n")
require.NoError(t, err)
+ tmpfile.Close()
tt := NewTail()
+ tt.Log = testutil.Logger{}
+ tt.FromBeginning = true
tt.Files = []string{tmpfile.Name()}
tt.SetParserFunc(parsers.NewInfluxParser)
- defer tt.Stop()
- defer tmpfile.Close()
+
+ err = tt.Init()
+ require.NoError(t, err)
acc := testutil.Accumulator{}
require.NoError(t, tt.Start(&acc))
- for _, tailer := range tt.tailers {
- for n, err := tailer.Tell(); err == nil && n == 0; n, err = tailer.Tell() {
- // wait for tailer to jump to end
- runtime.Gosched()
- }
- }
-
- _, err = tmpfile.WriteString("cpu,othertag=foo usage_idle=100\n")
- require.NoError(t, err)
+ defer tt.Stop()
require.NoError(t, acc.GatherError(tt.Gather))
- acc.Wait(1)
- acc.AssertContainsTaggedFields(t, "cpu",
+ acc.Wait(2)
+ acc.AssertContainsFields(t, "cpu",
map[string]interface{}{
"usage_idle": float64(100),
- },
- map[string]string{
- "othertag": "foo",
- "path": tmpfile.Name(),
})
- assert.Len(t, acc.Metrics, 1)
+ acc.AssertContainsFields(t, "cpu2",
+ map[string]interface{}{
+ "usage_idle": float64(200),
+ })
}
-func TestTailBadLine(t *testing.T) {
+// The csv parser should only parse the header line once per file.
+func TestCSVHeadersParsedOnce(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
- tt := NewTail()
- tt.FromBeginning = true
- tt.Files = []string{tmpfile.Name()}
- tt.SetParserFunc(parsers.NewInfluxParser)
- defer tt.Stop()
- defer tmpfile.Close()
+ _, err = tmpfile.WriteString(`
+measurement,time_idle
+cpu,42
+cpu,42
+`)
+ require.NoError(t, err)
+ tmpfile.Close()
+
+ plugin := NewTail()
+ plugin.Log = testutil.Logger{}
+ plugin.FromBeginning = true
+ plugin.Files = []string{tmpfile.Name()}
+ plugin.SetParserFunc(func() (parsers.Parser, error) {
+ return csv.NewParser(&csv.Config{
+ MeasurementColumn: "measurement",
+ HeaderRowCount: 1,
+ TimeFunc: func() time.Time { return time.Unix(0, 0) },
+ })
+ })
+
+ err = plugin.Init()
+ require.NoError(t, err)
acc := testutil.Accumulator{}
- require.NoError(t, tt.Start(&acc))
- require.NoError(t, acc.GatherError(tt.Gather))
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+ defer plugin.Stop()
+ err = plugin.Gather(&acc)
+ require.NoError(t, err)
+ acc.Wait(2)
+ plugin.Stop()
- _, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n")
+ expected := []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "path": tmpfile.Name(),
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0)),
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "path": tmpfile.Name(),
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0)),
+ }
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
+}
+
+// Ensure that the first line can produce multiple metrics (#6138)
+func TestMultipleMetricsOnFirstLine(t *testing.T) {
+ tmpfile, err := ioutil.TempFile("", "")
+ require.NoError(t, err)
+ defer os.Remove(tmpfile.Name())
+
+ _, err = tmpfile.WriteString(`
+[{"time_idle": 42}, {"time_idle": 42}]
+`)
+ require.NoError(t, err)
+ tmpfile.Close()
+
+ plugin := NewTail()
+ plugin.Log = testutil.Logger{}
+ plugin.FromBeginning = true
+ plugin.Files = []string{tmpfile.Name()}
+ plugin.SetParserFunc(func() (parsers.Parser, error) {
+ return json.New(
+ &json.Config{
+ MetricName: "cpu",
+ })
+ })
+
+ err = plugin.Init()
require.NoError(t, err)
- acc.WaitError(1)
- assert.Contains(t, acc.Errors[0].Error(), "E! Malformed log line")
+ acc := testutil.Accumulator{}
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+ defer plugin.Stop()
+ err = plugin.Gather(&acc)
+ require.NoError(t, err)
+ acc.Wait(2)
+ plugin.Stop()
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "path": tmpfile.Name(),
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0)),
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "path": tmpfile.Name(),
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0)),
+ }
+ testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime())
}
-func TestTailDosLineendings(t *testing.T) {
+func TestCharacterEncoding(t *testing.T) {
+ full := []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "usage_active": 11.9,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "cpu": "cpu1",
+ },
+ map[string]interface{}{
+ "usage_active": 26.0,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "cpu": "cpu2",
+ },
+ map[string]interface{}{
+ "usage_active": 14.0,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "cpu": "cpu3",
+ },
+ map[string]interface{}{
+ "usage_active": 20.4,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "cpu": "cpu-total",
+ },
+ map[string]interface{}{
+ "usage_active": 18.4,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ tests := []struct {
+ name string
+ plugin *Tail
+ offset int64
+ expected []telegraf.Metric
+ }{
+ {
+ name: "utf-8",
+ plugin: &Tail{
+ Files: []string{"testdata/cpu-utf-8.influx"},
+ FromBeginning: true,
+ MaxUndeliveredLines: 1000,
+ Log: testutil.Logger{},
+ CharacterEncoding: "utf-8",
+ },
+ expected: full,
+ },
+ {
+ name: "utf-8 seek",
+ plugin: &Tail{
+ Files: []string{"testdata/cpu-utf-8.influx"},
+ MaxUndeliveredLines: 1000,
+ Log: testutil.Logger{},
+ CharacterEncoding: "utf-8",
+ },
+ offset: 0x33,
+ expected: full[1:],
+ },
+ {
+ name: "utf-16le",
+ plugin: &Tail{
+ Files: []string{"testdata/cpu-utf-16le.influx"},
+ FromBeginning: true,
+ MaxUndeliveredLines: 1000,
+ Log: testutil.Logger{},
+ CharacterEncoding: "utf-16le",
+ },
+ expected: full,
+ },
+ {
+ name: "utf-16le seek",
+ plugin: &Tail{
+ Files: []string{"testdata/cpu-utf-16le.influx"},
+ MaxUndeliveredLines: 1000,
+ Log: testutil.Logger{},
+ CharacterEncoding: "utf-16le",
+ },
+ offset: 0x68,
+ expected: full[1:],
+ },
+ {
+ name: "utf-16be",
+ plugin: &Tail{
+ Files: []string{"testdata/cpu-utf-16be.influx"},
+ FromBeginning: true,
+ MaxUndeliveredLines: 1000,
+ Log: testutil.Logger{},
+ CharacterEncoding: "utf-16be",
+ },
+ expected: full,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tt.plugin.SetParserFunc(func() (parsers.Parser, error) {
+ handler := influx.NewMetricHandler()
+ return influx.NewParser(handler), nil
+ })
+
+ if tt.offset != 0 {
+ tt.plugin.offsets = map[string]int64{
+ tt.plugin.Files[0]: tt.offset,
+ }
+ }
+
+ err := tt.plugin.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+ err = tt.plugin.Start(&acc)
+ require.NoError(t, err)
+ acc.Wait(len(tt.expected))
+ tt.plugin.Stop()
+
+ actual := acc.GetTelegrafMetrics()
+ for _, m := range actual {
+ m.RemoveTag("path")
+ }
+
+ testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime())
+ })
+ }
+}
+
+func TestTailEOF(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
- _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n")
+ _, err = tmpfile.WriteString("cpu usage_idle=100\r\n")
+ require.NoError(t, err)
+ err = tmpfile.Sync()
require.NoError(t, err)
tt := NewTail()
+ tt.Log = testutil.Logger{}
tt.FromBeginning = true
tt.Files = []string{tmpfile.Name()}
tt.SetParserFunc(parsers.NewInfluxParser)
- defer tt.Stop()
- defer tmpfile.Close()
+
+ err = tt.Init()
+ require.NoError(t, err)
acc := testutil.Accumulator{}
require.NoError(t, tt.Start(&acc))
+ defer tt.Stop()
require.NoError(t, acc.GatherError(tt.Gather))
+ acc.Wait(1) // input hits eof
+
+ _, err = tmpfile.WriteString("cpu2 usage_idle=200\r\n")
+ require.NoError(t, err)
+ err = tmpfile.Sync()
+ require.NoError(t, err)
acc.Wait(2)
+ require.NoError(t, acc.GatherError(tt.Gather))
acc.AssertContainsFields(t, "cpu",
map[string]interface{}{
"usage_idle": float64(100),
@@ -138,4 +386,7 @@ func TestTailDosLineendings(t *testing.T) {
map[string]interface{}{
"usage_idle": float64(200),
})
+
+ err = tmpfile.Close()
+ require.NoError(t, err)
}
diff --git a/plugins/inputs/tail/testdata/cpu-utf-16be.influx b/plugins/inputs/tail/testdata/cpu-utf-16be.influx
new file mode 100644
index 0000000000000..2ac4bb73af452
Binary files /dev/null and b/plugins/inputs/tail/testdata/cpu-utf-16be.influx differ
diff --git a/plugins/inputs/tail/testdata/cpu-utf-16le.influx b/plugins/inputs/tail/testdata/cpu-utf-16le.influx
new file mode 100644
index 0000000000000..0f78471507dec
Binary files /dev/null and b/plugins/inputs/tail/testdata/cpu-utf-16le.influx differ
diff --git a/plugins/inputs/tail/testdata/cpu-utf-8.influx b/plugins/inputs/tail/testdata/cpu-utf-8.influx
new file mode 100644
index 0000000000000..eb30355f64bd5
--- /dev/null
+++ b/plugins/inputs/tail/testdata/cpu-utf-8.influx
@@ -0,0 +1,5 @@
+cpu,cpu=cpu0 usage_active=11.9 1594084375000000000
+cpu,cpu=cpu1 usage_active=26.0 1594084375000000000
+cpu,cpu=cpu2 usage_active=14.0 1594084375000000000
+cpu,cpu=cpu3 usage_active=20.4 1594084375000000000
+cpu,cpu=cpu-total usage_active=18.4 1594084375000000000
diff --git a/plugins/inputs/tcp_listener/README.md b/plugins/inputs/tcp_listener/README.md
index f858c7179c783..aeb29e2b49d35 100644
--- a/plugins/inputs/tcp_listener/README.md
+++ b/plugins/inputs/tcp_listener/README.md
@@ -1,4 +1,4 @@
-# TCP listener service input plugin
+# TCP Listener Input Plugin
> DEPRECATED: As of version 1.3 the TCP listener plugin has been deprecated in favor of the
> [socket_listener plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener)
diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go
index 544f36bd61246..41b8e463766ba 100644
--- a/plugins/inputs/tcp_listener/tcp_listener.go
+++ b/plugins/inputs/tcp_listener/tcp_listener.go
@@ -48,13 +48,15 @@ type TcpListener struct {
TotalConnections selfstat.Stat
PacketsRecv selfstat.Stat
BytesRecv selfstat.Stat
+
+ Log telegraf.Logger
}
-var dropwarn = "E! Error: tcp_listener message queue full. " +
+var dropwarn = "tcp_listener message queue full. " +
"We have dropped %d messages so far. " +
- "You may want to increase allowed_pending_messages in the config\n"
+ "You may want to increase allowed_pending_messages in the config"
-var malformedwarn = "E! tcp_listener has received %d malformed packets" +
+var malformedwarn = "tcp_listener has received %d malformed packets" +
" thus far."
const sampleConfig = `
@@ -114,16 +116,15 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error {
address, _ := net.ResolveTCPAddr("tcp", t.ServiceAddress)
t.listener, err = net.ListenTCP("tcp", address)
if err != nil {
- log.Fatalf("ERROR: ListenUDP - %s", err)
+ t.Log.Errorf("Failed to listen: %s", err.Error())
return err
}
- log.Println("I! TCP server listening on: ", t.listener.Addr().String())
t.wg.Add(2)
go t.tcpListen()
go t.tcpParser()
- log.Printf("I! Started TCP listener service on %s\n", t.ServiceAddress)
+ t.Log.Infof("Started TCP listener service on %q", t.ServiceAddress)
return nil
}
@@ -150,7 +151,7 @@ func (t *TcpListener) Stop() {
t.wg.Wait()
close(t.in)
- log.Println("I! Stopped TCP listener service on ", t.ServiceAddress)
+ t.Log.Infof("Stopped TCP listener service on %q", t.ServiceAddress)
}
// tcpListen listens for incoming TCP connections.
@@ -191,9 +192,8 @@ func (t *TcpListener) refuser(conn *net.TCPConn) {
" reached, closing.\nYou may want to increase max_tcp_connections in"+
" the Telegraf tcp listener configuration.\n", t.MaxTCPConnections)
conn.Close()
- log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr())
- log.Printf("I! WARNING: Maximum TCP Connections reached, you may want to" +
- " adjust max_tcp_connections")
+ t.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr())
+ t.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections")
}
// handler handles a single TCP Connection
@@ -235,7 +235,7 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) {
default:
t.drops++
if t.drops == 1 || t.drops%t.AllowedPendingMessages == 0 {
- log.Printf(dropwarn, t.drops)
+ t.Log.Errorf(dropwarn, t.drops)
}
}
}
@@ -268,7 +268,7 @@ func (t *TcpListener) tcpParser() error {
} else {
t.malformed++
if t.malformed == 1 || t.malformed%1000 == 0 {
- log.Printf(malformedwarn, t.malformed)
+ t.Log.Errorf(malformedwarn, t.malformed)
}
}
}
diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go
index 6ff40ad87d6ab..16895d6740efe 100644
--- a/plugins/inputs/tcp_listener/tcp_listener_test.go
+++ b/plugins/inputs/tcp_listener/tcp_listener_test.go
@@ -33,6 +33,7 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
func newTestTcpListener() (*TcpListener, chan []byte) {
in := make(chan []byte, 1500)
listener := &TcpListener{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:8194",
AllowedPendingMessages: 10000,
MaxTCPConnections: 250,
@@ -45,6 +46,7 @@ func newTestTcpListener() (*TcpListener, chan []byte) {
// benchmark how long it takes to accept & process 100,000 metrics:
func BenchmarkTCP(b *testing.B) {
listener := TcpListener{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:8198",
AllowedPendingMessages: 100000,
MaxTCPConnections: 250,
@@ -76,6 +78,7 @@ func BenchmarkTCP(b *testing.B) {
func TestHighTrafficTCP(t *testing.T) {
listener := TcpListener{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:8199",
AllowedPendingMessages: 100000,
MaxTCPConnections: 250,
@@ -103,6 +106,7 @@ func TestHighTrafficTCP(t *testing.T) {
func TestConnectTCP(t *testing.T) {
listener := TcpListener{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:8194",
AllowedPendingMessages: 10000,
MaxTCPConnections: 250,
@@ -137,9 +141,10 @@ func TestConnectTCP(t *testing.T) {
}
}
-// Test that MaxTCPConections is respected
+// Test that MaxTCPConnections is respected
func TestConcurrentConns(t *testing.T) {
listener := TcpListener{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:8195",
AllowedPendingMessages: 10000,
MaxTCPConnections: 2,
@@ -172,9 +177,10 @@ func TestConcurrentConns(t *testing.T) {
assert.Equal(t, io.EOF, err)
}
-// Test that MaxTCPConections is respected when max==1
+// Test that MaxTCPConnections is respected when max==1
func TestConcurrentConns1(t *testing.T) {
listener := TcpListener{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:8196",
AllowedPendingMessages: 10000,
MaxTCPConnections: 1,
@@ -205,9 +211,10 @@ func TestConcurrentConns1(t *testing.T) {
assert.Equal(t, io.EOF, err)
}
-// Test that MaxTCPConections is respected
+// Test that MaxTCPConnections is respected
func TestCloseConcurrentConns(t *testing.T) {
listener := TcpListener{
+ Log: testutil.Logger{},
ServiceAddress: "localhost:8195",
AllowedPendingMessages: 10000,
MaxTCPConnections: 2,
diff --git a/plugins/inputs/teamspeak/README.md b/plugins/inputs/teamspeak/README.md
index 84c4297e882ba..4767bb7e35171 100644
--- a/plugins/inputs/teamspeak/README.md
+++ b/plugins/inputs/teamspeak/README.md
@@ -7,7 +7,7 @@ the [Teamspeak 3 ServerQuery Manual](http://media.teamspeak.com/ts3_literature/T
### Configuration:
-```
+```toml
# Reads metrics from a Teamspeak 3 Server via ServerQuery
[[inputs.teamspeak]]
## Server address for Teamspeak 3 ServerQuery
diff --git a/plugins/inputs/temp/README.md b/plugins/inputs/temp/README.md
index 873a732855719..95db4a3bb786f 100644
--- a/plugins/inputs/temp/README.md
+++ b/plugins/inputs/temp/README.md
@@ -1,17 +1,18 @@
-# Temp Input plugin
+# Temperature Input Plugin
The temp input plugin gather metrics on system temperature. This plugin is
meant to be multi platform and uses platform specific collection methods.
Currently supports Linux and Windows.
-### Configuration:
+### Configuration
-```
+```toml
[[inputs.temp]]
+ # no configuration
```
-### Metrics:
+### Metrics
- temp
- tags:
@@ -19,7 +20,16 @@ Currently supports Linux and Windows.
- fields:
- temp (float, celcius)
-### Example Output:
+
+### Troubleshooting
+
+On **Windows**, the plugin uses a WMI call that is can be replicated with the
+following command:
+```
+wmic /namespace:\\root\wmi PATH MSAcpi_ThermalZoneTemperature
+```
+
+### Example Output
```
temp,sensor=coretemp_physicalid0_crit temp=100 1531298763000000000
diff --git a/plugins/inputs/tengine/tengine.go b/plugins/inputs/tengine/tengine.go
index 1ee63740fa51e..c45ae81d10b60 100644
--- a/plugins/inputs/tengine/tengine.go
+++ b/plugins/inputs/tengine/tengine.go
@@ -15,7 +15,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -101,7 +101,7 @@ func (n *Tengine) createHttpClient() (*http.Client, error) {
return client, nil
}
-type TengineSatus struct {
+type TengineStatus struct {
host string
bytes_in uint64
bytes_out uint64
@@ -135,7 +135,7 @@ type TengineSatus struct {
}
func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
- var tenginestatus TengineSatus
+ var tenginestatus TengineStatus
resp, err := n.client.Get(addr.String())
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
diff --git a/plugins/inputs/tomcat/tomcat.go b/plugins/inputs/tomcat/tomcat.go
index 40ae7de816658..d32b0168a3d05 100644
--- a/plugins/inputs/tomcat/tomcat.go
+++ b/plugins/inputs/tomcat/tomcat.go
@@ -10,7 +10,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
diff --git a/plugins/inputs/udp_listener/README.md b/plugins/inputs/udp_listener/README.md
index 6228090b67be2..921e8dbf1eac3 100644
--- a/plugins/inputs/udp_listener/README.md
+++ b/plugins/inputs/udp_listener/README.md
@@ -1,4 +1,4 @@
-# UDP listener service input plugin
+# UDP Listener Input Plugin
> DEPRECATED: As of version 1.3 the UDP listener plugin has been deprecated in favor of the
> [socket_listener plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener)
diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go
index d0a728b3c8484..7fa59fdb121bc 100644
--- a/plugins/inputs/udp_listener/udp_listener.go
+++ b/plugins/inputs/udp_listener/udp_listener.go
@@ -53,17 +53,19 @@ type UdpListener struct {
PacketsRecv selfstat.Stat
BytesRecv selfstat.Stat
+
+ Log telegraf.Logger
}
// UDP_MAX_PACKET_SIZE is packet limit, see
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
const UDP_MAX_PACKET_SIZE int = 64 * 1024
-var dropwarn = "E! Error: udp_listener message queue full. " +
+var dropwarn = "udp_listener message queue full. " +
"We have dropped %d messages so far. " +
- "You may want to increase allowed_pending_messages in the config\n"
+ "You may want to increase allowed_pending_messages in the config"
-var malformedwarn = "E! udp_listener has received %d malformed packets" +
+var malformedwarn = "udp_listener has received %d malformed packets" +
" thus far."
const sampleConfig = `
@@ -113,7 +115,7 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error {
u.wg.Add(1)
go u.udpParser()
- log.Printf("I! Started UDP listener service on %s (ReadBuffer: %d)\n", u.ServiceAddress, u.UDPBufferSize)
+ u.Log.Infof("Started service on %q (ReadBuffer: %d)", u.ServiceAddress, u.UDPBufferSize)
return nil
}
@@ -124,7 +126,7 @@ func (u *UdpListener) Stop() {
u.wg.Wait()
u.listener.Close()
close(u.in)
- log.Println("I! Stopped UDP listener service on ", u.ServiceAddress)
+ u.Log.Infof("Stopped service on %q", u.ServiceAddress)
}
func (u *UdpListener) udpListen() error {
@@ -134,15 +136,15 @@ func (u *UdpListener) udpListen() error {
u.listener, err = net.ListenUDP("udp", address)
if err != nil {
- return fmt.Errorf("E! Error: ListenUDP - %s", err)
+ return err
}
- log.Println("I! UDP server listening on: ", u.listener.LocalAddr().String())
+ u.Log.Infof("Server listening on %q", u.listener.LocalAddr().String())
if u.UDPBufferSize > 0 {
err = u.listener.SetReadBuffer(u.UDPBufferSize) // if we want to move away from OS default
if err != nil {
- return fmt.Errorf("E! Failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err)
+ return fmt.Errorf("failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err)
}
}
@@ -166,7 +168,7 @@ func (u *UdpListener) udpListenLoop() {
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
} else {
- log.Printf("E! Error: %s\n", err.Error())
+ u.Log.Error(err.Error())
}
continue
}
@@ -180,7 +182,7 @@ func (u *UdpListener) udpListenLoop() {
default:
u.drops++
if u.drops == 1 || u.drops%u.AllowedPendingMessages == 0 {
- log.Printf(dropwarn, u.drops)
+ u.Log.Errorf(dropwarn, u.drops)
}
}
}
@@ -208,7 +210,7 @@ func (u *UdpListener) udpParser() error {
} else {
u.malformed++
if u.malformed == 1 || u.malformed%1000 == 0 {
- log.Printf(malformedwarn, u.malformed)
+ u.Log.Errorf(malformedwarn, u.malformed)
}
}
}
diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go
index ed206f1735ecf..b241235e4d61d 100644
--- a/plugins/inputs/udp_listener/udp_listener_test.go
+++ b/plugins/inputs/udp_listener/udp_listener_test.go
@@ -8,14 +8,11 @@ import (
"log"
"net"
"os"
- "runtime"
"strings"
"testing"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/testutil"
-
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -34,6 +31,7 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
func newTestUdpListener() (*UdpListener, chan []byte) {
in := make(chan []byte, 1500)
listener := &UdpListener{
+ Log: testutil.Logger{},
ServiceAddress: ":8125",
AllowedPendingMessages: 10000,
in: in,
@@ -42,45 +40,46 @@ func newTestUdpListener() (*UdpListener, chan []byte) {
return listener, in
}
-func TestHighTrafficUDP(t *testing.T) {
- listener := UdpListener{
- ServiceAddress: ":8126",
- AllowedPendingMessages: 100000,
- }
- var err error
- listener.parser, err = parsers.NewInfluxParser()
- require.NoError(t, err)
- acc := &testutil.Accumulator{}
-
- // send multiple messages to socket
- err = listener.Start(acc)
- require.NoError(t, err)
-
- conn, err := net.Dial("udp", "127.0.0.1:8126")
- require.NoError(t, err)
- mlen := int64(len(testMsgs))
- var sent int64
- for i := 0; i < 20000; i++ {
- for sent > listener.BytesRecv.Get()+32000 {
- // more than 32kb sitting in OS buffer, let it drain
- runtime.Gosched()
- }
- conn.Write([]byte(testMsgs))
- sent += mlen
- }
- for sent > listener.BytesRecv.Get() {
- runtime.Gosched()
- }
- for len(listener.in) > 0 {
- runtime.Gosched()
- }
- listener.Stop()
-
- assert.Equal(t, uint64(100000), acc.NMetrics())
-}
+// func TestHighTrafficUDP(t *testing.T) {
+// listener := UdpListener{
+// ServiceAddress: ":8126",
+// AllowedPendingMessages: 100000,
+// }
+// var err error
+// listener.parser, err = parsers.NewInfluxParser()
+// require.NoError(t, err)
+// acc := &testutil.Accumulator{}
+
+// // send multiple messages to socket
+// err = listener.Start(acc)
+// require.NoError(t, err)
+
+// conn, err := net.Dial("udp", "127.0.0.1:8126")
+// require.NoError(t, err)
+// mlen := int64(len(testMsgs))
+// var sent int64
+// for i := 0; i < 20000; i++ {
+// for sent > listener.BytesRecv.Get()+32000 {
+// // more than 32kb sitting in OS buffer, let it drain
+// runtime.Gosched()
+// }
+// conn.Write([]byte(testMsgs))
+// sent += mlen
+// }
+// for sent > listener.BytesRecv.Get() {
+// runtime.Gosched()
+// }
+// for len(listener.in) > 0 {
+// runtime.Gosched()
+// }
+// listener.Stop()
+
+// assert.Equal(t, uint64(100000), acc.NMetrics())
+// }
func TestConnectUDP(t *testing.T) {
listener := UdpListener{
+ Log: testutil.Logger{},
ServiceAddress: ":8127",
AllowedPendingMessages: 10000,
}
diff --git a/plugins/inputs/unbound/README.md b/plugins/inputs/unbound/README.md
index 36c9aa47de850..1ccd183bc643c 100644
--- a/plugins/inputs/unbound/README.md
+++ b/plugins/inputs/unbound/README.md
@@ -18,7 +18,10 @@ a validating, recursive, and caching DNS resolver.
## The default location of the unbound-control binary can be overridden with:
# binary = "/usr/sbin/unbound-control"
- ## The default timeout of 1s can be overriden with:
+ ## The default location of the unbound config file can be overridden with:
+ # config_file = "/etc/unbound/unbound.conf"
+
+ ## The default timeout of 1s can be overridden with:
# timeout = "1s"
## When set to true, thread metrics are tagged with the thread id.
diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go
index 02067c739c572..bb4ecde5860dd 100644
--- a/plugins/inputs/unbound/unbound.go
+++ b/plugins/inputs/unbound/unbound.go
@@ -17,7 +17,7 @@ import (
"github.com/influxdata/telegraf/plugins/inputs"
)
-type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool) (*bytes.Buffer, error)
+type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error)
// Unbound is used to store configuration values
type Unbound struct {
@@ -26,6 +26,7 @@ type Unbound struct {
UseSudo bool
Server string
ThreadAsTag bool
+ ConfigFile string
filter filter.Filter
run runner
@@ -45,7 +46,10 @@ var sampleConfig = `
## The default location of the unbound-control binary can be overridden with:
# binary = "/usr/sbin/unbound-control"
- ## The default timeout of 1s can be overriden with:
+ ## The default location of the unbound config file can be overridden with:
+ # config_file = "/etc/unbound/unbound.conf"
+
+ ## The default timeout of 1s can be overridden with:
# timeout = "1s"
## When set to true, thread metrics are tagged with the thread id.
@@ -67,7 +71,7 @@ func (s *Unbound) SampleConfig() string {
}
// Shell out to unbound_stat and return the output
-func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool) (*bytes.Buffer, error) {
+func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error) {
cmdArgs := []string{"stats_noreset"}
if Server != "" {
@@ -96,6 +100,10 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv
cmdArgs = append([]string{"-s", server}, cmdArgs...)
}
+ if ConfigFile != "" {
+ cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...)
+ }
+
cmd := exec.Command(cmdName, cmdArgs...)
if UseSudo {
@@ -118,14 +126,14 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv
// All the dots in stat name will replaced by underscores. Histogram statistics will not be collected.
func (s *Unbound) Gather(acc telegraf.Accumulator) error {
- // Always exclude histrogram statistics
+ // Always exclude histogram statistics
statExcluded := []string{"histogram.*"}
filterExcluded, err := filter.Compile(statExcluded)
if err != nil {
return err
}
- out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ThreadAsTag)
+ out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ThreadAsTag, s.ConfigFile)
if err != nil {
return fmt.Errorf("error gathering metrics: %s", err)
}
@@ -207,6 +215,7 @@ func init() {
UseSudo: false,
Server: "",
ThreadAsTag: false,
+ ConfigFile: "",
}
})
}
diff --git a/plugins/inputs/unbound/unbound_test.go b/plugins/inputs/unbound/unbound_test.go
index b1d6206c39900..cc4b99daecc59 100644
--- a/plugins/inputs/unbound/unbound_test.go
+++ b/plugins/inputs/unbound/unbound_test.go
@@ -12,8 +12,8 @@ import (
var TestTimeout = internal.Duration{Duration: time.Second}
-func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Server string, ThreadAsTag bool) func(string, internal.Duration, bool, string, bool) (*bytes.Buffer, error) {
- return func(string, internal.Duration, bool, string, bool) (*bytes.Buffer, error) {
+func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Server string, ThreadAsTag bool, ConfigFile string) func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) {
+ return func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) {
return bytes.NewBuffer([]byte(output)), nil
}
}
@@ -21,7 +21,7 @@ func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Serv
func TestParseFullOutput(t *testing.T) {
acc := &testutil.Accumulator{}
v := &Unbound{
- run: UnboundControl(fullOutput, TestTimeout, true, "", false),
+ run: UnboundControl(fullOutput, TestTimeout, true, "", false, ""),
}
err := v.Gather(acc)
@@ -38,7 +38,7 @@ func TestParseFullOutput(t *testing.T) {
func TestParseFullOutputThreadAsTag(t *testing.T) {
acc := &testutil.Accumulator{}
v := &Unbound{
- run: UnboundControl(fullOutput, TestTimeout, true, "", true),
+ run: UnboundControl(fullOutput, TestTimeout, true, "", true, ""),
ThreadAsTag: true,
}
err := v.Gather(acc)
diff --git a/plugins/inputs/uwsgi/README.md b/plugins/inputs/uwsgi/README.md
new file mode 100644
index 0000000000000..9a6d42764e3ef
--- /dev/null
+++ b/plugins/inputs/uwsgi/README.md
@@ -0,0 +1,92 @@
+# uWSGI Input Plugin
+
+The uWSGI input plugin gathers metrics about uWSGI using its [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html).
+
+### Configuration
+
+```toml
+[[inputs.uwsgi]]
+ ## List with urls of uWSGI Stats servers. Url must match pattern:
+ ## scheme://address[:port]
+ ##
+ ## For example:
+ ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
+ servers = ["tcp://127.0.0.1:1717"]
+
+ ## General connection timeout
+ # timeout = "5s"
+```
+
+
+### Metrics:
+
+ - uwsgi_overview
+ - tags:
+ - source
+ - uid
+ - gid
+ - version
+ - fields:
+ - listen_queue
+ - listen_queue_errors
+ - signal_queue
+ - load
+ - pid
+
++ uwsgi_workers
+ - tags:
+ - worker_id
+ - source
+ - fields:
+ - requests
+ - accepting
+ - delta_request
+ - exceptions
+ - harakiri_count
+ - pid
+ - signals
+ - signal_queue
+ - status
+ - rss
+ - vsz
+ - running_time
+ - last_spawn
+ - respawn_count
+ - tx
+ - avg_rt
+
+- uwsgi_apps
+ - tags:
+ - app_id
+ - worker_id
+ - source
+ - fields:
+ - modifier1
+ - requests
+ - startup_time
+ - exceptions
+
++ uwsgi_cores
+ - tags:
+ - core_id
+ - worker_id
+ - source
+ - fields:
+ - requests
+ - static_requests
+ - routed_requests
+ - offloaded_requests
+ - write_errors
+ - read_errors
+ - in_request
+
+
+### Example Output:
+
+```
+uwsgi_overview,gid=0,uid=0,source=172.17.0.2,version=2.0.18 listen_queue=0i,listen_queue_errors=0i,load=0i,pid=1i,signal_queue=0i 1564441407000000000
+uwsgi_workers,source=172.17.0.2,worker_id=1 accepting=1i,avg_rt=0i,delta_request=0i,exceptions=0i,harakiri_count=0i,last_spawn=1564441202i,pid=6i,requests=0i,respawn_count=1i,rss=0i,running_time=0i,signal_queue=0i,signals=0i,status="idle",tx=0i,vsz=0i 1564441407000000000
+uwsgi_apps,app_id=0,worker_id=1,source=172.17.0.2 exceptions=0i,modifier1=0i,requests=0i,startup_time=0i 1564441407000000000
+uwsgi_cores,core_id=0,worker_id=1,source=172.17.0.2 in_request=0i,offloaded_requests=0i,read_errors=0i,requests=0i,routed_requests=0i,static_requests=0i,write_errors=0i 1564441407000000000
+```
+
diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go
new file mode 100644
index 0000000000000..b13a7b3e6c5d3
--- /dev/null
+++ b/plugins/inputs/uwsgi/uwsgi.go
@@ -0,0 +1,295 @@
+// Package uwsgi implements a telegraf plugin for collecting uwsgi stats from
+// the uwsgi stats server.
+package uwsgi
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Uwsgi server struct
+type Uwsgi struct {
+ Servers []string `toml:"servers"`
+ Timeout internal.Duration `toml:"timeout"`
+
+ client *http.Client
+}
+
+// Description returns the plugin description
+func (u *Uwsgi) Description() string {
+ return "Read uWSGI metrics."
+}
+
+// SampleConfig returns the sample configuration
+func (u *Uwsgi) SampleConfig() string {
+ return `
+ ## List with urls of uWSGI Stats servers. URL must match pattern:
+ ## scheme://address[:port]
+ ##
+ ## For example:
+ ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
+ servers = ["tcp://127.0.0.1:1717"]
+
+ ## General connection timeout
+ # timeout = "5s"
+`
+}
+
+// Gather collect data from uWSGI Server
+func (u *Uwsgi) Gather(acc telegraf.Accumulator) error {
+ if u.client == nil {
+ u.client = &http.Client{
+ Timeout: u.Timeout.Duration,
+ }
+ }
+ wg := &sync.WaitGroup{}
+
+ for _, s := range u.Servers {
+ wg.Add(1)
+ go func(s string) {
+ defer wg.Done()
+ n, err := url.Parse(s)
+ if err != nil {
+ acc.AddError(fmt.Errorf("could not parse uWSGI Stats Server url '%s': %s", s, err.Error()))
+ return
+ }
+
+ if err := u.gatherServer(acc, n); err != nil {
+ acc.AddError(err)
+ return
+ }
+ }(s)
+ }
+
+ wg.Wait()
+
+ return nil
+}
+
+func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, url *url.URL) error {
+ var err error
+ var r io.ReadCloser
+ var s StatsServer
+
+ switch url.Scheme {
+ case "tcp":
+ r, err = net.DialTimeout(url.Scheme, url.Host, u.Timeout.Duration)
+ if err != nil {
+ return err
+ }
+ s.source = url.Host
+ case "unix":
+ r, err = net.DialTimeout(url.Scheme, url.Path, u.Timeout.Duration)
+ if err != nil {
+ return err
+ }
+ s.source, err = os.Hostname()
+ if err != nil {
+ s.source = ""
+ }
+ case "http":
+ resp, err := u.client.Get(url.String())
+ if err != nil {
+ return err
+ }
+ r = resp.Body
+ s.source = url.Host
+ default:
+ return fmt.Errorf("'%s' is not a supported scheme", url.Scheme)
+ }
+
+ defer r.Close()
+
+ if err := json.NewDecoder(r).Decode(&s); err != nil {
+ return fmt.Errorf("failed to decode json payload from '%s': %s", url.String(), err.Error())
+ }
+
+ u.gatherStatServer(acc, &s)
+
+ return err
+}
+
+func (u *Uwsgi) gatherStatServer(acc telegraf.Accumulator, s *StatsServer) {
+ fields := map[string]interface{}{
+ "listen_queue": s.ListenQueue,
+ "listen_queue_errors": s.ListenQueueErrors,
+ "signal_queue": s.SignalQueue,
+ "load": s.Load,
+ "pid": s.PID,
+ }
+
+ tags := map[string]string{
+ "source": s.source,
+ "uid": strconv.Itoa(s.UID),
+ "gid": strconv.Itoa(s.GID),
+ "version": s.Version,
+ }
+ acc.AddFields("uwsgi_overview", fields, tags)
+
+ u.gatherWorkers(acc, s)
+ u.gatherApps(acc, s)
+ u.gatherCores(acc, s)
+}
+
+func (u *Uwsgi) gatherWorkers(acc telegraf.Accumulator, s *StatsServer) {
+ for _, w := range s.Workers {
+ fields := map[string]interface{}{
+ "requests": w.Requests,
+ "accepting": w.Accepting,
+ "delta_request": w.DeltaRequests,
+ "exceptions": w.Exceptions,
+ "harakiri_count": w.HarakiriCount,
+ "pid": w.PID,
+ "signals": w.Signals,
+ "signal_queue": w.SignalQueue,
+ "status": w.Status,
+ "rss": w.Rss,
+ "vsz": w.Vsz,
+ "running_time": w.RunningTime,
+ "last_spawn": w.LastSpawn,
+ "respawn_count": w.RespawnCount,
+ "tx": w.Tx,
+ "avg_rt": w.AvgRt,
+ }
+ tags := map[string]string{
+ "worker_id": strconv.Itoa(w.WorkerID),
+ "source": s.source,
+ }
+
+ acc.AddFields("uwsgi_workers", fields, tags)
+ }
+}
+
+func (u *Uwsgi) gatherApps(acc telegraf.Accumulator, s *StatsServer) {
+ for _, w := range s.Workers {
+ for _, a := range w.Apps {
+ fields := map[string]interface{}{
+ "modifier1": a.Modifier1,
+ "requests": a.Requests,
+ "startup_time": a.StartupTime,
+ "exceptions": a.Exceptions,
+ }
+ tags := map[string]string{
+ "app_id": strconv.Itoa(a.AppID),
+ "worker_id": strconv.Itoa(w.WorkerID),
+ "source": s.source,
+ }
+ acc.AddFields("uwsgi_apps", fields, tags)
+ }
+ }
+}
+
+func (u *Uwsgi) gatherCores(acc telegraf.Accumulator, s *StatsServer) {
+ for _, w := range s.Workers {
+ for _, c := range w.Cores {
+ fields := map[string]interface{}{
+ "requests": c.Requests,
+ "static_requests": c.StaticRequests,
+ "routed_requests": c.RoutedRequests,
+ "offloaded_requests": c.OffloadedRequests,
+ "write_errors": c.WriteErrors,
+ "read_errors": c.ReadErrors,
+ "in_request": c.InRequest,
+ }
+ tags := map[string]string{
+ "core_id": strconv.Itoa(c.CoreID),
+ "worker_id": strconv.Itoa(w.WorkerID),
+ "source": s.source,
+ }
+ acc.AddFields("uwsgi_cores", fields, tags)
+ }
+
+ }
+}
+
+func init() {
+ inputs.Add("uwsgi", func() telegraf.Input {
+ return &Uwsgi{
+ Timeout: internal.Duration{Duration: 5 * time.Second},
+ }
+ })
+}
+
+// StatsServer defines the stats server structure.
+type StatsServer struct {
+ // Tags
+ source string
+ PID int `json:"pid"`
+ UID int `json:"uid"`
+ GID int `json:"gid"`
+ Version string `json:"version"`
+
+ // Fields
+ ListenQueue int `json:"listen_queue"`
+ ListenQueueErrors int `json:"listen_queue_errors"`
+ SignalQueue int `json:"signal_queue"`
+ Load int `json:"load"`
+
+ Workers []*Worker `json:"workers"`
+}
+
+// Worker defines the worker metric structure.
+type Worker struct {
+ // Tags
+ WorkerID int `json:"id"`
+ PID int `json:"pid"`
+
+ // Fields
+ Accepting int `json:"accepting"`
+ Requests int `json:"requests"`
+ DeltaRequests int `json:"delta_requests"`
+ Exceptions int `json:"exceptions"`
+ HarakiriCount int `json:"harakiri_count"`
+ Signals int `json:"signals"`
+ SignalQueue int `json:"signal_queue"`
+ Status string `json:"status"`
+ Rss int `json:"rss"`
+ Vsz int `json:"vsz"`
+ RunningTime int `json:"running_time"`
+ LastSpawn int `json:"last_spawn"`
+ RespawnCount int `json:"respawn_count"`
+ Tx int `json:"tx"`
+ AvgRt int `json:"avg_rt"`
+
+ Apps []*App `json:"apps"`
+ Cores []*Core `json:"cores"`
+}
+
+// App defines the app metric structure.
+type App struct {
+ // Tags
+ AppID int `json:"id"`
+
+ // Fields
+ Modifier1 int `json:"modifier1"`
+ Requests int `json:"requests"`
+ StartupTime int `json:"startup_time"`
+ Exceptions int `json:"exceptions"`
+}
+
+// Core defines the core metric structure.
+type Core struct {
+ // Tags
+ CoreID int `json:"id"`
+
+ // Fields
+ Requests int `json:"requests"`
+ StaticRequests int `json:"static_requests"`
+ RoutedRequests int `json:"routed_requests"`
+ OffloadedRequests int `json:"offloaded_requests"`
+ WriteErrors int `json:"write_errors"`
+ ReadErrors int `json:"read_errors"`
+ InRequest int `json:"in_request"`
+}
diff --git a/plugins/inputs/uwsgi/uwsgi_test.go b/plugins/inputs/uwsgi/uwsgi_test.go
new file mode 100644
index 0000000000000..34581791e022f
--- /dev/null
+++ b/plugins/inputs/uwsgi/uwsgi_test.go
@@ -0,0 +1,185 @@
+package uwsgi_test
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/influxdata/telegraf/plugins/inputs/uwsgi"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBasic(t *testing.T) {
+ js := `
+{
+ "version":"2.0.12",
+ "listen_queue":0,
+ "listen_queue_errors":0,
+ "signal_queue":0,
+ "load":0,
+ "pid":28372,
+ "uid":1000,
+ "gid":1000,
+ "cwd":"/opt/uwsgi",
+ "locks":[
+ {
+ "user 0":0
+ },
+ {
+ "signal":0
+ },
+ {
+ "filemon":0
+ },
+ {
+ "timer":0
+ },
+ {
+ "rbtimer":0
+ },
+ {
+ "cron":0
+ },
+ {
+ "rpc":0
+ },
+ {
+ "snmp":0
+ }
+ ],
+ "sockets":[
+ {
+ "name":"127.0.0.1:47430",
+ "proto":"uwsgi",
+ "queue":0,
+ "max_queue":100,
+ "shared":0,
+ "can_offload":0
+ }
+ ],
+ "workers":[
+ {
+ "id":1,
+ "pid":28375,
+ "accepting":1,
+ "requests":0,
+ "delta_requests":0,
+ "exceptions":0,
+ "harakiri_count":0,
+ "signals":0,
+ "signal_queue":0,
+ "status":"idle",
+ "rss":0,
+ "vsz":0,
+ "running_time":0,
+ "last_spawn":1459942782,
+ "respawn_count":1,
+ "tx":0,
+ "avg_rt":0,
+ "apps":[
+ {
+ "id":0,
+ "modifier1":0,
+ "mountpoint":"",
+ "startup_time":0,
+ "requests":0,
+ "exceptions":0,
+ "chdir":""
+ }
+ ],
+ "cores":[
+ {
+ "id":0,
+ "requests":0,
+ "static_requests":0,
+ "routed_requests":0,
+ "offloaded_requests":0,
+ "write_errors":0,
+ "read_errors":0,
+ "in_request":0,
+ "vars":[
+
+ ]
+ }
+ ]
+ }
+ ]
+}
+`
+
+ fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/" {
+ _, _ = w.Write([]byte(js))
+ } else {
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ defer fakeServer.Close()
+
+ plugin := &uwsgi.Uwsgi{
+ Servers: []string{fakeServer.URL + "/"},
+ }
+ var acc testutil.Accumulator
+ plugin.Gather(&acc)
+ require.Equal(t, 0, len(acc.Errors))
+}
+
+func TestInvalidJSON(t *testing.T) {
+ js := `
+{
+ "version":"2.0.12",
+ "listen_queue":0,
+ "listen_queue_errors":0,
+ "signal_queue":0,
+ "load":0,
+ "pid:28372
+ "uid":10
+}
+`
+
+ fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/" {
+ _, _ = w.Write([]byte(js))
+ } else {
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ defer fakeServer.Close()
+
+ plugin := &uwsgi.Uwsgi{
+ Servers: []string{fakeServer.URL + "/"},
+ }
+ var acc testutil.Accumulator
+ plugin.Gather(&acc)
+ require.Equal(t, 1, len(acc.Errors))
+}
+
+func TestHttpError(t *testing.T) {
+ plugin := &uwsgi.Uwsgi{
+ Servers: []string{"http://novalidurladress/"},
+ }
+ var acc testutil.Accumulator
+ plugin.Gather(&acc)
+ require.Equal(t, 1, len(acc.Errors))
+}
+
+func TestTcpError(t *testing.T) {
+ plugin := &uwsgi.Uwsgi{
+ Servers: []string{"tcp://novalidtcpadress/"},
+ }
+ var acc testutil.Accumulator
+ plugin.Gather(&acc)
+ require.Equal(t, 1, len(acc.Errors))
+}
+
+func TestUnixSocketError(t *testing.T) {
+ plugin := &uwsgi.Uwsgi{
+ Servers: []string{"unix:///novalidunixsocket"},
+ }
+ var acc testutil.Accumulator
+ plugin.Gather(&acc)
+ require.Equal(t, 1, len(acc.Errors))
+}
diff --git a/plugins/inputs/varnish/README.md b/plugins/inputs/varnish/README.md
index 3609b12e7059a..2db1498040f25 100644
--- a/plugins/inputs/varnish/README.md
+++ b/plugins/inputs/varnish/README.md
@@ -19,7 +19,7 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/)
stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
## Optional name for the varnish instance (or working directory) to query
- ## Usually appened after -n in varnish cli
+ ## Usually append after -n in varnish cli
# instance_name = instanceName
## Timeout for varnishstat command
@@ -92,7 +92,7 @@ MEMPOOL, etc). In the output, the prefix will be used as a tag, and removed from
- MAIN.s_pipe (uint64, count, Total pipe sessions)
- MAIN.s_pass (uint64, count, Total pass- ed requests)
- MAIN.s_fetch (uint64, count, Total backend fetches)
- - MAIN.s_synth (uint64, count, Total synthethic responses)
+ - MAIN.s_synth (uint64, count, Total synthetic responses)
- MAIN.s_req_hdrbytes (uint64, count, Request header bytes)
- MAIN.s_req_bodybytes (uint64, count, Request body bytes)
- MAIN.s_resp_hdrbytes (uint64, count, Response header bytes)
diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go
index 3a18deb6cfd18..893f00c0a8cdd 100644
--- a/plugins/inputs/varnish/varnish.go
+++ b/plugins/inputs/varnish/varnish.go
@@ -49,7 +49,7 @@ var sampleConfig = `
stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
## Optional name for the varnish instance (or working directory) to query
- ## Usually appened after -n in varnish cli
+ ## Usually append after -n in varnish cli
# instance_name = instanceName
## Timeout for varnishstat command
diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go
index e8ca94e3c7ade..96e5c35562208 100644
--- a/plugins/inputs/varnish/varnish_test.go
+++ b/plugins/inputs/varnish/varnish_test.go
@@ -192,7 +192,7 @@ MAIN.s_req 0 0.00 Total requests seen
MAIN.s_pipe 0 0.00 Total pipe sessions seen
MAIN.s_pass 0 0.00 Total pass-ed requests seen
MAIN.s_fetch 0 0.00 Total backend fetches initiated
-MAIN.s_synth 0 0.00 Total synthethic responses made
+MAIN.s_synth 0 0.00 Total synthetic responses made
MAIN.s_req_hdrbytes 0 0.00 Request header bytes
MAIN.s_req_bodybytes 0 0.00 Request body bytes
MAIN.s_resp_hdrbytes 0 0.00 Response header bytes
diff --git a/plugins/inputs/vsphere/METRICS.md b/plugins/inputs/vsphere/METRICS.md
index 0b9e0482fd8f8..d1a34bb26c4f9 100644
--- a/plugins/inputs/vsphere/METRICS.md
+++ b/plugins/inputs/vsphere/METRICS.md
@@ -4,6 +4,8 @@ and the set of available metrics may vary depending hardware, as well as what pl
are installed. Therefore, providing a definitive list of available metrics is difficult. The metrics listed
below are the most commonly available as of vSphere 6.5.
+For a complete list of metrics available from vSphere and the units they measure in, please reference the [VMWare vCenter Converter API Reference](https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.PerformanceManager.html).
+
To list the exact set in your environment, please use the govc tool available [here](https://github.com/vmware/govmomi/tree/master/govc)
To obtain the set of metrics for e.g. a VM, you may use the following command:
@@ -284,4 +286,4 @@ disk.capacity.latest
disk.capacity.contention.average
disk.capacity.provisioned.average
disk.capacity.usage.average
-```
\ No newline at end of file
+```
diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md
index ae7cdc37b05fe..108637bab05d7 100644
--- a/plugins/inputs/vsphere/README.md
+++ b/plugins/inputs/vsphere/README.md
@@ -7,6 +7,9 @@ The VMware vSphere plugin uses the vSphere API to gather metrics from multiple v
* VMs
* Datastores
+## Supported versions of vSphere
+This plugin supports vSphere version 5.5 through 6.7.
+
## Configuration
NOTE: To disable collection of a specific resource type, simply exclude all metrics using the XX_metric_exclude.
@@ -16,7 +19,7 @@ For example, to disable collection of VMs, add this:
vm_metric_exclude = [ "*" ]
```
-```
+```toml
# Read metrics from one or many vCenters
[[inputs.vsphere]]
## List of vCenter URLs to be monitored. These three lines must be uncommented
@@ -28,6 +31,7 @@ vm_metric_exclude = [ "*" ]
## VMs
## Typical VM metrics (if omitted or empty, all metrics are collected)
# vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+ # vm_exclude = [] # Inventory paths to exclude
vm_metric_include = [
"cpu.demand.average",
"cpu.idle.summation",
@@ -70,6 +74,7 @@ vm_metric_exclude = [ "*" ]
## Hosts
## Typical host metrics (if omitted or empty, all metrics are collected)
# host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+ # host_exclude [] # Inventory paths to exclude
host_metric_include = [
"cpu.coreUtilization.average",
"cpu.costop.summation",
@@ -118,23 +123,30 @@ vm_metric_exclude = [ "*" ]
"storageAdapter.write.average",
"sys.uptime.latest",
]
+ ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
+ # ip_addresses = ["ipv6", "ipv4" ]
+
# host_metric_exclude = [] ## Nothing excluded by default
# host_instances = true ## true by default
+
## Clusters
# cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+ # cluster_exclude = [] # Inventory paths to exclude
# cluster_metric_include = [] ## if omitted or empty, all metrics are collected
# cluster_metric_exclude = [] ## Nothing excluded by default
# cluster_instances = false ## false by default
## Datastores
- # cluster_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+ # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+ # datastore_exclude = [] # Inventory paths to exclude
# datastore_metric_include = [] ## if omitted or empty, all metrics are collected
# datastore_metric_exclude = [] ## Nothing excluded by default
# datastore_instances = false ## false by default
## Datacenters
# datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+ # datacenter_exclude = [] # Inventory paths to exclude
datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
# datacenter_instances = false ## false by default
@@ -143,11 +155,11 @@ vm_metric_exclude = [ "*" ]
## separator character to use for measurement and field names (default: "_")
# separator = "_"
- ## number of objects to retreive per query for realtime resources (vms and hosts)
+ ## number of objects to retrieve per query for realtime resources (vms and hosts)
## set to 64 for vCenter 5.5 and 6.0 (default: 256)
# max_query_objects = 256
- ## number of metrics to retreive per query for non-realtime resources (clusters and datastores)
+ ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
## set to 64 for vCenter 5.5 and 6.0 (default: 256)
# max_query_metrics = 256
@@ -155,11 +167,6 @@ vm_metric_exclude = [ "*" ]
# collect_concurrency = 1
# discover_concurrency = 1
- ## whether or not to force discovery of new objects on initial gather call before collecting metrics
- ## when true for large environments this may cause errors for time elapsed while collecting metrics
- ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
- # force_discover_on_init = false
-
## the interval before (re)discovering objects subject to metrics collection (default: 300s)
# object_discovery_interval = "300s"
@@ -174,6 +181,17 @@ vm_metric_exclude = [ "*" ]
## preserve the full precision when averaging takes place.
# use_int_samples = true
+ ## Custom attributes from vCenter can be very useful for queries in order to slice the
+ ## metrics along different dimension and for forming ad-hoc relationships. They are disabled
+ ## by default, since they can add a considerable amount of tags to the resulting metrics. To
+ ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+ ## to select the attributes you want to include.
+ ## By default, since they can add a considerable amount of tags to the resulting metrics. To
+ ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+ ## to select the attributes you want to include.
+ # custom_attribute_include = []
+ # custom_attribute_exclude = ["*"]
+
## Optional SSL Config
# ssl_ca = "/path/to/cafile"
# ssl_cert = "/path/to/certfile"
@@ -190,7 +208,7 @@ A vCenter administrator can change this setting, see this [VMware KB article](ht
Any modification should be reflected in this plugin by modifying the parameter `max_query_objects`
```
- ## number of objects to retreive per query for realtime resources (vms and hosts)
+ ## number of objects to retrieve per query for realtime resources (vms and hosts)
## set to 64 for vCenter 5.5 and 6.0 (default: 256)
# max_query_objects = 256
```
@@ -257,18 +275,18 @@ We can extend this to looking at a cluster level: ```/DC0/host/Cluster1/*/hadoop
vCenter keeps two different kinds of metrics, known as realtime and historical metrics.
-* Realtime metrics: Avaialable at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter.
+* Realtime metrics: Available at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter.
* Historical metrics: Available at a 5 minute, 30 minutes, 2 hours and 24 hours rollup levels. The vSphere Telegraf plugin only uses the 5 minute rollup. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**.
For more information, refer to the vSphere documentation here: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.2.html
-This distinction has an impact on how Telegraf collects metrics. A single instance of an input plugin can have one and only one collection interval, which means that you typically set the collection interval based on the most frequently collected metric. Let's assume you set the collection interval to 1 minute. All realtime metrics will be collected every minute. Since the historical metrics are only available on a 5 minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works fine in many cases. Problems arise when the collection of historical metrics takes longer than the collecition interval. This will cause error messages similar to this to appear in the Telegraf logs:
+This distinction has an impact on how Telegraf collects metrics. A single instance of an input plugin can have one and only one collection interval, which means that you typically set the collection interval based on the most frequently collected metric. Let's assume you set the collection interval to 1 minute. All realtime metrics will be collected every minute. Since the historical metrics are only available on a 5 minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works fine in many cases. Problems arise when the collection of historical metrics takes longer than the collection interval. This will cause error messages similar to this to appear in the Telegraf logs:
```2019-01-16T13:41:10Z W! [agent] input "inputs.vsphere" did not complete within its interval```
This will disrupt the metric collection and can result in missed samples. The best practice workaround is to specify two instances of the vSphere plugin, one for the realtime metrics with a short collection interval and one for the historical metrics with a longer interval. You can use the ```*_metric_exclude``` to turn off the resources you don't want to collect metrics for in each instance. For example:
-```
+```toml
## Realtime instance
[[inputs.vsphere]]
interval = "60s"
diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go
index ca7af5843050e..b3096f7be300b 100644
--- a/plugins/inputs/vsphere/client.go
+++ b/plugins/inputs/vsphere/client.go
@@ -4,13 +4,13 @@ import (
"context"
"crypto/tls"
"fmt"
- "log"
"net/url"
"strconv"
"strings"
"sync"
"time"
+ "github.com/influxdata/telegraf"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/performance"
@@ -36,7 +36,7 @@ type ClientFactory struct {
parent *VSphere
}
-// Client represents a connection to vSphere and is backed by a govmoni connection
+// Client represents a connection to vSphere and is backed by a govmomi connection
type Client struct {
Client *govmomi.Client
Views *view.Manager
@@ -45,6 +45,7 @@ type Client struct {
Valid bool
Timeout time.Duration
closeGate sync.Once
+ log telegraf.Logger
}
// NewClientFactory creates a new ClientFactory and prepares it for use.
@@ -61,28 +62,39 @@ func NewClientFactory(ctx context.Context, url *url.URL, parent *VSphere) *Clien
func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) {
cf.mux.Lock()
defer cf.mux.Unlock()
- if cf.client == nil {
- var err error
- if cf.client, err = NewClient(ctx, cf.url, cf.parent); err != nil {
- return nil, err
+ retrying := false
+ for {
+ if cf.client == nil {
+ var err error
+ if cf.client, err = NewClient(ctx, cf.url, cf.parent); err != nil {
+ return nil, err
+ }
}
- }
- // Execute a dummy call against the server to make sure the client is
- // still functional. If not, try to log back in. If that doesn't work,
- // we give up.
- ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration)
- defer cancel1()
- if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil {
- log.Printf("I! [inputs.vsphere]: Client session seems to have time out. Reauthenticating!")
- ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration)
- defer cancel2()
- if cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)) != nil {
- return nil, fmt.Errorf("Renewing authentication failed: %v", err)
+ // Execute a dummy call against the server to make sure the client is
+ // still functional. If not, try to log back in. If that doesn't work,
+ // we give up.
+ ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration)
+ defer cancel1()
+ if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil {
+ cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!")
+ ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration)
+ defer cancel2()
+ if err := cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)); err != nil {
+ if !retrying {
+ // The client went stale. Probably because someone rebooted vCenter. Clear it to
+ // force us to create a fresh one. We only get one chance at this. If we fail a second time
+ // we will simply skip this collection round and hope things have stabilized for the next one.
+ retrying = true
+ cf.client = nil
+ continue
+ }
+ return nil, fmt.Errorf("renewing authentication failed: %s", err.Error())
+ }
}
- }
- return cf.client, nil
+ return cf.client, nil
+ }
}
// NewClient creates a new vSphere client based on the url and setting passed as parameters.
@@ -102,7 +114,7 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) {
u.User = url.UserPassword(vs.Username, vs.Password)
}
- log.Printf("D! [inputs.vsphere]: Creating client: %s", u.Host)
+ vs.Log.Debugf("Creating client: %s", u.Host)
soapClient := soap.NewClient(u, tlsCfg.InsecureSkipVerify)
// Add certificate if we have it. Use it to log us in.
@@ -159,6 +171,7 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) {
p := performance.NewManager(c.Client)
client := &Client{
+ log: vs.Log,
Client: c,
Views: m,
Root: v,
@@ -173,9 +186,9 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) {
if err != nil {
return nil, err
}
- log.Printf("D! [inputs.vsphere] vCenter says max_query_metrics should be %d", n)
+ vs.Log.Debugf("vCenter says max_query_metrics should be %d", n)
if n < vs.MaxQueryMetrics {
- log.Printf("W! [inputs.vsphere] Configured max_query_metrics is %d, but server limits it to %d. Reducing.", vs.MaxQueryMetrics, n)
+ vs.Log.Warnf("Configured max_query_metrics is %d, but server limits it to %d. Reducing.", vs.MaxQueryMetrics, n)
vs.MaxQueryMetrics = n
}
return client, nil
@@ -191,7 +204,6 @@ func (cf *ClientFactory) Close() {
}
func (c *Client) close() {
-
// Use a Once to prevent us from panics stemming from trying
// to close it multiple times.
c.closeGate.Do(func() {
@@ -199,7 +211,7 @@ func (c *Client) close() {
defer cancel()
if c.Client != nil {
if err := c.Client.Logout(ctx); err != nil {
- log.Printf("E! [inputs.vsphere]: Error during logout: %s", err)
+ c.log.Errorf("Logout: %s", err.Error())
}
}
})
@@ -228,7 +240,7 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) {
if s, ok := res[0].GetOptionValue().Value.(string); ok {
v, err := strconv.Atoi(s)
if err == nil {
- log.Printf("D! [inputs.vsphere] vCenter maxQueryMetrics is defined: %d", v)
+ c.log.Debugf("vCenter maxQueryMetrics is defined: %d", v)
if v == -1 {
// Whatever the server says, we never ask for more metrics than this.
return absoluteMaxMetrics, nil
@@ -239,17 +251,17 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) {
// Fall through version-based inference if value isn't usable
}
} else {
- log.Println("D! [inputs.vsphere] Option query for maxQueryMetrics failed. Using default")
+ c.log.Debug("Option query for maxQueryMetrics failed. Using default")
}
// No usable maxQueryMetrics setting. Infer based on version
ver := c.Client.Client.ServiceContent.About.Version
parts := strings.Split(ver, ".")
if len(parts) < 2 {
- log.Printf("W! [inputs.vsphere] vCenter returned an invalid version string: %s. Using default query size=64", ver)
+ c.log.Warnf("vCenter returned an invalid version string: %s. Using default query size=64", ver)
return 64, nil
}
- log.Printf("D! [inputs.vsphere] vCenter version is: %s", ver)
+ c.log.Debugf("vCenter version is: %s", ver)
major, err := strconv.Atoi(parts[0])
if err != nil {
return 0, err
@@ -294,3 +306,18 @@ func (c *Client) ListResources(ctx context.Context, root *view.ContainerView, ki
defer cancel1()
return root.Retrieve(ctx1, kind, ps, dst)
}
+
+func (c *Client) GetCustomFields(ctx context.Context) (map[int32]string, error) {
+ ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+ defer cancel1()
+ cfm := object.NewCustomFieldsManager(c.Client.Client)
+ fields, err := cfm.Field(ctx1)
+ if err != nil {
+ return nil, err
+ }
+ r := make(map[int32]string)
+ for _, f := range fields {
+ r[f.Key] = f.Name
+ }
+ return r, nil
+}
diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go
index 694efb574d376..6d77cb69dddca 100644
--- a/plugins/inputs/vsphere/endpoint.go
+++ b/plugins/inputs/vsphere/endpoint.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
- "log"
"math"
"math/rand"
"net/url"
@@ -26,27 +25,43 @@ import (
var isolateLUN = regexp.MustCompile(".*/([^/]+)/?$")
+var isIPv4 = regexp.MustCompile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$")
+
+var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$")
+
const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics
const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics
-const maxSampleConst = 10 // Absolute maximim number of samples regardless of period
+const maxSampleConst = 10 // Absolute maximum number of samples regardless of period
const maxMetadataSamples = 100 // Number of resources to sample for metric metadata
+const hwMarkTTL = time.Duration(4 * time.Hour)
+
+type queryChunk []types.PerfQuerySpec
+
+type queryJob func(queryChunk)
+
// Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower
// level Client type.
type Endpoint struct {
- Parent *VSphere
- URL *url.URL
- resourceKinds map[string]*resourceKind
- hwMarks *TSCache
- lun2ds map[string]string
- discoveryTicker *time.Ticker
- collectMux sync.RWMutex
- initialized bool
- clientFactory *ClientFactory
- busy sync.Mutex
+ Parent *VSphere
+ URL *url.URL
+ resourceKinds map[string]*resourceKind
+ hwMarks *TSCache
+ lun2ds map[string]string
+ discoveryTicker *time.Ticker
+ collectMux sync.RWMutex
+ initialized bool
+ clientFactory *ClientFactory
+ busy sync.Mutex
+ customFields map[int32]string
+ customAttrFilter filter.Filter
+ customAttrEnabled bool
+ metricNameLookup map[int32]string
+ metricNameMux sync.RWMutex
+ log telegraf.Logger
}
type resourceKind struct {
@@ -60,6 +75,7 @@ type resourceKind struct {
objects objectMap
filters filter.Filter
paths []string
+ excludePaths []string
collectInstances bool
getObjects func(context.Context, *Endpoint, *ResourceFilter) (objectMap, error)
include []string
@@ -77,21 +93,23 @@ type metricEntry struct {
fields map[string]interface{}
}
-type objectMap map[string]objectRef
+type objectMap map[string]*objectRef
type objectRef struct {
- name string
- altID string
- ref types.ManagedObjectReference
- parentRef *types.ManagedObjectReference //Pointer because it must be nillable
- guest string
- dcname string
+ name string
+ altID string
+ ref types.ManagedObjectReference
+ parentRef *types.ManagedObjectReference //Pointer because it must be nillable
+ guest string
+ dcname string
+ customValues map[string]string
+ lookup map[string]string
}
func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) {
if pKind, ok := e.resourceKinds[res.parent]; ok {
if p, ok := pKind.objects[obj.parentRef.Value]; ok {
- return &p, true
+ return p, true
}
}
return nil, false
@@ -99,14 +117,17 @@ func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, boo
// NewEndpoint returns a new connection to a vCenter based on the URL and configuration passed
// as parameters.
-func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, error) {
+func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegraf.Logger) (*Endpoint, error) {
e := Endpoint{
- URL: url,
- Parent: parent,
- hwMarks: NewTSCache(1 * time.Hour),
- lun2ds: make(map[string]string),
- initialized: false,
- clientFactory: NewClientFactory(ctx, url, parent),
+ URL: url,
+ Parent: parent,
+ hwMarks: NewTSCache(hwMarkTTL),
+ lun2ds: make(map[string]string),
+ initialized: false,
+ clientFactory: NewClientFactory(ctx, url, parent),
+ customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude),
+ customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude),
+ log: log,
}
e.resourceKinds = map[string]*resourceKind{
@@ -121,6 +142,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
objects: make(objectMap),
filters: newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude),
paths: parent.DatacenterInclude,
+ excludePaths: parent.DatacenterExclude,
simple: isSimple(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude),
include: parent.DatacenterMetricInclude,
collectInstances: parent.DatacenterInstances,
@@ -138,6 +160,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
objects: make(objectMap),
filters: newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude),
paths: parent.ClusterInclude,
+ excludePaths: parent.ClusterExclude,
simple: isSimple(parent.ClusterMetricInclude, parent.ClusterMetricExclude),
include: parent.ClusterMetricInclude,
collectInstances: parent.ClusterInstances,
@@ -155,6 +178,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
objects: make(objectMap),
filters: newFilterOrPanic(parent.HostMetricInclude, parent.HostMetricExclude),
paths: parent.HostInclude,
+ excludePaths: parent.HostExclude,
simple: isSimple(parent.HostMetricInclude, parent.HostMetricExclude),
include: parent.HostMetricInclude,
collectInstances: parent.HostInstances,
@@ -172,6 +196,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
objects: make(objectMap),
filters: newFilterOrPanic(parent.VMMetricInclude, parent.VMMetricExclude),
paths: parent.VMInclude,
+ excludePaths: parent.VMExclude,
simple: isSimple(parent.VMMetricInclude, parent.VMMetricExclude),
include: parent.VMMetricInclude,
collectInstances: parent.VMInstances,
@@ -188,6 +213,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
objects: make(objectMap),
filters: newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude),
paths: parent.DatastoreInclude,
+ excludePaths: parent.DatastoreExclude,
simple: isSimple(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude),
include: parent.DatastoreMetricInclude,
collectInstances: parent.DatastoreInstances,
@@ -239,10 +265,10 @@ func (e *Endpoint) startDiscovery(ctx context.Context) {
case <-e.discoveryTicker.C:
err := e.discover(ctx)
if err != nil && err != context.Canceled {
- log.Printf("E! [inputs.vsphere]: Error in discovery for %s: %v", e.URL.Host, err)
+ e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
}
case <-ctx.Done():
- log.Printf("D! [inputs.vsphere]: Exiting discovery goroutine for %s", e.URL.Host)
+ e.log.Debugf("Exiting discovery goroutine for %s", e.URL.Host)
e.discoveryTicker.Stop()
return
}
@@ -253,51 +279,61 @@ func (e *Endpoint) startDiscovery(ctx context.Context) {
func (e *Endpoint) initalDiscovery(ctx context.Context) {
err := e.discover(ctx)
if err != nil && err != context.Canceled {
- log.Printf("E! [inputs.vsphere]: Error in discovery for %s: %v", e.URL.Host, err)
+ e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
}
e.startDiscovery(ctx)
}
func (e *Endpoint) init(ctx context.Context) error {
+ client, err := e.clientFactory.GetClient(ctx)
+ if err != nil {
+ return err
+ }
- if e.Parent.ObjectDiscoveryInterval.Duration > 0 {
-
- // Run an initial discovery. If force_discovery_on_init isn't set, we kick it off as a
- // goroutine without waiting for it. This will probably cause us to report an empty
- // dataset on the first collection, but it solves the issue of the first collection timing out.
- if e.Parent.ForceDiscoverOnInit {
- log.Printf("D! [inputs.vsphere]: Running initial discovery and waiting for it to finish")
- e.initalDiscovery(ctx)
+ // Initial load of custom field metadata
+ if e.customAttrEnabled {
+ fields, err := client.GetCustomFields(ctx)
+ if err != nil {
+ e.log.Warn("Could not load custom field metadata")
} else {
- // Otherwise, just run it in the background. We'll probably have an incomplete first metric
- // collection this way.
- go func() {
- e.initalDiscovery(ctx)
- }()
+ e.customFields = fields
}
}
+
+ if e.Parent.ObjectDiscoveryInterval.Duration > 0 {
+ e.Parent.Log.Debug("Running initial discovery")
+ e.initalDiscovery(ctx)
+ }
e.initialized = true
return nil
}
-func (e *Endpoint) getMetricNameMap(ctx context.Context) (map[int32]string, error) {
+func (e *Endpoint) getMetricNameForId(id int32) string {
+ e.metricNameMux.RLock()
+ defer e.metricNameMux.RUnlock()
+ return e.metricNameLookup[id]
+}
+
+func (e *Endpoint) reloadMetricNameMap(ctx context.Context) error {
+ e.metricNameMux.Lock()
+ defer e.metricNameMux.Unlock()
client, err := e.clientFactory.GetClient(ctx)
if err != nil {
- return nil, err
+ return err
}
mn, err := client.CounterInfoByName(ctx)
if err != nil {
- return nil, err
+ return err
}
- names := make(map[int32]string)
+ e.metricNameLookup = make(map[int32]string)
for name, m := range mn {
- names[m.Key] = name
+ e.metricNameLookup[m.Key] = name
}
- return names, nil
+ return nil
}
-func (e *Endpoint) getMetadata(ctx context.Context, obj objectRef, sampling int32) (performance.MetricList, error) {
+func (e *Endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int32) (performance.MetricList, error) {
client, err := e.clientFactory.GetClient(ctx)
if err != nil {
return nil, err
@@ -312,41 +348,49 @@ func (e *Endpoint) getMetadata(ctx context.Context, obj objectRef, sampling int3
return metrics, nil
}
-func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache map[string]string, r types.ManagedObjectReference) string {
+func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache map[string]string, r types.ManagedObjectReference) (string, bool) {
+ return e.getAncestorName(ctx, client, "Datacenter", cache, r)
+}
+
+func (e *Endpoint) getAncestorName(ctx context.Context, client *Client, resourceType string, cache map[string]string, r types.ManagedObjectReference) (string, bool) {
path := make([]string, 0)
returnVal := ""
here := r
- for {
- if name, ok := cache[here.Reference().String()]; ok {
- // Populate cache for the entire chain of objects leading here.
- returnVal = name
- break
- }
- path = append(path, here.Reference().String())
- o := object.NewCommon(client.Client.Client, r)
- var result mo.ManagedEntity
- ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
- defer cancel1()
- err := o.Properties(ctx1, here, []string{"parent", "name"}, &result)
- if err != nil {
- log.Printf("W! [inputs.vsphere]: Error while resolving parent. Assuming no parent exists. Error: %s", err)
- break
- }
- if result.Reference().Type == "Datacenter" {
- // Populate cache for the entire chain of objects leading here.
- returnVal = result.Name
- break
- }
- if result.Parent == nil {
- log.Printf("D! [inputs.vsphere]: No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
- break
- }
- here = result.Parent.Reference()
+ done := false
+ for !done {
+ done = func() bool {
+ if name, ok := cache[here.Reference().String()]; ok {
+ // Populate cache for the entire chain of objects leading here.
+ returnVal = name
+ return true
+ }
+ path = append(path, here.Reference().String())
+ o := object.NewCommon(client.Client.Client, r)
+ var result mo.ManagedEntity
+ ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
+ defer cancel1()
+ err := o.Properties(ctx1, here, []string{"parent", "name"}, &result)
+ if err != nil {
+ e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error())
+ return true
+ }
+ if result.Reference().Type == resourceType {
+ // Populate cache for the entire chain of objects leading here.
+ returnVal = result.Name
+ return true
+ }
+ if result.Parent == nil {
+ e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
+ return true
+ }
+ here = result.Parent.Reference()
+ return false
+ }()
}
for _, s := range path {
cache[s] = returnVal
}
- return returnVal
+ return returnVal, returnVal != ""
}
func (e *Endpoint) discover(ctx context.Context) error {
@@ -356,7 +400,7 @@ func (e *Endpoint) discover(ctx context.Context) error {
return ctx.Err()
}
- metricNames, err := e.getMetricNameMap(ctx)
+ err := e.reloadMetricNameMap(ctx)
if err != nil {
return err
}
@@ -368,8 +412,7 @@ func (e *Endpoint) discover(ctx context.Context) error {
return err
}
- log.Printf("D! [inputs.vsphere]: Discover new objects for %s", e.URL.Host)
- resourceKinds := make(map[string]resourceKind)
+ e.log.Debugf("Discover new objects for %s", e.URL.Host)
dcNameCache := make(map[string]string)
numRes := int64(0)
@@ -377,17 +420,18 @@ func (e *Endpoint) discover(ctx context.Context) error {
// Populate resource objects, and endpoint instance info.
newObjects := make(map[string]objectMap)
for k, res := range e.resourceKinds {
- log.Printf("D! [inputs.vsphere] Discovering resources for %s", res.name)
+ e.log.Debugf("Discovering resources for %s", res.name)
// Need to do this for all resource types even if they are not enabled
if res.enabled || k != "vm" {
rf := ResourceFilter{
- finder: &Finder{client},
- resType: res.vcName,
- paths: res.paths}
+ finder: &Finder{client},
+ resType: res.vcName,
+ paths: res.paths,
+ excludePaths: res.excludePaths}
ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
- defer cancel1()
objects, err := res.getObjects(ctx1, e, &rf)
+ cancel1()
if err != nil {
return err
}
@@ -396,7 +440,7 @@ func (e *Endpoint) discover(ctx context.Context) error {
if res.name != "Datacenter" {
for k, obj := range objects {
if obj.parentRef != nil {
- obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef)
+ obj.dcname, _ = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef)
objects[k] = obj
}
}
@@ -407,7 +451,7 @@ func (e *Endpoint) discover(ctx context.Context) error {
if res.simple {
e.simpleMetadataSelect(ctx, client, res)
} else {
- e.complexMetadataSelect(ctx, res, objects, metricNames)
+ e.complexMetadataSelect(ctx, res, objects)
}
}
newObjects[k] = objects
@@ -415,19 +459,32 @@ func (e *Endpoint) discover(ctx context.Context) error {
SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects)))
numRes += int64(len(objects))
}
+ if err != nil {
+ e.log.Error(err)
+ }
}
// Build lun2ds map
- dss := resourceKinds["datastore"]
+ dss := newObjects["datastore"]
l2d := make(map[string]string)
- for _, ds := range dss.objects {
- url := ds.altID
- m := isolateLUN.FindStringSubmatch(url)
+ for _, ds := range dss {
+ lunId := ds.altID
+ m := isolateLUN.FindStringSubmatch(lunId)
if m != nil {
l2d[m[1]] = ds.name
}
}
+ // Load custom field metadata
+ var fields map[int32]string
+ if e.customAttrEnabled {
+ fields, err = client.GetCustomFields(ctx)
+ if err != nil {
+ e.log.Warn("Could not load custom field metadata")
+ fields = nil
+ }
+ }
+
// Atomically swap maps
e.collectMux.Lock()
defer e.collectMux.Unlock()
@@ -437,16 +494,20 @@ func (e *Endpoint) discover(ctx context.Context) error {
}
e.lun2ds = l2d
+ if fields != nil {
+ e.customFields = fields
+ }
+
sw.Stop()
SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": "instance-total"}, numRes)
return nil
}
func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) {
- log.Printf("D! [inputs.vsphere] Using fast metric metadata selection for %s", res.name)
+ e.log.Debugf("Using fast metric metadata selection for %s", res.name)
m, err := client.CounterInfoByName(ctx)
if err != nil {
- log.Printf("E! [inputs.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err)
+ e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
return
}
res.metrics = make(performance.MetricList, 0, len(res.include))
@@ -462,15 +523,15 @@ func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res
}
res.metrics = append(res.metrics, cnt)
} else {
- log.Printf("W! [inputs.vsphere] Metric name %s is unknown. Will not be collected", s)
+ e.log.Warnf("Metric name %s is unknown. Will not be collected", s)
}
}
}
-func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap, metricNames map[int32]string) {
+func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap) {
// We're only going to get metadata from maxMetadataSamples resources. If we have
// more resources than that, we pick maxMetadataSamples samples at random.
- sampledObjects := make([]objectRef, len(objects))
+ sampledObjects := make([]*objectRef, len(objects))
i := 0
for _, obj := range objects {
sampledObjects[i] = obj
@@ -478,7 +539,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind,
}
n := len(sampledObjects)
if n > maxMetadataSamples {
- // Shuffle samples into the maxMetadatSamples positions
+ // Shuffle samples into the maxMetadataSamples positions
for i := 0; i < maxMetadataSamples; i++ {
j := int(rand.Int31n(int32(i + 1)))
t := sampledObjects[i]
@@ -491,11 +552,11 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind,
instInfoMux := sync.Mutex{}
te := NewThrottledExecutor(e.Parent.DiscoverConcurrency)
for _, obj := range sampledObjects {
- func(obj objectRef) {
+ func(obj *objectRef) {
te.Run(ctx, func() {
metrics, err := e.getMetadata(ctx, obj, res.sampling)
if err != nil {
- log.Printf("E! [inputs.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err)
+ e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
}
mMap := make(map[string]types.PerfMetricId)
for _, m := range metrics {
@@ -504,11 +565,11 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind,
} else {
m.Instance = ""
}
- if res.filters.Match(metricNames[m.CounterId]) {
+ if res.filters.Match(e.getMetricNameForId(m.CounterId)) {
mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m
}
}
- log.Printf("D! [inputs.vsphere] Found %d metrics for %s", len(mMap), obj.name)
+ e.log.Debugf("Found %d metrics for %s", len(mMap), obj.name)
instInfoMux.Lock()
defer instInfoMux.Unlock()
if len(mMap) > len(res.metrics) {
@@ -535,8 +596,13 @@ func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (o
}
m := make(objectMap, len(resources))
for _, r := range resources {
- m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
- name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, dcname: r.Name}
+ m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
+ name: r.Name,
+ ref: r.ExtensibleManagedObject.Reference(),
+ parentRef: r.Parent,
+ dcname: r.Name,
+ customValues: e.loadCustomAttributes(&r.ManagedEntity),
+ }
}
return m, nil
}
@@ -552,35 +618,47 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje
cache := make(map[string]*types.ManagedObjectReference)
m := make(objectMap, len(resources))
for _, r := range resources {
- // We're not interested in the immediate parent (a folder), but the data center.
- p, ok := cache[r.Parent.Value]
- if !ok {
- ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
- defer cancel2()
- client, err := e.clientFactory.GetClient(ctx2)
- if err != nil {
- return nil, err
+ // Wrap in a function to make defer work correctly.
+ err := func() error {
+ // We're not interested in the immediate parent (a folder), but the data center.
+ p, ok := cache[r.Parent.Value]
+ if !ok {
+ ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
+ defer cancel2()
+ client, err := e.clientFactory.GetClient(ctx2)
+ if err != nil {
+ return err
+ }
+ o := object.NewFolder(client.Client.Client, *r.Parent)
+ var folder mo.Folder
+ ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
+ defer cancel3()
+ err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder)
+ if err != nil {
+ e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error())
+ p = nil
+ } else {
+ pp := folder.Parent.Reference()
+ p = &pp
+ cache[r.Parent.Value] = p
+ }
}
- o := object.NewFolder(client.Client.Client, *r.Parent)
- var folder mo.Folder
- ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
- defer cancel3()
- err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder)
- if err != nil {
- log.Printf("W! [inputs.vsphere] Error while getting folder parent: %e", err)
- p = nil
- } else {
- pp := folder.Parent.Reference()
- p = &pp
- cache[r.Parent.Value] = p
+ m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
+ name: r.Name,
+ ref: r.ExtensibleManagedObject.Reference(),
+ parentRef: p,
+ customValues: e.loadCustomAttributes(&r.ManagedEntity),
}
+ return nil
+ }()
+ if err != nil {
+ return nil, err
}
- m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
- name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: p}
}
return m, nil
}
+//noinspection GoUnusedParameter
func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) {
var resources []mo.HostSystem
err := filter.FindAll(ctx, &resources)
@@ -589,8 +667,12 @@ func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectM
}
m := make(objectMap)
for _, r := range resources {
- m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
- name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent}
+ m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
+ name: r.Name,
+ ref: r.ExtensibleManagedObject.Reference(),
+ parentRef: r.Parent,
+ customValues: e.loadCustomAttributes(&r.ManagedEntity),
+ }
}
return m, nil
}
@@ -610,14 +692,77 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap
}
guest := "unknown"
uuid := ""
+ lookup := make(map[string]string)
+
+ // Extract host name
+ if r.Guest != nil && r.Guest.HostName != "" {
+ lookup["guesthostname"] = r.Guest.HostName
+ }
+
+ // Collect network information
+ for _, net := range r.Guest.Net {
+ if net.DeviceConfigId == -1 {
+ continue
+ }
+ if net.IpConfig == nil || net.IpConfig.IpAddress == nil {
+ continue
+ }
+ ips := make(map[string][]string)
+ for _, ip := range net.IpConfig.IpAddress {
+ addr := ip.IpAddress
+ for _, ipType := range e.Parent.IpAddresses {
+ if !(ipType == "ipv4" && isIPv4.MatchString(addr) ||
+ ipType == "ipv6" && isIPv6.MatchString(addr)) {
+ continue
+ }
+
+ // By convention, we want the preferred addresses to appear first in the array.
+ if _, ok := ips[ipType]; !ok {
+ ips[ipType] = make([]string, 0)
+ }
+ if ip.State == "preferred" {
+ ips[ipType] = append([]string{addr}, ips[ipType]...)
+ } else {
+ ips[ipType] = append(ips[ipType], addr)
+ }
+ }
+ }
+ for ipType, ipList := range ips {
+ lookup["nic/"+strconv.Itoa(int(net.DeviceConfigId))+"/"+ipType] = strings.Join(ipList, ",")
+ }
+ }
+
// Sometimes Config is unknown and returns a nil pointer
- //
if r.Config != nil {
guest = cleanGuestID(r.Config.GuestId)
uuid = r.Config.Uuid
}
- m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
- name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Runtime.Host, guest: guest, altID: uuid}
+ cvs := make(map[string]string)
+ if e.customAttrEnabled {
+ for _, cv := range r.Summary.CustomValue {
+ val := cv.(*types.CustomFieldStringValue)
+ if val.Value == "" {
+ continue
+ }
+ key, ok := e.customFields[val.Key]
+ if !ok {
+ e.log.Warnf("Metadata for custom field %d not found. Skipping", val.Key)
+ continue
+ }
+ if e.customAttrFilter.Match(key) {
+ cvs[key] = val.Value
+ }
+ }
+ }
+ m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
+ name: r.Name,
+ ref: r.ExtensibleManagedObject.Reference(),
+ parentRef: r.Runtime.Host,
+ guest: guest,
+ altID: uuid,
+ customValues: e.loadCustomAttributes(&r.ManagedEntity),
+ lookup: lookup,
+ }
}
return m, nil
}
@@ -632,19 +777,47 @@ func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (ob
}
m := make(objectMap)
for _, r := range resources {
- url := ""
+ lunId := ""
if r.Info != nil {
info := r.Info.GetDatastoreInfo()
if info != nil {
- url = info.Url
+ lunId = info.Url
}
}
- m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
- name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, altID: url}
+ m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
+ name: r.Name,
+ ref: r.ExtensibleManagedObject.Reference(),
+ parentRef: r.Parent,
+ altID: lunId,
+ customValues: e.loadCustomAttributes(&r.ManagedEntity),
+ }
}
return m, nil
}
+func (e *Endpoint) loadCustomAttributes(entity *mo.ManagedEntity) map[string]string {
+ if !e.customAttrEnabled {
+ return map[string]string{}
+ }
+ cvs := make(map[string]string)
+ for _, v := range entity.CustomValue {
+ cv, ok := v.(*types.CustomFieldStringValue)
+ if !ok {
+ e.Parent.Log.Warnf("Metadata for custom field %d not of string type. Skipping", cv.Key)
+ continue
+ }
+ key, ok := e.customFields[cv.Key]
+ if !ok {
+ e.Parent.Log.Warnf("Metadata for custom field %d not found. Skipping", cv.Key)
+ continue
+ }
+ if e.customAttrFilter.Match(key) {
+ cvs[key] = cv.Value
+ }
+ }
+ return cvs
+}
+
// Close shuts down an Endpoint and releases any resources associated with it.
func (e *Endpoint) Close() {
e.clientFactory.Close()
@@ -695,13 +868,13 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error
}
// Workaround to make sure pqs is a copy of the loop variable and won't change.
-func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job func([]types.PerfQuerySpec), pqs []types.PerfQuerySpec) {
+func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job queryJob, pqs queryChunk) {
te.Run(ctx, func() {
job(pqs)
})
}
-func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job func([]types.PerfQuerySpec)) {
+func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job queryJob) {
te := NewThrottledExecutor(e.Parent.CollectConcurrency)
maxMetrics := e.Parent.MaxQueryMetrics
if maxMetrics < 1 {
@@ -714,54 +887,48 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
if res.name == "cluster" && maxMetrics > 10 {
maxMetrics = 10
}
- pqs := make([]types.PerfQuerySpec, 0, e.Parent.MaxQueryObjects)
- metrics := 0
- total := 0
- nRes := 0
+
+ pqs := make(queryChunk, 0, e.Parent.MaxQueryObjects)
+
for _, object := range res.objects {
- mr := len(res.metrics)
- for mr > 0 {
- mc := mr
- headroom := maxMetrics - metrics
- if !res.realTime && mc > headroom { // Metric query limit only applies to non-realtime metrics
- mc = headroom
- }
- fm := len(res.metrics) - mr
- pq := types.PerfQuerySpec{
- Entity: object.ref,
- MaxSample: maxSampleConst,
- MetricId: res.metrics[fm : fm+mc],
- IntervalId: res.sampling,
- Format: "normal",
- }
+ timeBuckets := make(map[int64]*types.PerfQuerySpec, 0)
+ for metricIdx, metric := range res.metrics {
- start, ok := e.hwMarks.Get(object.ref.Value)
+ // Determine time of last successful collection
+ metricName := e.getMetricNameForId(metric.CounterId)
+ if metricName == "" {
+ e.log.Infof("Unable to find metric name for id %d. Skipping!", metric.CounterId)
+ continue
+ }
+ start, ok := e.hwMarks.Get(object.ref.Value, metricName)
if !ok {
- // Look back 3 sampling periods by default
start = latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1))
}
- pq.StartTime = &start
- pq.EndTime = &now
-
- // Make sure endtime is always after start time. We may occasionally see samples from the future
- // returned from vCenter. This is presumably due to time drift between vCenter and EXSi nodes.
- if pq.StartTime.After(*pq.EndTime) {
- log.Printf("D! [inputs.vsphere] Future sample. Res: %s, StartTime: %s, EndTime: %s, Now: %s", pq.Entity, *pq.StartTime, *pq.EndTime, now)
- end := start.Add(time.Second)
- pq.EndTime = &end
+ start = start.Truncate(20 * time.Second) // Truncate to maximum resolution
+
+ // Create bucket if we don't already have it
+ bucket, ok := timeBuckets[start.Unix()]
+ if !ok {
+ bucket = &types.PerfQuerySpec{
+ Entity: object.ref,
+ MaxSample: maxSampleConst,
+ MetricId: make([]types.PerfMetricId, 0),
+ IntervalId: res.sampling,
+ Format: "normal",
+ }
+ bucket.StartTime = &start
+ bucket.EndTime = &now
+ timeBuckets[start.Unix()] = bucket
}
- pqs = append(pqs, pq)
- mr -= mc
- metrics += mc
+ // Add this metric to the bucket
+ bucket.MetricId = append(bucket.MetricId, metric)
- // We need to dump the current chunk of metrics for one of two reasons:
- // 1) We filled up the metric quota while processing the current resource
- // 2) We are at the last resource and have no more data to process.
- // 3) The query contains more than 100,000 individual metrics
- if mr > 0 || nRes >= e.Parent.MaxQueryObjects || len(pqs) > 100000 {
- log.Printf("D! [inputs.vsphere]: Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d",
- len(pqs), metrics, mr, res.name, e.URL.Host, total+1, len(res.objects))
+ // Bucket filled to capacity? (Only applies to non real time)
+ // OR if we're past the absolute maximum limit
+ if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > 100000 {
+ e.log.Debugf("Submitting partial query: %d metrics (%d remaining) of type %s for %s. Total objects %d",
+ len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.URL.Host, len(res.objects))
// Don't send work items if the context has been cancelled.
if ctx.Err() == context.Canceled {
@@ -769,20 +936,23 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
}
// Run collection job
+ delete(timeBuckets, start.Unix())
+ submitChunkJob(ctx, te, job, queryChunk{*bucket})
+ }
+ }
+ // Handle data in time bucket and submit job if we've reached the maximum number of object.
+ for _, bucket := range timeBuckets {
+ pqs = append(pqs, *bucket)
+ if (!res.realTime && len(pqs) > e.Parent.MaxQueryObjects) || len(pqs) > 100000 {
+ e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, len(bucket.MetricId))
submitChunkJob(ctx, te, job, pqs)
- pqs = make([]types.PerfQuerySpec, 0, e.Parent.MaxQueryObjects)
- metrics = 0
- nRes = 0
+ pqs = make(queryChunk, 0, e.Parent.MaxQueryObjects)
}
}
- total++
- nRes++
}
- // Handle final partially filled chunk
+ // Submit any jobs left in the queue
if len(pqs) > 0 {
- // Run collection job
- log.Printf("D! [inputs.vsphere]: Queuing query: %d objects, %d metrics (0 remaining) of type %s for %s. Total objects %d (final chunk)",
- len(pqs), metrics, res.name, e.URL.Host, len(res.objects))
+ e.log.Debugf("Submitting job for %s: %d objects", res.name, len(pqs))
submitChunkJob(ctx, te, job, pqs)
}
@@ -804,19 +974,27 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
// Estimate the interval at which we're invoked. Use local time (not server time)
// since this is about how we got invoked locally.
localNow := time.Now()
- estInterval := time.Duration(time.Minute)
+ estInterval := time.Minute
if !res.lastColl.IsZero() {
- estInterval = localNow.Sub(res.lastColl).Truncate(time.Duration(res.sampling) * time.Second)
+ s := time.Duration(res.sampling) * time.Second
+ rawInterval := localNow.Sub(res.lastColl)
+ paddedInterval := rawInterval + time.Duration(res.sampling/2)*time.Second
+ estInterval = paddedInterval.Truncate(s)
+ if estInterval < s {
+ estInterval = s
+ }
+ e.log.Debugf("Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval)
}
- log.Printf("D! [inputs.vsphere] Interval estimated to %s", estInterval)
+ e.log.Debugf("Interval estimated to %s", estInterval)
+ res.lastColl = localNow
latest := res.latestSample
if !latest.IsZero() {
elapsed := now.Sub(latest).Seconds() + 5.0 // Allow 5 second jitter.
- log.Printf("D! [inputs.vsphere]: Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType)
+ e.log.Debugf("Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType)
if !res.realTime && elapsed < float64(res.sampling) {
// No new data would be available. We're outta here!
- log.Printf("D! [inputs.vsphere]: Sampling period for %s of %d has not elapsed on %s",
+ e.log.Debugf("Sampling period for %s of %d has not elapsed on %s",
resourceType, res.sampling, e.URL.Host)
return nil
}
@@ -827,7 +1005,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
internalTags := map[string]string{"resourcetype": resourceType}
sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags)
- log.Printf("D! [inputs.vsphere]: Collecting metrics for %d objects of type %s for %s",
+ e.log.Debugf("Collecting metrics for %d objects of type %s for %s",
len(res.objects), resourceType, e.URL.Host)
count := int64(0)
@@ -837,12 +1015,14 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
// Divide workload into chunks and process them concurrently
e.chunkify(ctx, res, now, latest, acc,
- func(chunk []types.PerfQuerySpec) {
+ func(chunk queryChunk) {
n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval)
- log.Printf("D! [inputs.vsphere] CollectChunk for %s returned %d metrics", resourceType, n)
+ e.log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n)
if err != nil {
- acc.AddError(errors.New("While collecting " + res.name + ": " + err.Error()))
+ acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error()))
+ return
}
+ e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n)
atomic.AddInt64(&count, int64(n))
tsMux.Lock()
defer tsMux.Unlock()
@@ -851,7 +1031,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
}
})
- log.Printf("D! [inputs.vsphere] Latest sample for %s set to %s", resourceType, latestSample)
+ e.log.Debugf("Latest sample for %s set to %s", resourceType, latestSample)
if !latestSample.IsZero() {
res.latestSample = latestSample
}
@@ -860,7 +1040,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
return nil
}
-func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) {
+func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) {
rInfo := make([]types.PerfSampleInfo, 0, len(info))
rValues := make([]float64, 0, len(values))
bi := 1.0
@@ -869,7 +1049,7 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur
// According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted
// data coming back with missing values. Take care of that gracefully!
if idx >= len(values) {
- log.Printf("D! [inputs.vsphere] len(SampleInfo)>len(Value) %d > %d", len(info), len(values))
+ e.log.Debugf("len(SampleInfo)>len(Value) %d > %d during alignment", len(info), len(values))
break
}
v := float64(values[idx])
@@ -883,7 +1063,7 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur
if roundedTs == lastBucket {
bi++
p := len(rValues) - 1
- rValues[p] = ((bi-1)/bi)*float64(rValues[p]) + v/bi
+ rValues[p] = ((bi-1)/bi)*rValues[p] + v/bi
} else {
rValues = append(rValues, v)
roundedInfo := types.PerfSampleInfo{
@@ -895,12 +1075,11 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur
lastBucket = roundedTs
}
}
- //log.Printf("D! [inputs.vsphere] Aligned samples: %d collapsed into %d", len(info), len(rInfo))
return rInfo, rValues
}
-func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) {
- log.Printf("D! [inputs.vsphere] Query for %s has %d QuerySpecs", res.name, len(pqs))
+func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) {
+ e.log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs))
latestSample := time.Time{}
count := 0
resourceType := res.name
@@ -921,14 +1100,14 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
return count, latestSample, err
}
- log.Printf("D! [inputs.vsphere] Query for %s returned metrics for %d objects", resourceType, len(ems))
+ e.log.Debugf("Query for %s returned metrics for %d objects", resourceType, len(ems))
// Iterate through results
for _, em := range ems {
moid := em.Entity.Reference().Value
instInfo, found := res.objects[moid]
if !found {
- log.Printf("E! [inputs.vsphere]: MOID %s not found in cache. Skipping! (This should not happen!)", moid)
+ e.log.Errorf("MOID %s not found in cache. Skipping! (This should not happen!)", moid)
continue
}
buckets := make(map[string]metricEntry)
@@ -943,19 +1122,19 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
// Populate tags
objectRef, ok := res.objects[moid]
if !ok {
- log.Printf("E! [inputs.vsphere]: MOID %s not found in cache. Skipping", moid)
+ e.log.Errorf("MOID %s not found in cache. Skipping", moid)
continue
}
- e.populateTags(&objectRef, resourceType, res, t, &v)
+ e.populateTags(objectRef, resourceType, res, t, &v)
nValues := 0
- alignedInfo, alignedValues := alignSamples(em.SampleInfo, v.Value, interval)
+ alignedInfo, alignedValues := e.alignSamples(em.SampleInfo, v.Value, interval)
for idx, sample := range alignedInfo {
// According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted
// data coming back with missing values. Take care of that gracefully!
if idx >= len(alignedValues) {
- log.Printf("D! [inputs.vsphere] len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues))
+ e.log.Debugf("Len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues))
break
}
ts := sample.Timestamp
@@ -976,11 +1155,11 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
// Percentage values must be scaled down by 100.
info, ok := metricInfo[name]
if !ok {
- log.Printf("E! [inputs.vsphere]: Could not determine unit for %s. Skipping", name)
+ e.log.Errorf("Could not determine unit for %s. Skipping", name)
}
v := alignedValues[idx]
if info.UnitInfo.GetElementDescription().Key == "percent" {
- bucket.fields[fn] = float64(v) / 100.0
+ bucket.fields[fn] = v / 100.0
} else {
if e.Parent.UseIntSamples {
bucket.fields[fn] = int64(round(v))
@@ -990,11 +1169,11 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
}
count++
- // Update highwater marks
- e.hwMarks.Put(moid, ts)
+ // Update hiwater marks
+ e.hwMarks.Put(moid, name, ts)
}
if nValues == 0 {
- log.Printf("D! [inputs.vsphere]: Missing value for: %s, %s", name, objectRef.name)
+ e.log.Debugf("Missing value for: %s, %s", name, objectRef.name)
continue
}
}
@@ -1025,6 +1204,9 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou
if objectRef.guest != "" {
t["guest"] = objectRef.guest
}
+ if gh := objectRef.lookup["guesthostname"]; gh != "" {
+ t["guesthostname"] = gh
+ }
if c, ok := e.resourceKinds["cluster"].objects[parent.parentRef.Value]; ok {
t["clustername"] = c.name
}
@@ -1055,6 +1237,17 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou
t["disk"] = cleanDiskTag(instance)
} else if strings.HasPrefix(name, "net.") {
t["interface"] = instance
+
+ // Add IP addresses to NIC data.
+ if resourceType == "vm" && objectRef.lookup != nil {
+ key := "nic/" + t["interface"] + "/"
+ if ip, ok := objectRef.lookup[key+"ipv6"]; ok {
+ t["ipv6"] = ip
+ }
+ if ip, ok := objectRef.lookup[key+"ipv4"]; ok {
+ t["ipv4"] = ip
+ }
+ }
} else if strings.HasPrefix(name, "storageAdapter.") {
t["adapter"] = instance
} else if strings.HasPrefix(name, "storagePath.") {
@@ -1069,6 +1262,15 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou
// default
t["instance"] = v.Instance
}
+
+ // Fill in custom values if they exist
+ if objectRef.customValues != nil {
+ for k, v := range objectRef.customValues {
+ if v != "" {
+ t[k] = v
+ }
+ }
+ }
}
func (e *Endpoint) makeMetricIdentifier(prefix, metric string) (string, string) {
diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go
index 599655402aba4..e49bf80f33fe5 100644
--- a/plugins/inputs/vsphere/finder.go
+++ b/plugins/inputs/vsphere/finder.go
@@ -2,7 +2,6 @@ package vsphere
import (
"context"
- "log"
"reflect"
"strings"
@@ -16,6 +15,8 @@ var childTypes map[string][]string
var addFields map[string][]string
+var containers map[string]interface{}
+
// Finder allows callers to find resources in vCenter given a query string.
type Finder struct {
client *Client
@@ -24,40 +25,55 @@ type Finder struct {
// ResourceFilter is a convenience class holding a finder and a set of paths. It is useful when you need a
// self contained object capable of returning a certain set of resources.
type ResourceFilter struct {
- finder *Finder
- resType string
- paths []string
-}
-
-type nameAndRef struct {
- name string
- ref types.ManagedObjectReference
+ finder *Finder
+ resType string
+ paths []string
+ excludePaths []string
}
// FindAll returns the union of resources found given the supplied resource type and paths.
-func (f *Finder) FindAll(ctx context.Context, resType string, paths []string, dst interface{}) error {
+func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error {
+ objs := make(map[string]types.ObjectContent)
for _, p := range paths {
- if err := f.Find(ctx, resType, p, dst); err != nil {
+ if err := f.find(ctx, resType, p, objs); err != nil {
return err
}
}
- return nil
+ if len(excludePaths) > 0 {
+ excludes := make(map[string]types.ObjectContent)
+ for _, p := range excludePaths {
+ if err := f.find(ctx, resType, p, excludes); err != nil {
+ return err
+ }
+ }
+ for k := range excludes {
+ delete(objs, k)
+ }
+ }
+ return objectContentToTypedArray(objs, dst)
}
// Find returns the resources matching the specified path.
func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}) error {
+ objs := make(map[string]types.ObjectContent)
+ err := f.find(ctx, resType, path, objs)
+ if err != nil {
+ return err
+ }
+ return objectContentToTypedArray(objs, dst)
+}
+
+func (f *Finder) find(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error {
p := strings.Split(path, "/")
flt := make([]property.Filter, len(p)-1)
for i := 1; i < len(p); i++ {
flt[i-1] = property.Filter{"name": p[i]}
}
- objs := make(map[string]types.ObjectContent)
err := f.descend(ctx, f.client.Client.ServiceContent.RootFolder, resType, flt, 0, objs)
if err != nil {
return err
}
- objectContentToTypedArray(objs, dst)
- log.Printf("D! [inputs.vsphere] Find(%s, %s) returned %d objects", resType, path, len(objs))
+ f.client.log.Debugf("Find(%s, %s) returned %d objects", resType, path, len(objs))
return nil
}
@@ -87,15 +103,21 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference,
var content []types.ObjectContent
fields := []string{"name"}
+ recurse := tokens[pos]["name"] == "**"
+
+ types := ct
if isLeaf {
- // Special case: The last token is a recursive wildcard, so we can grab everything
- // recursively in a single call.
- if tokens[pos]["name"] == "**" {
+ if af, ok := addFields[resType]; ok {
+ fields = append(fields, af...)
+ }
+ if recurse {
+ // Special case: The last token is a recursive wildcard, so we can grab everything
+ // recursively in a single call.
v2, err := m.CreateContainerView(ctx, root, []string{resType}, true)
- defer v2.Destroy(ctx)
- if af, ok := addFields[resType]; ok {
- fields = append(fields, af...)
+ if err != nil {
+ return err
}
+ defer v2.Destroy(ctx)
err = v2.Retrieve(ctx, []string{resType}, fields, &content)
if err != nil {
return err
@@ -105,23 +127,16 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference,
}
return nil
}
-
- if af, ok := addFields[resType]; ok {
- fields = append(fields, af...)
- }
- err = v.Retrieve(ctx, []string{resType}, fields, &content)
- if err != nil {
- return err
- }
- } else {
- err = v.Retrieve(ctx, ct, fields, &content)
- if err != nil {
- return err
- }
+ types = []string{resType} // Only load wanted object type at leaf level
+ }
+ err = v.Retrieve(ctx, types, fields, &content)
+ if err != nil {
+ return err
}
+ rerunAsLeaf := false
for _, c := range content {
- if !tokens[pos].MatchPropertyList(c.PropSet[:1]) {
+ if !matchName(tokens[pos], c.PropSet) {
continue
}
@@ -137,44 +152,45 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference,
}
// Deal with recursive wildcards (**)
- inc := 1 // Normally we advance one token.
- if tokens[pos]["name"] == "**" {
- if isLeaf {
- inc = 0 // Can't advance past last token, so keep descending the tree
- } else {
- // Lookahead to next token. If it matches this child, we are out of
- // the recursive wildcard handling and we can advance TWO tokens ahead, since
- // the token that ended the recursive wildcard mode is now consumed.
- if tokens[pos+1].MatchPropertyList(c.PropSet) {
- if pos < len(tokens)-2 {
+ var inc int
+ if recurse {
+ inc = 0 // By default, we stay on this token
+ if !isLeaf {
+ // Lookahead to next token.
+ if matchName(tokens[pos+1], c.PropSet) {
+ // Are we looking ahead at a leaf node that has the wanted type?
+ // Rerun the entire level as a leaf. This is needed since all properties aren't loaded
+ // when we're processing non-leaf nodes.
+ if pos == len(tokens)-2 {
+ if c.Obj.Type == resType {
+ rerunAsLeaf = true
+ continue
+ }
+ } else if _, ok := containers[c.Obj.Type]; ok {
+ // Tokens match and we're looking ahead at a container type that's not a leaf
+ // Consume this token and the next.
inc = 2
- } else {
- // We found match and it's at a leaf! Grab it!
- objs[c.Obj.String()] = c
- continue
}
- } else {
- // We didn't break out of recursicve wildcard mode yet, so stay on this token.
- inc = 0
-
}
}
+ } else {
+ // The normal case: Advance to next token before descending
+ inc = 1
}
err := f.descend(ctx, c.Obj, resType, tokens, pos+inc, objs)
if err != nil {
return err
}
}
- return nil
-}
-func nameFromObjectContent(o types.ObjectContent) string {
- for _, p := range o.PropSet {
- if p.Name == "name" {
- return p.Val.(string)
- }
+ if rerunAsLeaf {
+ // We're at a "pseudo leaf", i.e. we looked ahead a token and found that this level contains leaf nodes.
+ // Rerun the entire level as a leaf to get those nodes. This will only be executed when pos is one token
+ // before the last, to pos+1 will always point to a leaf token.
+ return f.descend(ctx, root, resType, tokens, pos+1, objs)
}
- return ""
+
+ return nil
}
func objectContentToTypedArray(objs map[string]types.ObjectContent, dst interface{}) error {
@@ -211,14 +227,23 @@ func objectContentToTypedArray(objs map[string]types.ObjectContent, dst interfac
// FindAll finds all resources matching the paths that were specified upon creation of
// the ResourceFilter.
func (r *ResourceFilter) FindAll(ctx context.Context, dst interface{}) error {
- return r.finder.FindAll(ctx, r.resType, r.paths, dst)
+ return r.finder.FindAll(ctx, r.resType, r.paths, r.excludePaths, dst)
+}
+
+func matchName(f property.Filter, props []types.DynamicProperty) bool {
+ for _, prop := range props {
+ if prop.Name == "name" {
+ return f.MatchProperty(prop)
+ }
+ }
+ return false
}
func init() {
childTypes = map[string][]string{
"HostSystem": {"VirtualMachine"},
- "ComputeResource": {"HostSystem", "ResourcePool"},
- "ClusterComputeResource": {"HostSystem", "ResourcePool"},
+ "ComputeResource": {"HostSystem", "ResourcePool", "VirtualApp"},
+ "ClusterComputeResource": {"HostSystem", "ResourcePool", "VirtualApp"},
"Datacenter": {"Folder"},
"Folder": {
"Folder",
@@ -231,10 +256,20 @@ func init() {
}
addFields = map[string][]string{
- "HostSystem": {"parent"},
- "VirtualMachine": {"runtime.host", "config.guestId", "config.uuid", "runtime.powerState"},
- "Datastore": {"parent", "info"},
- "ClusterComputeResource": {"parent"},
- "Datacenter": {"parent"},
+ "HostSystem": {"parent", "summary.customValue", "customValue"},
+ "VirtualMachine": {"runtime.host", "config.guestId", "config.uuid", "runtime.powerState",
+ "summary.customValue", "guest.net", "guest.hostName", "customValue"},
+ "Datastore": {"parent", "info", "customValue"},
+ "ClusterComputeResource": {"parent", "customValue"},
+ "Datacenter": {"parent", "customValue"},
+ }
+
+ containers = map[string]interface{}{
+ "HostSystem": nil,
+ "ComputeResource": nil,
+ "Datacenter": nil,
+ "ResourcePool": nil,
+ "Folder": nil,
+ "VirtualApp": nil,
}
}
diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go
index 4f73c4fe89155..1be75d7605173 100644
--- a/plugins/inputs/vsphere/tscache.go
+++ b/plugins/inputs/vsphere/tscache.go
@@ -10,7 +10,6 @@ import (
type TSCache struct {
ttl time.Duration
table map[string]time.Time
- done chan struct{}
mux sync.RWMutex
}
@@ -19,7 +18,6 @@ func NewTSCache(ttl time.Duration) *TSCache {
return &TSCache{
ttl: ttl,
table: make(map[string]time.Time),
- done: make(chan struct{}),
}
}
@@ -34,15 +32,15 @@ func (t *TSCache) Purge() {
n++
}
}
- log.Printf("D! [inputs.vsphere] Purged timestamp cache. %d deleted with %d remaining", n, len(t.table))
+ log.Printf("D! [inputs.vsphere] purged timestamp cache. %d deleted with %d remaining", n, len(t.table))
}
// IsNew returns true if the supplied timestamp for the supplied key is more recent than the
// timestamp we have on record.
-func (t *TSCache) IsNew(key string, tm time.Time) bool {
+func (t *TSCache) IsNew(key string, metricName string, tm time.Time) bool {
t.mux.RLock()
defer t.mux.RUnlock()
- v, ok := t.table[key]
+ v, ok := t.table[makeKey(key, metricName)]
if !ok {
return true // We've never seen this before, so consider everything a new sample
}
@@ -50,16 +48,20 @@ func (t *TSCache) IsNew(key string, tm time.Time) bool {
}
// Get returns a timestamp (if present)
-func (t *TSCache) Get(key string) (time.Time, bool) {
+func (t *TSCache) Get(key string, metricName string) (time.Time, bool) {
t.mux.RLock()
defer t.mux.RUnlock()
- ts, ok := t.table[key]
+ ts, ok := t.table[makeKey(key, metricName)]
return ts, ok
}
// Put updates the latest timestamp for the supplied key.
-func (t *TSCache) Put(key string, time time.Time) {
+func (t *TSCache) Put(key string, metricName string, time time.Time) {
t.mux.Lock()
defer t.mux.Unlock()
- t.table[key] = time
+ t.table[makeKey(key, metricName)] = time
+}
+
+func makeKey(resource string, metric string) string {
+ return resource + "|" + metric
}
diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go
index d64b5273d312a..9bafcd92113c3 100644
--- a/plugins/inputs/vsphere/vsphere.go
+++ b/plugins/inputs/vsphere/vsphere.go
@@ -2,13 +2,12 @@ package vsphere
import (
"context"
- "log"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/vmware/govmomi/vim25/soap"
)
@@ -23,24 +22,32 @@ type VSphere struct {
DatacenterMetricInclude []string
DatacenterMetricExclude []string
DatacenterInclude []string
+ DatacenterExclude []string
ClusterInstances bool
ClusterMetricInclude []string
ClusterMetricExclude []string
ClusterInclude []string
+ ClusterExclude []string
HostInstances bool
HostMetricInclude []string
HostMetricExclude []string
HostInclude []string
+ HostExclude []string
VMInstances bool `toml:"vm_instances"`
VMMetricInclude []string `toml:"vm_metric_include"`
VMMetricExclude []string `toml:"vm_metric_exclude"`
VMInclude []string `toml:"vm_include"`
+ VMExclude []string `toml:"vm_exclude"`
DatastoreInstances bool
DatastoreMetricInclude []string
DatastoreMetricExclude []string
DatastoreInclude []string
+ DatastoreExclude []string
Separator string
+ CustomAttributeInclude []string
+ CustomAttributeExclude []string
UseIntSamples bool
+ IpAddresses []string
MaxQueryObjects int
MaxQueryMetrics int
@@ -55,6 +62,8 @@ type VSphere struct {
// Mix in the TLS/SSL goodness from core
tls.ClientConfig
+
+ Log telegraf.Logger
}
var sampleConfig = `
@@ -66,6 +75,8 @@ var sampleConfig = `
## VMs
## Typical VM metrics (if omitted or empty, all metrics are collected)
+ # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+ # vm_exclude = [] # Inventory paths to exclude
vm_metric_include = [
"cpu.demand.average",
"cpu.idle.summation",
@@ -107,6 +118,8 @@ var sampleConfig = `
## Hosts
## Typical host metrics (if omitted or empty, all metrics are collected)
+ # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+ # host_exclude [] # Inventory paths to exclude
host_metric_include = [
"cpu.coreUtilization.average",
"cpu.costop.summation",
@@ -155,33 +168,43 @@ var sampleConfig = `
"storageAdapter.write.average",
"sys.uptime.latest",
]
+ ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
+ # ip_addresses = ["ipv6", "ipv4" ]
+
# host_metric_exclude = [] ## Nothing excluded by default
# host_instances = true ## true by default
+
## Clusters
+ # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+ # cluster_exclude = [] # Inventory paths to exclude
# cluster_metric_include = [] ## if omitted or empty, all metrics are collected
# cluster_metric_exclude = [] ## Nothing excluded by default
# cluster_instances = false ## false by default
## Datastores
+ # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+ # datastore_exclude = [] # Inventory paths to exclude
# datastore_metric_include = [] ## if omitted or empty, all metrics are collected
# datastore_metric_exclude = [] ## Nothing excluded by default
- # datastore_instances = false ## false by default for Datastores only
+ # datastore_instances = false ## false by default
## Datacenters
+ # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+ # datacenter_exclude = [] # Inventory paths to exclude
datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
- # datacenter_instances = false ## false by default for Datastores only
+ # datacenter_instances = false ## false by default
## Plugin Settings
## separator character to use for measurement and field names (default: "_")
# separator = "_"
- ## number of objects to retreive per query for realtime resources (vms and hosts)
+ ## number of objects to retrieve per query for realtime resources (vms and hosts)
## set to 64 for vCenter 5.5 and 6.0 (default: 256)
# max_query_objects = 256
- ## number of metrics to retreive per query for non-realtime resources (clusters and datastores)
+ ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
## set to 64 for vCenter 5.5 and 6.0 (default: 256)
# max_query_metrics = 256
@@ -189,11 +212,6 @@ var sampleConfig = `
# collect_concurrency = 1
# discover_concurrency = 1
- ## whether or not to force discovery of new objects on initial gather call before collecting metrics
- ## when true for large environments this may cause errors for time elapsed while collecting metrics
- ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
- # force_discover_on_init = false
-
## the interval before (re)discovering objects subject to metrics collection (default: 300s)
# object_discovery_interval = "300s"
@@ -208,6 +226,17 @@ var sampleConfig = `
## preserve the full precision when averaging takes place.
# use_int_samples = true
+ ## Custom attributes from vCenter can be very useful for queries in order to slice the
+ ## metrics along different dimension and for forming ad-hoc relationships. They are disabled
+ ## by default, since they can add a considerable amount of tags to the resulting metrics. To
+ ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+ ## to select the attributes you want to include.
+ ## By default, since they can add a considerable amount of tags to the resulting metrics. To
+ ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+ ## to select the attributes you want to include.
+ # custom_attribute_include = []
+ # custom_attribute_exclude = ["*"]
+
## Optional SSL Config
# ssl_ca = "/path/to/cafile"
# ssl_cert = "/path/to/certfile"
@@ -230,10 +259,15 @@ func (v *VSphere) Description() string {
// Start is called from telegraf core when a plugin is started and allows it to
// perform initialization tasks.
func (v *VSphere) Start(acc telegraf.Accumulator) error {
- log.Println("D! [inputs.vsphere]: Starting plugin")
+ v.Log.Info("Starting plugin")
ctx, cancel := context.WithCancel(context.Background())
v.cancel = cancel
+ // Check for deprecated settings
+ if !v.ForceDiscoverOnInit {
+ v.Log.Warn("The 'force_discover_on_init' configuration parameter has been deprecated. Setting it to 'false' has no effect")
+ }
+
// Create endpoints, one for each vCenter we're monitoring
v.endpoints = make([]*Endpoint, len(v.Vcenters))
for i, rawURL := range v.Vcenters {
@@ -241,7 +275,7 @@ func (v *VSphere) Start(acc telegraf.Accumulator) error {
if err != nil {
return err
}
- ep, err := NewEndpoint(ctx, v, u)
+ ep, err := NewEndpoint(ctx, v, u, v.Log)
if err != nil {
return err
}
@@ -253,7 +287,7 @@ func (v *VSphere) Start(acc telegraf.Accumulator) error {
// Stop is called from telegraf core when a plugin is stopped and allows it to
// perform shutdown tasks.
func (v *VSphere) Stop() {
- log.Println("D! [inputs.vsphere]: Stopping plugin")
+ v.Log.Info("Stopping plugin")
v.cancel()
// Wait for all endpoints to finish. No need to wait for
@@ -262,7 +296,7 @@ func (v *VSphere) Stop() {
// wait for any discovery to complete by trying to grab the
// "busy" mutex.
for _, ep := range v.endpoints {
- log.Printf("D! [inputs.vsphere]: Waiting for endpoint %s to finish", ep.URL.Host)
+ v.Log.Debugf("Waiting for endpoint %q to finish", ep.URL.Host)
func() {
ep.busy.Lock() // Wait until discovery is finished
defer ep.busy.Unlock()
@@ -321,13 +355,16 @@ func init() {
DatastoreMetricExclude: nil,
DatastoreInclude: []string{"/*/datastore/**"},
Separator: "_",
+ CustomAttributeInclude: []string{},
+ CustomAttributeExclude: []string{"*"},
UseIntSamples: true,
+ IpAddresses: []string{},
MaxQueryObjects: 256,
MaxQueryMetrics: 256,
CollectConcurrency: 1,
DiscoverConcurrency: 1,
- ForceDiscoverOnInit: false,
+ ForceDiscoverOnInit: true,
ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300},
Timeout: internal.Duration{Duration: time.Second * 60},
}
diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go
index eff56a89d2bc1..20e26d293bece 100644
--- a/plugins/inputs/vsphere/vsphere_test.go
+++ b/plugins/inputs/vsphere/vsphere_test.go
@@ -4,17 +4,15 @@ import (
"context"
"crypto/tls"
"fmt"
+ "os"
"regexp"
- "sort"
"strings"
- "sync"
- "sync/atomic"
"testing"
"time"
"unsafe"
"github.com/influxdata/telegraf/internal"
- itls "github.com/influxdata/telegraf/internal/tls"
+ itls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/testutil"
"github.com/influxdata/toml"
"github.com/stretchr/testify/require"
@@ -25,88 +23,25 @@ import (
)
var configHeader = `
-# Telegraf Configuration
-#
-# Telegraf is entirely plugin driven. All metrics are gathered from the
-# declared inputs, and sent to the declared outputs.
-#
-# Plugins must be declared in here to be active.
-# To deactivate a plugin, comment out the name and any variables.
-#
-# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
-# file would generate.
-#
-# Environment variables can be used anywhere in this config file, simply prepend
-# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
-# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
-
-
-# Global tags can be specified here in key="value" format.
-[global_tags]
- # dc = "us-east-1" # will tag all metrics with dc=us-east-1
- # rack = "1a"
- ## Environment variables can be used as tags, and throughout the config file
- # user = "$USER"
-
-
-# Configuration for telegraf agent
[agent]
- ## Default data collection interval for all inputs
interval = "10s"
- ## Rounds collection interval to 'interval'
- ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
-
- ## Telegraf will send metrics to outputs in batches of at most
- ## metric_batch_size metrics.
- ## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
-
- ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
- ## output, and will flush this buffer on a successful write. Oldest metrics
- ## are dropped first when this buffer fills.
- ## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
-
- ## Collection jitter is used to jitter the collection by a random amount.
- ## Each plugin will sleep for a random time within jitter before collecting.
- ## This can be used to avoid many plugins querying things like sysfs at the
- ## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
-
- ## Default flushing interval for all outputs. You shouldn't set this below
- ## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
- ## Jitter the flush interval by a random amount. This is primarily to avoid
- ## large write spikes for users running a large number of telegraf instances.
- ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
-
- ## By default or when set to "0s", precision will be set to the same
- ## timestamp order as the collection interval, with the maximum being 1s.
- ## ie, when interval = "10s", precision will be "1s"
- ## when interval = "250ms", precision will be "1ms"
- ## Precision will NOT be used for service inputs. It is up to each individual
- ## service input to set the timestamp at the appropriate precision.
- ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
-
- ## Logging configuration:
- ## Run telegraf with debug log messages.
debug = false
- ## Run telegraf in quiet mode (error log messages only).
quiet = false
- ## Specify the log file name. The empty string means to log to stderr.
logfile = ""
-
- ## Override default hostname, if empty use os.Hostname()
hostname = ""
- ## If set to true, do no set the "host" tag in the telegraf agent.
omit_hostname = false
`
func defaultVSphere() *VSphere {
return &VSphere{
+ Log: testutil.Logger{},
ClusterMetricInclude: []string{
"cpu.usage.*",
"cpu.usagemhz.*",
@@ -202,7 +137,7 @@ func defaultVSphere() *VSphere {
VMInclude: []string{"/**"},
DatastoreMetricInclude: []string{
"disk.used.*",
- "disk.provsioned.*"},
+ "disk.provisioned.*"},
DatastoreMetricExclude: nil,
DatastoreInclude: []string{"/**"},
DatacenterMetricInclude: nil,
@@ -217,12 +152,17 @@ func defaultVSphere() *VSphere {
ForceDiscoverOnInit: true,
DiscoverConcurrency: 1,
CollectConcurrency: 1,
+ Separator: ".",
}
}
-func createSim() (*simulator.Model, *simulator.Server, error) {
+func createSim(folders int) (*simulator.Model, *simulator.Server, error) {
model := simulator.VPX()
+ model.Folder = folders
+ model.Datacenter = 2
+ //model.App = 1
+
err := model.Create()
if err != nil {
return nil, nil, err
@@ -245,7 +185,8 @@ func testAlignUniform(t *testing.T, n int) {
}
values[i] = 1
}
- newInfo, newValues := alignSamples(info, values, 60*time.Second)
+ e := Endpoint{log: testutil.Logger{}}
+ newInfo, newValues := e.alignSamples(info, values, 60*time.Second)
require.Equal(t, n/3, len(newInfo), "Aligned infos have wrong size")
require.Equal(t, n/3, len(newValues), "Aligned values have wrong size")
for _, v := range newValues {
@@ -270,7 +211,8 @@ func TestAlignMetrics(t *testing.T) {
}
values[i] = int64(i%3 + 1)
}
- newInfo, newValues := alignSamples(info, values, 60*time.Second)
+ e := Endpoint{log: testutil.Logger{}}
+ newInfo, newValues := e.alignSamples(info, values, 60*time.Second)
require.Equal(t, n/3, len(newInfo), "Aligned infos have wrong size")
require.Equal(t, n/3, len(newValues), "Aligned values have wrong size")
for _, v := range newValues {
@@ -290,64 +232,6 @@ func TestParseConfig(t *testing.T) {
require.NotNil(t, tab)
}
-func TestThrottledExecutor(t *testing.T) {
- max := int64(0)
- ngr := int64(0)
- n := 10000
- var mux sync.Mutex
- results := make([]int, 0, n)
- te := NewThrottledExecutor(5)
- for i := 0; i < n; i++ {
- func(i int) {
- te.Run(context.Background(), func() {
- atomic.AddInt64(&ngr, 1)
- mux.Lock()
- defer mux.Unlock()
- results = append(results, i*2)
- if ngr > max {
- max = ngr
- }
- time.Sleep(100 * time.Microsecond)
- atomic.AddInt64(&ngr, -1)
- })
- }(i)
- }
- te.Wait()
- sort.Ints(results)
- for i := 0; i < n; i++ {
- require.Equal(t, results[i], i*2, "Some jobs didn't run")
- }
- require.Equal(t, int64(5), max, "Wrong number of goroutines spawned")
-}
-
-func TestTimeout(t *testing.T) {
- // Don't run test on 32-bit machines due to bug in simulator.
- // https://github.com/vmware/govmomi/issues/1330
- var i int
- if unsafe.Sizeof(i) < 8 {
- return
- }
-
- m, s, err := createSim()
- if err != nil {
- t.Fatal(err)
- }
- defer m.Remove()
- defer s.Close()
-
- v := defaultVSphere()
- var acc testutil.Accumulator
- v.Vcenters = []string{s.URL.String()}
- v.Timeout = internal.Duration{Duration: 1 * time.Nanosecond}
- require.NoError(t, v.Start(nil)) // We're not using the Accumulator, so it can be nil.
- defer v.Stop()
- err = v.Gather(&acc)
-
- // The accumulator must contain exactly one error and it must be a deadline exceeded.
- require.Equal(t, 1, len(acc.Errors))
- require.True(t, strings.Contains(acc.Errors[0].Error(), "context deadline exceeded"))
-}
-
func TestMaxQuery(t *testing.T) {
// Don't run test on 32-bit machines due to bug in simulator.
// https://github.com/vmware/govmomi/issues/1330
@@ -355,7 +239,7 @@ func TestMaxQuery(t *testing.T) {
if unsafe.Sizeof(i) < 8 {
return
}
- m, s, err := createSim()
+ m, s, err := createSim(0)
if err != nil {
t.Fatal(err)
}
@@ -391,6 +275,20 @@ func TestMaxQuery(t *testing.T) {
c2.close()
}
+func testLookupVM(ctx context.Context, t *testing.T, f *Finder, path string, expected int, expectedName string) {
+ poweredOn := types.VirtualMachinePowerState("poweredOn")
+ var vm []mo.VirtualMachine
+ err := f.Find(ctx, "VirtualMachine", path, &vm)
+ require.NoError(t, err)
+ require.Equal(t, expected, len(vm))
+ if expectedName != "" {
+ require.Equal(t, expectedName, vm[0].Name)
+ }
+ for _, v := range vm {
+ require.Equal(t, poweredOn, v.Runtime.PowerState)
+ }
+}
+
func TestFinder(t *testing.T) {
// Don't run test on 32-bit machines due to bug in simulator.
// https://github.com/vmware/govmomi/issues/1330
@@ -399,7 +297,7 @@ func TestFinder(t *testing.T) {
return
}
- m, s, err := createSim()
+ m, s, err := createSim(0)
if err != nil {
t.Fatal(err)
}
@@ -413,13 +311,13 @@ func TestFinder(t *testing.T) {
f := Finder{c}
- dc := []mo.Datacenter{}
+ var dc []mo.Datacenter
err = f.Find(ctx, "Datacenter", "/DC0", &dc)
require.NoError(t, err)
require.Equal(t, 1, len(dc))
require.Equal(t, "DC0", dc[0].Name)
- host := []mo.HostSystem{}
+ var host []mo.HostSystem
err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host)
require.NoError(t, err)
require.Equal(t, 1, len(host))
@@ -436,65 +334,77 @@ func TestFinder(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 3, len(host))
- vm := []mo.VirtualMachine{}
- err = f.Find(ctx, "VirtualMachine", "/DC0/vm/DC0_H0_VM0", &vm)
- require.NoError(t, err)
- require.Equal(t, 1, len(dc))
- require.Equal(t, "DC0_H0_VM0", vm[0].Name)
+ var vm []mo.VirtualMachine
+ testLookupVM(ctx, t, &f, "/DC0/vm/DC0_H0_VM0", 1, "")
+ testLookupVM(ctx, t, &f, "/DC0/vm/DC0_C0*", 2, "")
+ testLookupVM(ctx, t, &f, "/DC0/*/DC0_H0_VM0", 1, "DC0_H0_VM0")
+ testLookupVM(ctx, t, &f, "/DC0/*/DC0_H0_*", 2, "")
+ testLookupVM(ctx, t, &f, "/DC0/**/DC0_H0_VM*", 2, "")
+ testLookupVM(ctx, t, &f, "/DC0/**", 4, "")
+ testLookupVM(ctx, t, &f, "/DC1/**", 4, "")
+ testLookupVM(ctx, t, &f, "/**", 8, "")
+ testLookupVM(ctx, t, &f, "/**/vm/**", 8, "")
+ testLookupVM(ctx, t, &f, "/*/host/**/*DC*", 8, "")
+ testLookupVM(ctx, t, &f, "/*/host/**/*DC*VM*", 8, "")
+ testLookupVM(ctx, t, &f, "/*/host/**/*DC*/*/*DC*", 4, "")
vm = []mo.VirtualMachine{}
- err = f.Find(ctx, "VirtualMachine", "/DC0/vm/DC0_C0*", &vm)
- require.NoError(t, err)
- require.Equal(t, 1, len(dc))
-
- vm = []mo.VirtualMachine{}
- err = f.Find(ctx, "VirtualMachine", "/DC0/*/DC0_H0_VM0", &vm)
- require.NoError(t, err)
- require.Equal(t, 1, len(dc))
- require.Equal(t, "DC0_H0_VM0", vm[0].Name)
-
- vm = []mo.VirtualMachine{}
- err = f.Find(ctx, "VirtualMachine", "/DC0/*/DC0_H0_*", &vm)
- require.NoError(t, err)
- require.Equal(t, 2, len(vm))
-
- vm = []mo.VirtualMachine{}
- err = f.Find(ctx, "VirtualMachine", "/DC0/**/DC0_H0_VM*", &vm)
- require.NoError(t, err)
- require.Equal(t, 2, len(vm))
-
- vm = []mo.VirtualMachine{}
- err = f.Find(ctx, "VirtualMachine", "/DC0/**", &vm)
+ err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, []string{}, &vm)
require.NoError(t, err)
require.Equal(t, 4, len(vm))
+ rf := ResourceFilter{
+ finder: &f,
+ paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"},
+ excludePaths: []string{"/DC0/vm/DC0_H0_VM0"},
+ resType: "VirtualMachine",
+ }
vm = []mo.VirtualMachine{}
- err = f.Find(ctx, "VirtualMachine", "/**", &vm)
- require.NoError(t, err)
- require.Equal(t, 4, len(vm))
-
+ require.NoError(t, rf.FindAll(ctx, &vm))
+ require.Equal(t, 3, len(vm))
+
+ rf = ResourceFilter{
+ finder: &f,
+ paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"},
+ excludePaths: []string{"/**"},
+ resType: "VirtualMachine",
+ }
vm = []mo.VirtualMachine{}
- err = f.Find(ctx, "VirtualMachine", "/**/DC0_H0_VM*", &vm)
- require.NoError(t, err)
- require.Equal(t, 2, len(vm))
-
+ require.NoError(t, rf.FindAll(ctx, &vm))
+ require.Equal(t, 0, len(vm))
+
+ rf = ResourceFilter{
+ finder: &f,
+ paths: []string{"/**"},
+ excludePaths: []string{"/**"},
+ resType: "VirtualMachine",
+ }
vm = []mo.VirtualMachine{}
- err = f.Find(ctx, "VirtualMachine", "/**/vm/**", &vm)
- require.NoError(t, err)
- require.Equal(t, 4, len(vm))
-
+ require.NoError(t, rf.FindAll(ctx, &vm))
+ require.Equal(t, 0, len(vm))
+
+ rf = ResourceFilter{
+ finder: &f,
+ paths: []string{"/**"},
+ excludePaths: []string{"/this won't match anything"},
+ resType: "VirtualMachine",
+ }
vm = []mo.VirtualMachine{}
- err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, &vm)
- require.NoError(t, err)
- require.Equal(t, 4, len(vm))
-
+ require.NoError(t, rf.FindAll(ctx, &vm))
+ require.Equal(t, 8, len(vm))
+
+ rf = ResourceFilter{
+ finder: &f,
+ paths: []string{"/**"},
+ excludePaths: []string{"/**/*VM0"},
+ resType: "VirtualMachine",
+ }
vm = []mo.VirtualMachine{}
- err = f.FindAll(ctx, "VirtualMachine", []string{"/**"}, &vm)
- require.NoError(t, err)
+ require.NoError(t, rf.FindAll(ctx, &vm))
require.Equal(t, 4, len(vm))
}
-func TestAll(t *testing.T) {
+func TestFolders(t *testing.T) {
// Don't run test on 32-bit machines due to bug in simulator.
// https://github.com/vmware/govmomi/issues/1330
var i int
@@ -502,19 +412,142 @@ func TestAll(t *testing.T) {
return
}
- m, s, err := createSim()
+ m, s, err := createSim(1)
if err != nil {
t.Fatal(err)
}
defer m.Remove()
defer s.Close()
- var acc testutil.Accumulator
+ ctx := context.Background()
+
v := defaultVSphere()
- v.Vcenters = []string{s.URL.String()}
- v.Start(&acc)
+
+ c, err := NewClient(ctx, s.URL, v)
+
+ f := Finder{c}
+
+ var folder []mo.Folder
+ err = f.Find(ctx, "Folder", "/F0", &folder)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(folder))
+ require.Equal(t, "F0", folder[0].Name)
+
+ var dc []mo.Datacenter
+ err = f.Find(ctx, "Datacenter", "/F0/DC1", &dc)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(dc))
+ require.Equal(t, "DC1", dc[0].Name)
+
+ testLookupVM(ctx, t, &f, "/F0/DC0/vm/**/F*", 0, "")
+ testLookupVM(ctx, t, &f, "/F0/DC1/vm/**/F*/*VM*", 4, "")
+ testLookupVM(ctx, t, &f, "/F0/DC1/vm/**/F*/**", 4, "")
+}
+
+func TestCollection(t *testing.T) {
+ testCollection(t, false)
+}
+
+func TestCollectionNoClusterMetrics(t *testing.T) {
+ testCollection(t, true)
+}
+
+func testCollection(t *testing.T, excludeClusters bool) {
+ mustHaveMetrics := map[string]struct{}{
+ "vsphere.vm.cpu": {},
+ "vsphere.vm.mem": {},
+ "vsphere.vm.net": {},
+ "vsphere.host.cpu": {},
+ "vsphere.host.mem": {},
+ "vsphere.host.net": {},
+ "vsphere.datastore.disk": {},
+ }
+ vCenter := os.Getenv("VCENTER_URL")
+ username := os.Getenv("VCENTER_USER")
+ password := os.Getenv("VCENTER_PASSWORD")
+ v := defaultVSphere()
+ if vCenter != "" {
+ v.Vcenters = []string{vCenter}
+ v.Username = username
+ v.Password = password
+ } else {
+
+ // Don't run test on 32-bit machines due to bug in simulator.
+ // https://github.com/vmware/govmomi/issues/1330
+ var i int
+ if unsafe.Sizeof(i) < 8 {
+ return
+ }
+
+ m, s, err := createSim(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer m.Remove()
+ defer s.Close()
+ v.Vcenters = []string{s.URL.String()}
+ }
+ if excludeClusters {
+ v.ClusterMetricExclude = []string{"*"}
+ }
+
+ var acc testutil.Accumulator
+
+ require.NoError(t, v.Start(&acc))
defer v.Stop()
require.NoError(t, v.Gather(&acc))
require.Equal(t, 0, len(acc.Errors), fmt.Sprintf("Errors found: %s", acc.Errors))
require.True(t, len(acc.Metrics) > 0, "No metrics were collected")
+ cache := make(map[string]string)
+ client, err := v.endpoints[0].clientFactory.GetClient(context.Background())
+ require.NoError(t, err)
+ hostCache := make(map[string]string)
+ for _, m := range acc.Metrics {
+ delete(mustHaveMetrics, m.Measurement)
+
+ if strings.HasPrefix(m.Measurement, "vsphere.vm.") {
+ mustContainAll(t, m.Tags, []string{"esxhostname", "moid", "vmname", "guest", "dcname", "uuid", "vmname"})
+ hostName := m.Tags["esxhostname"]
+ hostMoid, ok := hostCache[hostName]
+ if !ok {
+ // We have to follow the host parent path to locate a cluster. Look up the host!
+ finder := Finder{client}
+ var hosts []mo.HostSystem
+ finder.Find(context.Background(), "HostSystem", "/**/"+hostName, &hosts)
+ require.NotEmpty(t, hosts)
+ hostMoid = hosts[0].Reference().Value
+ hostCache[hostName] = hostMoid
+ }
+ if isInCluster(t, v, client, cache, "HostSystem", hostMoid) { // If the VM lives in a cluster
+ mustContainAll(t, m.Tags, []string{"clustername"})
+ }
+ } else if strings.HasPrefix(m.Measurement, "vsphere.host.") {
+ if isInCluster(t, v, client, cache, "HostSystem", m.Tags["moid"]) { // If the host lives in a cluster
+ mustContainAll(t, m.Tags, []string{"esxhostname", "clustername", "moid", "dcname"})
+ } else {
+ mustContainAll(t, m.Tags, []string{"esxhostname", "moid", "dcname"})
+ }
+ } else if strings.HasPrefix(m.Measurement, "vsphere.cluster.") {
+ mustContainAll(t, m.Tags, []string{"clustername", "moid", "dcname"})
+ } else {
+ mustContainAll(t, m.Tags, []string{"moid", "dcname"})
+ }
+ }
+ require.Empty(t, mustHaveMetrics, "Some metrics were not found")
+}
+
+func isInCluster(t *testing.T, v *VSphere, client *Client, cache map[string]string, resourceKind, moid string) bool {
+ ctx := context.Background()
+ ref := types.ManagedObjectReference{
+ Type: resourceKind,
+ Value: moid,
+ }
+ _, ok := v.endpoints[0].getAncestorName(ctx, client, "ClusterComputeResource", cache, ref)
+ return ok
+}
+
+func mustContainAll(t *testing.T, tagMap map[string]string, mustHave []string) {
+ for _, tag := range mustHave {
+ require.Contains(t, tagMap, tag)
+ }
}
diff --git a/plugins/inputs/webhooks/README.md b/plugins/inputs/webhooks/README.md
index c6c7daf355116..2eea2a537adee 100644
--- a/plugins/inputs/webhooks/README.md
+++ b/plugins/inputs/webhooks/README.md
@@ -1,4 +1,4 @@
-# Webhooks
+# Webhooks Input Plugin
This is a Telegraf service plugin that start an http server and register multiple webhook listeners.
diff --git a/plugins/inputs/webhooks/github/README.md b/plugins/inputs/webhooks/github/README.md
index 5115d287cdba5..4a4e64c730a67 100644
--- a/plugins/inputs/webhooks/github/README.md
+++ b/plugins/inputs/webhooks/github/README.md
@@ -78,7 +78,7 @@ The tag values and field values show the place on the incoming JSON object where
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.deployment.sha` string
* 'task' = `event.deployment.task` string
-* 'environment' = `event.deployment.evnironment` string
+* 'environment' = `event.deployment.environment` string
* 'description' = `event.deployment.description` string
#### [`deployment_status` event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent)
@@ -96,7 +96,7 @@ The tag values and field values show the place on the incoming JSON object where
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.deployment.sha` string
* 'task' = `event.deployment.task` string
-* 'environment' = `event.deployment.evnironment` string
+* 'environment' = `event.deployment.environment` string
* 'description' = `event.deployment.description` string
* 'depState' = `event.deployment_status.state` string
* 'depDescription' = `event.deployment_status.description` string
diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md
index 688898db00d1f..8244fee8add1b 100644
--- a/plugins/inputs/webhooks/particle/README.md
+++ b/plugins/inputs/webhooks/particle/README.md
@@ -3,7 +3,7 @@
You should configure your Particle.io's Webhooks to point at the `webhooks` service. To do this go to [https://console.particle.io](https://console.particle.io/) and click `Integrations > New Integration > Webhook`. In the resulting page set `URL` to `http://:1619/particle`, and under `Advanced Settings` click on `JSON` and add:
-```
+```json
{
"measurement": "your_measurement_name"
}
@@ -31,7 +31,7 @@ String data = String::format("{ \"tags\" : {
```
Escaping the "" is required in the source file.
-The number of tag values and field values is not restrictied so you can send as many values per webhook call as you'd like.
+The number of tag values and field values is not restricted so you can send as many values per webhook call as you'd like.
You will need to enable JSON messages in the Webhooks setup of Particle.io, and make sure to check the "include default data" box as well.
diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md
index 11496baff1c7a..1bb4bcb34a7ff 100644
--- a/plugins/inputs/win_perf_counters/README.md
+++ b/plugins/inputs/win_perf_counters/README.md
@@ -1,4 +1,4 @@
-# win_perf_counters readme
+# Windows Performance Counters Input Plugin
This document presents the input plugin to read Performance Counters on Windows operating systems.
@@ -173,7 +173,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
## Examples
### Generic Queries
-```
+```toml
[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
# Processor usage, alternative to native, reports on a per core.
@@ -217,7 +217,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
```
### Active Directory Domain Controller
-```
+```toml
[[inputs.win_perf_counters]]
[inputs.win_perf_counters.tags]
monitorgroup = "ActiveDirectory"
@@ -245,7 +245,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
```
### DFS Namespace + Domain Controllers
-```
+```toml
[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
# AD, DFS N, Useful if the server hosts a DFS Namespace or is a Domain Controller
@@ -258,7 +258,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
```
### DFS Replication + Domain Controllers
-```
+```toml
[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
# AD, DFS R, Useful if the server hosts a DFS Replication folder or is a Domain Controller
@@ -271,7 +271,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
```
### DNS Server + Domain Controllers
-```
+```toml
[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
ObjectName = "DNS"
@@ -282,7 +282,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
```
### IIS / ASP.NET
-```
+```toml
[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
# HTTP Service request queues in the Kernel before being handed over to User Mode.
@@ -326,7 +326,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
```
### Process
-```
+```toml
[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
# Process metrics, in this case for IIS only
@@ -338,7 +338,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
```
### .NET Monitoring
-```
+```toml
[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
# .NET CLR Exceptions, in this case for IIS only
diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go
index 6a8dff10b1b88..3a24761b9d593 100644
--- a/plugins/inputs/win_perf_counters/pdh.go
+++ b/plugins/inputs/win_perf_counters/pdh.go
@@ -214,7 +214,7 @@ func init() {
//
// To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility,
// the typeperf command, and the the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a
-// full implemention of the pdh.dll API, except with a GUI and all that. The registry setting also provides an
+// full implementation of the pdh.dll API, except with a GUI and all that. The registry setting also provides an
// interface to the available counters, and can be found at the following key:
//
// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage
diff --git a/plugins/inputs/win_perf_counters/performance_query.go b/plugins/inputs/win_perf_counters/performance_query.go
index ce247a495a7ea..a59f96b84dc43 100644
--- a/plugins/inputs/win_perf_counters/performance_query.go
+++ b/plugins/inputs/win_perf_counters/performance_query.go
@@ -74,7 +74,7 @@ func (m *PerformanceQueryImpl) Open() error {
// Close closes the counterPath, releases associated counter handles and frees resources
func (m *PerformanceQueryImpl) Close() error {
if m.query == 0 {
- return errors.New("uninitialised query")
+ return errors.New("uninitialized query")
}
if ret := PdhCloseQuery(m.query); ret != ERROR_SUCCESS {
@@ -87,7 +87,7 @@ func (m *PerformanceQueryImpl) Close() error {
func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
var counterHandle PDH_HCOUNTER
if m.query == 0 {
- return 0, errors.New("uninitialised query")
+ return 0, errors.New("uninitialized query")
}
if ret := PdhAddCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS {
@@ -99,7 +99,7 @@ func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNT
func (m *PerformanceQueryImpl) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
var counterHandle PDH_HCOUNTER
if m.query == 0 {
- return 0, errors.New("uninitialised query")
+ return 0, errors.New("uninitialized query")
}
if ret := PdhAddEnglishCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS {
return 0, NewPdhError(ret)
@@ -184,7 +184,7 @@ func (m *PerformanceQueryImpl) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN
func (m *PerformanceQueryImpl) CollectData() error {
var ret uint32
if m.query == 0 {
- return errors.New("uninitialised query")
+ return errors.New("uninitialized query")
}
if ret = PdhCollectQueryData(m.query); ret != ERROR_SUCCESS {
@@ -195,7 +195,7 @@ func (m *PerformanceQueryImpl) CollectData() error {
func (m *PerformanceQueryImpl) CollectDataWithTime() (time.Time, error) {
if m.query == 0 {
- return time.Now(), errors.New("uninitialised query")
+ return time.Now(), errors.New("uninitialized query")
}
ret, mtime := PdhCollectQueryDataWithTime(m.query)
if ret != ERROR_SUCCESS {
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go
index 2bf50e5cc4151..bd130a3fd79e9 100644
--- a/plugins/inputs/win_perf_counters/win_perf_counters.go
+++ b/plugins/inputs/win_perf_counters/win_perf_counters.go
@@ -5,7 +5,6 @@ package win_perf_counters
import (
"errors"
"fmt"
- "log"
"strings"
"time"
@@ -36,9 +35,12 @@ var sampleConfig = `
ObjectName = "Processor"
Instances = ["*"]
Counters = [
- "%% Idle Time", "%% Interrupt Time",
- "%% Privileged Time", "%% User Time",
- "%% Processor Time"
+ "% Idle Time",
+ "% Interrupt Time",
+ "% Privileged Time",
+ "% User Time",
+ "% Processor Time",
+ "% DPC Time",
]
Measurement = "win_cpu"
# Set to true to include _Total instance when querying for all (*).
@@ -51,14 +53,56 @@ var sampleConfig = `
ObjectName = "LogicalDisk"
Instances = ["*"]
Counters = [
- "%% Idle Time", "%% Disk Time","%% Disk Read Time",
- "%% Disk Write Time", "%% User Time", "Current Disk Queue Length"
+ "% Idle Time",
+ "% Disk Time",
+ "% Disk Read Time",
+ "% Disk Write Time",
+ "% User Time",
+ "% Free Space",
+ "Current Disk Queue Length",
+ "Free Megabytes",
]
Measurement = "win_disk"
+ [[inputs.win_perf_counters.object]]
+ ObjectName = "PhysicalDisk"
+ Instances = ["*"]
+ Counters = [
+ "Disk Read Bytes/sec",
+ "Disk Write Bytes/sec",
+ "Current Disk Queue Length",
+ "Disk Reads/sec",
+ "Disk Writes/sec",
+ "% Disk Time",
+ "% Disk Read Time",
+ "% Disk Write Time",
+ ]
+ Measurement = "win_diskio"
+
+ [[inputs.win_perf_counters.object]]
+ ObjectName = "Network Interface"
+ Instances = ["*"]
+ Counters = [
+ "Bytes Received/sec",
+ "Bytes Sent/sec",
+ "Packets Received/sec",
+ "Packets Sent/sec",
+ "Packets Received Discarded",
+ "Packets Outbound Discarded",
+ "Packets Received Errors",
+ "Packets Outbound Errors",
+ ]
+ Measurement = "win_net"
+
+
[[inputs.win_perf_counters.object]]
ObjectName = "System"
- Counters = ["Context Switches/sec","System Calls/sec"]
+ Counters = [
+ "Context Switches/sec",
+ "System Calls/sec",
+ "Processor Queue Length",
+ "System Up Time",
+ ]
Instances = ["------"]
Measurement = "win_system"
@@ -67,12 +111,30 @@ var sampleConfig = `
# such as from the Memory object.
ObjectName = "Memory"
Counters = [
- "Available Bytes", "Cache Faults/sec", "Demand Zero Faults/sec",
- "Page Faults/sec", "Pages/sec", "Transition Faults/sec",
- "Pool Nonpaged Bytes", "Pool Paged Bytes"
+ "Available Bytes",
+ "Cache Faults/sec",
+ "Demand Zero Faults/sec",
+ "Page Faults/sec",
+ "Pages/sec",
+ "Transition Faults/sec",
+ "Pool Nonpaged Bytes",
+ "Pool Paged Bytes",
+ "Standby Cache Reserve Bytes",
+ "Standby Cache Normal Priority Bytes",
+ "Standby Cache Core Bytes",
]
Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath.
Measurement = "win_mem"
+
+ [[inputs.win_perf_counters.object]]
+ # Example query where the Instance portion must be removed to get data back,
+ # such as from the Paging File object.
+ ObjectName = "Paging File"
+ Counters = [
+ "% Usage",
+ ]
+ Instances = ["_Total"]
+ Measurement = "win_swap"
`
type Win_PerfCounters struct {
@@ -84,6 +146,8 @@ type Win_PerfCounters struct {
CountersRefreshInterval internal.Duration
UseWildcardsExpansion bool
+ Log telegraf.Logger
+
lastRefreshed time.Time
counters []*counter
query PerformanceQuery
@@ -226,7 +290,7 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan
m.counters = append(m.counters, newItem)
if m.PrintValid {
- log.Printf("Valid: %s\n", counterPath)
+ m.Log.Infof("Valid: %s", counterPath)
}
}
} else {
@@ -234,7 +298,7 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan
includeTotal, counterHandle}
m.counters = append(m.counters, newItem)
if m.PrintValid {
- log.Printf("Valid: %s\n", counterPath)
+ m.Log.Infof("Valid: %s", counterPath)
}
}
@@ -260,7 +324,7 @@ func (m *Win_PerfCounters) ParseConfig() error {
if err != nil {
if PerfObject.FailOnMissing || PerfObject.WarnOnMissing {
- log.Printf("Invalid counterPath: '%s'. Error: %s\n", counterPath, err.Error())
+ m.Log.Errorf("Invalid counterPath: '%s'. Error: %s\n", counterPath, err.Error())
}
if PerfObject.FailOnMissing {
return err
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go
index 546dfa1432fd9..78917c2f2261f 100644
--- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go
+++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go
@@ -4,6 +4,7 @@ package win_perf_counters
import (
"errors"
+ "fmt"
"testing"
"time"
@@ -27,15 +28,15 @@ func TestWinPerformanceQueryImpl(t *testing.T) {
_, err = query.AddCounterToQuery("")
require.Error(t, err, "uninitialized query must return errors")
- assert.True(t, strings.Contains(err.Error(), "uninitialised"))
+ assert.True(t, strings.Contains(err.Error(), "uninitialized"))
_, err = query.AddEnglishCounterToQuery("")
require.Error(t, err, "uninitialized query must return errors")
- assert.True(t, strings.Contains(err.Error(), "uninitialised"))
+ assert.True(t, strings.Contains(err.Error(), "uninitialized"))
err = query.CollectData()
require.Error(t, err, "uninitialized query must return errors")
- assert.True(t, strings.Contains(err.Error(), "uninitialised"))
+ assert.True(t, strings.Contains(err.Error(), "uninitialized"))
err = query.Open()
require.NoError(t, err)
@@ -185,11 +186,11 @@ func TestWinPerfcountersConfigGet2(t *testing.T) {
if len(m.counters) == 1 {
require.NoError(t, nil)
} else if len(m.counters) == 0 {
- var errorstring1 = "No results returned from the counterPath: " + string(len(m.counters))
+ var errorstring1 = "No results returned from the counterPath"
err2 := errors.New(errorstring1)
require.NoError(t, err2)
} else if len(m.counters) > 1 {
- var errorstring1 = "Too many results returned from the counterPath: " + string(len(m.counters))
+ var errorstring1 = fmt.Sprintf("Too many results returned from the counterPath: %v", len(m.counters))
err2 := errors.New(errorstring1)
require.NoError(t, err2)
}
@@ -233,12 +234,12 @@ func TestWinPerfcountersConfigGet3(t *testing.T) {
require.NoError(t, nil)
} else if len(m.counters) < 2 {
- var errorstring1 = "Too few results returned from the counterPath. " + string(len(m.counters))
+ var errorstring1 = fmt.Sprintf("Too few results returned from the counterPath: %v", len(m.counters))
err2 := errors.New(errorstring1)
require.NoError(t, err2)
} else if len(m.counters) > 2 {
- var errorstring1 = "Too many results returned from the counterPath: " + string(len(m.counters))
+ var errorstring1 = fmt.Sprintf("Too many results returned from the counterPath: %v", len(m.counters))
err2 := errors.New(errorstring1)
require.NoError(t, err2)
}
@@ -282,12 +283,12 @@ func TestWinPerfcountersConfigGet4(t *testing.T) {
require.NoError(t, nil)
} else if len(m.counters) < 2 {
- var errorstring1 = "Too few results returned from the counterPath: " + string(len(m.counters))
+ var errorstring1 = fmt.Sprintf("Too few results returned from the counterPath: %v", len(m.counters))
err2 := errors.New(errorstring1)
require.NoError(t, err2)
} else if len(m.counters) > 2 {
- var errorstring1 = "Too many results returned from the counterPath: " + string(len(m.counters))
+ var errorstring1 = fmt.Sprintf("Too many results returned from the counterPath: %v", len(m.counters))
err2 := errors.New(errorstring1)
require.NoError(t, err2)
}
@@ -331,13 +332,11 @@ func TestWinPerfcountersConfigGet5(t *testing.T) {
if len(m.counters) == 4 {
require.NoError(t, nil)
} else if len(m.counters) < 4 {
- var errorstring1 = "Too few results returned from the counterPath: " +
- string(len(m.counters))
+ var errorstring1 = fmt.Sprintf("Too few results returned from the counterPath: %v", len(m.counters))
err2 := errors.New(errorstring1)
require.NoError(t, err2)
} else if len(m.counters) > 4 {
- var errorstring1 = "Too many results returned from the counterPath: " +
- string(len(m.counters))
+ var errorstring1 = fmt.Sprintf("Too many results returned from the counterPath: %v", len(m.counters))
err2 := errors.New(errorstring1)
require.NoError(t, err2)
}
@@ -415,13 +414,11 @@ func TestWinPerfcountersConfigGet7(t *testing.T) {
if len(m.counters) == 2 {
require.NoError(t, nil)
} else if len(m.counters) < 2 {
- var errorstring1 = "Too few results returned from the counterPath: " +
- string(len(m.counters))
+ var errorstring1 = fmt.Sprintf("Too few results returned from the counterPath: %v", len(m.counters))
err2 := errors.New(errorstring1)
require.NoError(t, err2)
} else if len(m.counters) > 2 {
- var errorstring1 = "Too many results returned from the counterPath: " +
- string(len(m.counters))
+ var errorstring1 = fmt.Sprintf("Too many results returned from the counterPath: %v", len(m.counters))
err2 := errors.New(errorstring1)
require.NoError(t, err2)
}
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go
index 5052fb7a263a5..a11f0ace8da3a 100644
--- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go
+++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go
@@ -50,7 +50,7 @@ func (m *FakePerformanceQuery) Open() error {
func (m *FakePerformanceQuery) Close() error {
if !m.openCalled {
- return errors.New("CloSe: uninitialised query")
+ return errors.New("CloSe: uninitialized query")
}
m.openCalled = false
return nil
@@ -58,7 +58,7 @@ func (m *FakePerformanceQuery) Close() error {
func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
if !m.openCalled {
- return 0, errors.New("AddCounterToQuery: uninitialised query")
+ return 0, errors.New("AddCounterToQuery: uninitialized query")
}
if c, ok := m.counters[counterPath]; ok {
return c.handle, nil
@@ -69,7 +69,7 @@ func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNT
func (m *FakePerformanceQuery) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
if !m.openCalled {
- return 0, errors.New("AddEnglishCounterToQuery: uninitialised query")
+ return 0, errors.New("AddEnglishCounterToQuery: uninitialized query")
}
if c, ok := m.counters[counterPath]; ok {
return c.handle, nil
@@ -97,7 +97,7 @@ func (m *FakePerformanceQuery) ExpandWildCardPath(counterPath string) ([]string,
func (m *FakePerformanceQuery) GetFormattedCounterValueDouble(counterHandle PDH_HCOUNTER) (float64, error) {
if !m.openCalled {
- return 0, errors.New("GetFormattedCounterValueDouble: uninitialised query")
+ return 0, errors.New("GetFormattedCounterValueDouble: uninitialized query")
}
for _, counter := range m.counters {
if counter.handle == counterHandle {
@@ -129,7 +129,7 @@ func (m *FakePerformanceQuery) findCounterByHandle(counterHandle PDH_HCOUNTER) *
func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER) ([]CounterValue, error) {
if !m.openCalled {
- return nil, errors.New("GetFormattedCounterArrayDouble: uninitialised query")
+ return nil, errors.New("GetFormattedCounterArrayDouble: uninitialized query")
}
for _, c := range m.counters {
if c.handle == hCounter {
@@ -157,14 +157,14 @@ func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN
func (m *FakePerformanceQuery) CollectData() error {
if !m.openCalled {
- return errors.New("CollectData: uninitialised query")
+ return errors.New("CollectData: uninitialized query")
}
return nil
}
func (m *FakePerformanceQuery) CollectDataWithTime() (time.Time, error) {
if !m.openCalled {
- return time.Now(), errors.New("CollectData: uninitialised query")
+ return time.Now(), errors.New("CollectData: uninitialized query")
}
return MetricTime, nil
}
@@ -247,13 +247,17 @@ func TestCounterPathParsing(t *testing.T) {
func TestAddItemSimple(t *testing.T) {
var err error
cps1 := []string{"\\O(I)\\C"}
- m := Win_PerfCounters{PrintValid: false, Object: nil, query: &FakePerformanceQuery{
- counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}),
- expandPaths: map[string][]string{
- cps1[0]: cps1,
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: nil,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}),
+ expandPaths: map[string][]string{
+ cps1[0]: cps1,
+ },
+ vistaAndNewer: true,
+ }}
err = m.query.Open()
require.NoError(t, err)
err = m.AddItem(cps1[0], "O", "I", "c", "test", false)
@@ -265,13 +269,18 @@ func TestAddItemSimple(t *testing.T) {
func TestAddItemInvalidCountPath(t *testing.T) {
var err error
cps1 := []string{"\\O\\C"}
- m := Win_PerfCounters{PrintValid: false, Object: nil, UseWildcardsExpansion: true, query: &FakePerformanceQuery{
- counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}),
- expandPaths: map[string][]string{
- cps1[0]: {"\\O/C"},
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: nil,
+ UseWildcardsExpansion: true,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}),
+ expandPaths: map[string][]string{
+ cps1[0]: {"\\O/C"},
+ },
+ vistaAndNewer: true,
+ }}
err = m.query.Open()
require.NoError(t, err)
err = m.AddItem("\\O\\C", "O", "------", "C", "test", false)
@@ -284,16 +293,20 @@ func TestParseConfigBasic(t *testing.T) {
var err error
perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false)
cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"}
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0}),
- expandPaths: map[string][]string{
- cps1[0]: {cps1[0]},
- cps1[1]: {cps1[1]},
- cps1[2]: {cps1[2]},
- cps1[3]: {cps1[3]},
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0}),
+ expandPaths: map[string][]string{
+ cps1[0]: {cps1[0]},
+ cps1[1]: {cps1[1]},
+ cps1[2]: {cps1[2]},
+ cps1[3]: {cps1[3]},
+ },
+ vistaAndNewer: true,
+ }}
err = m.query.Open()
require.NoError(t, err)
err = m.ParseConfig()
@@ -318,14 +331,19 @@ func TestParseConfigNoInstance(t *testing.T) {
var err error
perfObjects := createPerfObject("m", "O", []string{"------"}, []string{"C1", "C2"}, false, false)
cps1 := []string{"\\O\\C1", "\\O\\C2"}
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: false, query: &FakePerformanceQuery{
- counters: createCounterMap(cps1, []float64{1.1, 1.2}, []uint32{0, 0}),
- expandPaths: map[string][]string{
- cps1[0]: {cps1[0]},
- cps1[1]: {cps1[1]},
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ UseWildcardsExpansion: false,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(cps1, []float64{1.1, 1.2}, []uint32{0, 0}),
+ expandPaths: map[string][]string{
+ cps1[0]: {cps1[0]},
+ cps1[1]: {cps1[1]},
+ },
+ vistaAndNewer: true,
+ }}
err = m.query.Open()
require.NoError(t, err)
err = m.ParseConfig()
@@ -350,15 +368,19 @@ func TestParseConfigInvalidCounterError(t *testing.T) {
var err error
perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, true, false)
cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"}
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}),
- expandPaths: map[string][]string{
- cps1[0]: {cps1[0]},
- cps1[1]: {cps1[1]},
- cps1[2]: {cps1[2]},
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}),
+ expandPaths: map[string][]string{
+ cps1[0]: {cps1[0]},
+ cps1[1]: {cps1[1]},
+ cps1[2]: {cps1[2]},
+ },
+ vistaAndNewer: true,
+ }}
err = m.query.Open()
require.NoError(t, err)
err = m.ParseConfig()
@@ -381,15 +403,19 @@ func TestParseConfigInvalidCounterNoError(t *testing.T) {
var err error
perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false)
cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"}
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}),
- expandPaths: map[string][]string{
- cps1[0]: {cps1[0]},
- cps1[1]: {cps1[1]},
- cps1[2]: {cps1[2]},
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}),
+ expandPaths: map[string][]string{
+ cps1[0]: {cps1[0]},
+ cps1[1]: {cps1[1]},
+ cps1[2]: {cps1[2]},
+ },
+ vistaAndNewer: true,
+ }}
err = m.query.Open()
require.NoError(t, err)
err = m.ParseConfig()
@@ -413,13 +439,18 @@ func TestParseConfigTotalExpansion(t *testing.T) {
var err error
perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, true, true)
cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"}
- m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}),
- expandPaths: map[string][]string{
- "\\O(*)\\*": cps1,
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ UseWildcardsExpansion: true,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}),
+ expandPaths: map[string][]string{
+ "\\O(*)\\*": cps1,
+ },
+ vistaAndNewer: true,
+ }}
err = m.query.Open()
require.NoError(t, err)
err = m.ParseConfig()
@@ -430,13 +461,18 @@ func TestParseConfigTotalExpansion(t *testing.T) {
perfObjects[0].IncludeTotal = false
- m = Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}),
- expandPaths: map[string][]string{
- "\\O(*)\\*": cps1,
- },
- vistaAndNewer: true,
- }}
+ m = Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ UseWildcardsExpansion: true,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}),
+ expandPaths: map[string][]string{
+ "\\O(*)\\*": cps1,
+ },
+ vistaAndNewer: true,
+ }}
err = m.query.Open()
require.NoError(t, err)
err = m.ParseConfig()
@@ -450,13 +486,18 @@ func TestParseConfigExpand(t *testing.T) {
var err error
perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, false, false)
cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"}
- m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}),
- expandPaths: map[string][]string{
- "\\O(*)\\*": cps1,
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ UseWildcardsExpansion: true,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}),
+ expandPaths: map[string][]string{
+ "\\O(*)\\*": cps1,
+ },
+ vistaAndNewer: true,
+ }}
err = m.query.Open()
require.NoError(t, err)
err = m.ParseConfig()
@@ -474,13 +515,17 @@ func TestSimpleGather(t *testing.T) {
measurement := "test"
perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false)
cp1 := "\\O(I)\\C"
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}),
- expandPaths: map[string][]string{
- cp1: {cp1},
- },
- vistaAndNewer: false,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}),
+ expandPaths: map[string][]string{
+ cp1: {cp1},
+ },
+ vistaAndNewer: false,
+ }}
var acc1 testutil.Accumulator
err = m.Gather(&acc1)
require.NoError(t, err)
@@ -513,13 +558,17 @@ func TestSimpleGatherNoData(t *testing.T) {
measurement := "test"
perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false)
cp1 := "\\O(I)\\C"
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{PDH_NO_DATA}),
- expandPaths: map[string][]string{
- cp1: {cp1},
- },
- vistaAndNewer: false,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{PDH_NO_DATA}),
+ expandPaths: map[string][]string{
+ cp1: {cp1},
+ },
+ vistaAndNewer: false,
+ }}
var acc1 testutil.Accumulator
err = m.Gather(&acc1)
// this "PDH_NO_DATA" error should not be returned to caller, but checked, and handled
@@ -555,13 +604,18 @@ func TestSimpleGatherWithTimestamp(t *testing.T) {
measurement := "test"
perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false)
cp1 := "\\O(I)\\C"
- m := Win_PerfCounters{PrintValid: false, UsePerfCounterTime: true, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}),
- expandPaths: map[string][]string{
- cp1: {cp1},
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ UsePerfCounterTime: true,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}),
+ expandPaths: map[string][]string{
+ cp1: {cp1},
+ },
+ vistaAndNewer: true,
+ }}
var acc1 testutil.Accumulator
err = m.Gather(&acc1)
require.NoError(t, err)
@@ -586,13 +640,17 @@ func TestGatherError(t *testing.T) {
measurement := "test"
perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false)
cp1 := "\\O(I)\\C"
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap([]string{cp1}, []float64{-2}, []uint32{PDH_PLA_VALIDATION_WARNING}),
- expandPaths: map[string][]string{
- cp1: {cp1},
- },
- vistaAndNewer: false,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap([]string{cp1}, []float64{-2}, []uint32{PDH_PLA_VALIDATION_WARNING}),
+ expandPaths: map[string][]string{
+ cp1: {cp1},
+ },
+ vistaAndNewer: false,
+ }}
var acc1 testutil.Accumulator
err = m.Gather(&acc1)
require.Error(t, err)
@@ -617,15 +675,19 @@ func TestGatherInvalidDataIgnore(t *testing.T) {
measurement := "test"
perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C1", "C2", "C3"}, false, false)
cps1 := []string{"\\O(I)\\C1", "\\O(I)\\C2", "\\O(I)\\C3"}
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap(cps1, []float64{1.2, 1, 0}, []uint32{0, PDH_INVALID_DATA, 0}),
- expandPaths: map[string][]string{
- cps1[0]: {cps1[0]},
- cps1[1]: {cps1[1]},
- cps1[2]: {cps1[2]},
- },
- vistaAndNewer: false,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(cps1, []float64{1.2, 1, 0}, []uint32{0, PDH_INVALID_DATA, 0}),
+ expandPaths: map[string][]string{
+ cps1[0]: {cps1[0]},
+ cps1[1]: {cps1[1]},
+ cps1[2]: {cps1[2]},
+ },
+ vistaAndNewer: false,
+ }}
var acc1 testutil.Accumulator
err = m.Gather(&acc1)
require.NoError(t, err)
@@ -666,7 +728,14 @@ func TestGatherRefreshingWithExpansion(t *testing.T) {
},
vistaAndNewer: true,
}
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: true, query: fpm, CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ UseWildcardsExpansion: true,
+ query: fpm,
+ CountersRefreshInterval: internal.Duration{Duration: time.Second * 10},
+ }
var acc1 testutil.Accumulator
err = m.Gather(&acc1)
assert.Len(t, m.counters, 4)
@@ -752,7 +821,13 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) {
},
vistaAndNewer: true,
}
- m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: false, query: fpm, CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ Object: perfObjects,
+ UseWildcardsExpansion: false,
+ query: fpm,
+ CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}}
var acc1 testutil.Accumulator
err = m.Gather(&acc1)
assert.Len(t, m.counters, 2)
@@ -862,14 +937,19 @@ func TestGatherTotalNoExpansion(t *testing.T) {
measurement := "m"
perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, true)
cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"}
- m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: false, Object: perfObjects, query: &FakePerformanceQuery{
- counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0, 0, 0}),
- expandPaths: map[string][]string{
- "\\O(*)\\C1": {cps1[0], cps1[2]},
- "\\O(*)\\C2": {cps1[1], cps1[3]},
- },
- vistaAndNewer: true,
- }}
+ m := Win_PerfCounters{
+ Log: testutil.Logger{},
+ PrintValid: false,
+ UseWildcardsExpansion: false,
+ Object: perfObjects,
+ query: &FakePerformanceQuery{
+ counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0, 0, 0}),
+ expandPaths: map[string][]string{
+ "\\O(*)\\C1": {cps1[0], cps1[2]},
+ "\\O(*)\\C2": {cps1[1], cps1[3]},
+ },
+ vistaAndNewer: true,
+ }}
var acc1 testutil.Accumulator
err = m.Gather(&acc1)
require.NoError(t, err)
diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go
index 1befc4a601602..6ac1bde68ca20 100644
--- a/plugins/inputs/win_services/win_services.go
+++ b/plugins/inputs/win_services/win_services.go
@@ -4,7 +4,6 @@ package win_services
import (
"fmt"
- "log"
"os"
"github.com/influxdata/telegraf"
@@ -90,6 +89,8 @@ var description = "Input plugin to report Windows services info."
//WinServices is an implementation if telegraf.Input interface, providing info about Windows Services
type WinServices struct {
+ Log telegraf.Logger
+
ServiceNames []string `toml:"service_names"`
mgrProvider ManagerProvider
}
@@ -125,9 +126,9 @@ func (m *WinServices) Gather(acc telegraf.Accumulator) error {
service, err := collectServiceInfo(scmgr, srvName)
if err != nil {
if IsPermission(err) {
- log.Printf("D! Error in plugin [inputs.win_services]: %v", err)
+ m.Log.Debug(err.Error())
} else {
- acc.AddError(err)
+ m.Log.Error(err.Error())
}
continue
}
diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go
index a39df49c7371d..0c375c3dd2e65 100644
--- a/plugins/inputs/win_services/win_services_integration_test.go
+++ b/plugins/inputs/win_services/win_services_integration_test.go
@@ -47,7 +47,11 @@ func TestGatherErrors(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
- ws := &WinServices{InvalidServices, &MgProvider{}}
+ ws := &WinServices{
+ Log: testutil.Logger{},
+ ServiceNames: InvalidServices,
+ mgrProvider: &MgProvider{},
+ }
require.Len(t, ws.ServiceNames, 3, "Different number of services")
var acc testutil.Accumulator
require.NoError(t, ws.Gather(&acc))
diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go
index 37dc3f08c7a95..e33ab2ddce622 100644
--- a/plugins/inputs/win_services/win_services_test.go
+++ b/plugins/inputs/win_services/win_services_test.go
@@ -3,8 +3,10 @@
package win_services
import (
+ "bytes"
"errors"
"fmt"
+ "log"
"testing"
"github.com/influxdata/telegraf/testutil"
@@ -128,47 +130,51 @@ var testErrors = []testData{
func TestBasicInfo(t *testing.T) {
- winServices := &WinServices{nil, &FakeMgProvider{testErrors[0]}}
+ winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}}
assert.NotEmpty(t, winServices.SampleConfig())
assert.NotEmpty(t, winServices.Description())
}
func TestMgrErrors(t *testing.T) {
//mgr.connect error
- winServices := &WinServices{nil, &FakeMgProvider{testErrors[0]}}
+ winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}}
var acc1 testutil.Accumulator
err := winServices.Gather(&acc1)
require.Error(t, err)
assert.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error())
////mgr.listServices error
- winServices = &WinServices{nil, &FakeMgProvider{testErrors[1]}}
+ winServices = &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[1]}}
var acc2 testutil.Accumulator
err = winServices.Gather(&acc2)
require.Error(t, err)
assert.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error())
////mgr.listServices error 2
- winServices = &WinServices{[]string{"Fake service 1"}, &FakeMgProvider{testErrors[3]}}
+ winServices = &WinServices{testutil.Logger{}, []string{"Fake service 1"}, &FakeMgProvider{testErrors[3]}}
var acc3 testutil.Accumulator
- err = winServices.Gather(&acc3)
- require.NoError(t, err)
- assert.Len(t, acc3.Errors, 1)
+ buf := &bytes.Buffer{}
+ log.SetOutput(buf)
+ require.NoError(t, winServices.Gather(&acc3))
+
+ require.Contains(t, buf.String(), testErrors[2].services[0].serviceOpenError.Error())
}
func TestServiceErrors(t *testing.T) {
- winServices := &WinServices{nil, &FakeMgProvider{testErrors[2]}}
+ winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[2]}}
var acc1 testutil.Accumulator
+
+ buf := &bytes.Buffer{}
+ log.SetOutput(buf)
require.NoError(t, winServices.Gather(&acc1))
- assert.Len(t, acc1.Errors, 3)
+
//open service error
- assert.Contains(t, acc1.Errors[0].Error(), testErrors[2].services[0].serviceOpenError.Error())
+ require.Contains(t, buf.String(), testErrors[2].services[0].serviceOpenError.Error())
//query service error
- assert.Contains(t, acc1.Errors[1].Error(), testErrors[2].services[1].serviceQueryError.Error())
+ require.Contains(t, buf.String(), testErrors[2].services[1].serviceQueryError.Error())
//config service error
- assert.Contains(t, acc1.Errors[2].Error(), testErrors[2].services[2].serviceConfigError.Error())
-
+ require.Contains(t, buf.String(), testErrors[2].services[2].serviceConfigError.Error())
}
var testSimpleData = []testData{
@@ -179,7 +185,7 @@ var testSimpleData = []testData{
}
func TestGather2(t *testing.T) {
- winServices := &WinServices{nil, &FakeMgProvider{testSimpleData[0]}}
+ winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testSimpleData[0]}}
var acc1 testutil.Accumulator
require.NoError(t, winServices.Gather(&acc1))
assert.Len(t, acc1.Errors, 0, "There should be no errors after gather")
@@ -193,5 +199,4 @@ func TestGather2(t *testing.T) {
tags["display_name"] = s.displayName
acc1.AssertContainsTaggedFields(t, "win_services", fields, tags)
}
-
}
diff --git a/plugins/inputs/wireguard/README.md b/plugins/inputs/wireguard/README.md
new file mode 100644
index 0000000000000..57e16ba4942c9
--- /dev/null
+++ b/plugins/inputs/wireguard/README.md
@@ -0,0 +1,73 @@
+# Wireguard Input Plugin
+
+The Wireguard input plugin collects statistics on the local Wireguard server
+using the [`wgctrl`](https://github.com/WireGuard/wgctrl-go) library. It
+reports gauge metrics for Wireguard interface device(s) and its peers.
+
+### Configuration
+
+```toml
+# Collect Wireguard server interface and peer statistics
+[[inputs.wireguard]]
+ ## Optional list of Wireguard device/interface names to query.
+ ## If omitted, all Wireguard interfaces are queried.
+ # devices = ["wg0"]
+```
+
+### Metrics
+
+- `wireguard_device`
+ - tags:
+ - `name` (interface device name, e.g. `wg0`)
+ - `type` (Wireguard tunnel type, e.g. `linux_kernel` or `userspace`)
+ - fields:
+ - `listen_port` (int, UDP port on which the interface is listening)
+ - `firewall_mark` (int, device's current firewall mark)
+ - `peers` (int, number of peers associated with the device)
+
+- `wireguard_peer`
+ - tags:
+ - `device` (associated interface device name, e.g. `wg0`)
+ - `public_key` (peer public key, e.g. `NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE=`)
+ - fields:
+ - `persistent_keepalive_interval_ns` (int, keepalive interval in nanoseconds; 0 if unset)
+ - `protocol_version` (int, Wireguard protocol version number)
+ - `allowed_ips` (int, number of allowed IPs for this peer)
+ - `last_handshake_time_ns` (int, Unix timestamp of the last handshake for this peer in nanoseconds)
+ - `rx_bytes` (int, number of bytes received from this peer)
+ - `tx_bytes` (int, number of bytes transmitted to this peer)
+
+### Troubleshooting
+
+#### Error: `operation not permitted`
+
+When the kernelspace implementation of Wireguard is in use (as opposed to its
+userspace implementations), Telegraf communicates with the module over netlink.
+This requires Telegraf to either run as root, or for the Telegraf binary to
+have the `CAP_NET_ADMIN` capability.
+
+To add this capability to the Telegraf binary (to allow this communication under
+the default user `telegraf`):
+
+```bash
+$ sudo setcap CAP_NET_ADMIN+epi $(which telegraf)
+```
+
+N.B.: This capability is a filesystem attribute on the binary itself. The
+attribute needs to be re-applied if the Telegraf binary is rotated (e.g.
+on installation of new a Telegraf version from the system package manager).
+
+#### Error: `error enumerating Wireguard devices`
+
+This usually happens when the device names specified in config are invalid.
+Ensure that `sudo wg show` succeeds, and that the device names in config match
+those printed by this command.
+
+### Example Output
+
+```
+wireguard_device,host=WGVPN,name=wg0,type=linux_kernel firewall_mark=51820i,listen_port=58216i 1582513589000000000
+wireguard_device,host=WGVPN,name=wg0,type=linux_kernel peers=1i 1582513589000000000
+wireguard_peer,device=wg0,host=WGVPN,public_key=NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE= allowed_ips=2i,persistent_keepalive_interval_ns=60000000000i,protocol_version=1i 1582513589000000000
+wireguard_peer,device=wg0,host=WGVPN,public_key=NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE= last_handshake_time_ns=1582513584530013376i,rx_bytes=6484i,tx_bytes=13540i 1582513589000000000
+```
diff --git a/plugins/inputs/wireguard/wireguard.go b/plugins/inputs/wireguard/wireguard.go
new file mode 100644
index 0000000000000..ded3328378230
--- /dev/null
+++ b/plugins/inputs/wireguard/wireguard.go
@@ -0,0 +1,139 @@
+package wireguard
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "golang.zx2c4.com/wireguard/wgctrl"
+ "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
+)
+
+const (
+ measurementDevice = "wireguard_device"
+ measurementPeer = "wireguard_peer"
+)
+
+var (
+ deviceTypeNames = map[wgtypes.DeviceType]string{
+ wgtypes.Unknown: "unknown",
+ wgtypes.LinuxKernel: "linux_kernel",
+ wgtypes.Userspace: "userspace",
+ }
+)
+
+// Wireguard is an input that enumerates all Wireguard interfaces/devices on
+// the host, and reports gauge metrics for the device itself and its peers.
+type Wireguard struct {
+ Devices []string `toml:"devices"`
+
+ client *wgctrl.Client
+}
+
+func (wg *Wireguard) Description() string {
+ return "Collect Wireguard server interface and peer statistics"
+}
+
+func (wg *Wireguard) SampleConfig() string {
+ return `
+ ## Optional list of Wireguard device/interface names to query.
+ ## If omitted, all Wireguard interfaces are queried.
+ # devices = ["wg0"]
+`
+}
+
+func (wg *Wireguard) Init() error {
+ var err error
+
+ wg.client, err = wgctrl.New()
+
+ return err
+}
+
+func (wg *Wireguard) Gather(acc telegraf.Accumulator) error {
+ devices, err := wg.enumerateDevices()
+ if err != nil {
+ return fmt.Errorf("error enumerating Wireguard devices: %v", err)
+ }
+
+ for _, device := range devices {
+ wg.gatherDeviceMetrics(acc, device)
+
+ for _, peer := range device.Peers {
+ wg.gatherDevicePeerMetrics(acc, device, peer)
+ }
+ }
+
+ return nil
+}
+
+func (wg *Wireguard) enumerateDevices() ([]*wgtypes.Device, error) {
+ var devices []*wgtypes.Device
+
+ // If no device names are specified, defer to the library to enumerate
+ // all of them
+ if len(wg.Devices) == 0 {
+ return wg.client.Devices()
+ }
+
+ // Otherwise, explicitly populate only device names specified in config
+ for _, name := range wg.Devices {
+ dev, err := wg.client.Device(name)
+ if err != nil {
+ log.Printf("W! [inputs.wireguard] No Wireguard device found with name %s", name)
+ continue
+ }
+
+ devices = append(devices, dev)
+ }
+
+ return devices, nil
+}
+
+func (wg *Wireguard) gatherDeviceMetrics(acc telegraf.Accumulator, device *wgtypes.Device) {
+ fields := map[string]interface{}{
+ "listen_port": device.ListenPort,
+ "firewall_mark": device.FirewallMark,
+ }
+
+ gauges := map[string]interface{}{
+ "peers": len(device.Peers),
+ }
+
+ tags := map[string]string{
+ "name": device.Name,
+ "type": deviceTypeNames[device.Type],
+ }
+
+ acc.AddFields(measurementDevice, fields, tags)
+ acc.AddGauge(measurementDevice, gauges, tags)
+}
+
+func (wg *Wireguard) gatherDevicePeerMetrics(acc telegraf.Accumulator, device *wgtypes.Device, peer wgtypes.Peer) {
+ fields := map[string]interface{}{
+ "persistent_keepalive_interval_ns": peer.PersistentKeepaliveInterval.Nanoseconds(),
+ "protocol_version": peer.ProtocolVersion,
+ "allowed_ips": len(peer.AllowedIPs),
+ }
+
+ gauges := map[string]interface{}{
+ "last_handshake_time_ns": peer.LastHandshakeTime.UnixNano(),
+ "rx_bytes": peer.ReceiveBytes,
+ "tx_bytes": peer.TransmitBytes,
+ }
+
+ tags := map[string]string{
+ "device": device.Name,
+ "public_key": peer.PublicKey.String(),
+ }
+
+ acc.AddFields(measurementPeer, fields, tags)
+ acc.AddGauge(measurementPeer, gauges, tags)
+}
+
+func init() {
+ inputs.Add("wireguard", func() telegraf.Input {
+ return &Wireguard{}
+ })
+}
diff --git a/plugins/inputs/wireguard/wireguard_test.go b/plugins/inputs/wireguard/wireguard_test.go
new file mode 100644
index 0000000000000..0cfdba75df50c
--- /dev/null
+++ b/plugins/inputs/wireguard/wireguard_test.go
@@ -0,0 +1,84 @@
+package wireguard
+
+import (
+ "net"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+ "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
+)
+
+func TestWireguard_gatherDeviceMetrics(t *testing.T) {
+ var acc testutil.Accumulator
+
+ wg := &Wireguard{}
+ device := &wgtypes.Device{
+ Name: "wg0",
+ Type: wgtypes.LinuxKernel,
+ ListenPort: 1,
+ FirewallMark: 2,
+ Peers: []wgtypes.Peer{{}, {}},
+ }
+
+ expectFields := map[string]interface{}{
+ "listen_port": 1,
+ "firewall_mark": 2,
+ }
+ expectGauges := map[string]interface{}{
+ "peers": 2,
+ }
+ expectTags := map[string]string{
+ "name": "wg0",
+ "type": "linux_kernel",
+ }
+
+ wg.gatherDeviceMetrics(&acc, device)
+
+ assert.Equal(t, 3, acc.NFields())
+ acc.AssertDoesNotContainMeasurement(t, measurementPeer)
+ acc.AssertContainsTaggedFields(t, measurementDevice, expectFields, expectTags)
+ acc.AssertContainsTaggedFields(t, measurementDevice, expectGauges, expectTags)
+}
+
+func TestWireguard_gatherDevicePeerMetrics(t *testing.T) {
+ var acc testutil.Accumulator
+ pubkey, _ := wgtypes.ParseKey("NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE=")
+
+ wg := &Wireguard{}
+ device := &wgtypes.Device{
+ Name: "wg0",
+ }
+ peer := wgtypes.Peer{
+ PublicKey: pubkey,
+ PersistentKeepaliveInterval: 1 * time.Minute,
+ LastHandshakeTime: time.Unix(100, 0),
+ ReceiveBytes: int64(40),
+ TransmitBytes: int64(60),
+ AllowedIPs: []net.IPNet{{}, {}},
+ ProtocolVersion: 0,
+ }
+
+ expectFields := map[string]interface{}{
+ "persistent_keepalive_interval_ns": int64(60000000000),
+ "protocol_version": 0,
+ "allowed_ips": 2,
+ }
+ expectGauges := map[string]interface{}{
+ "last_handshake_time_ns": int64(100000000000),
+ "rx_bytes": int64(40),
+ "tx_bytes": int64(60),
+ }
+ expectTags := map[string]string{
+ "device": "wg0",
+ "public_key": pubkey.String(),
+ }
+
+ wg.gatherDevicePeerMetrics(&acc, device, peer)
+
+ assert.Equal(t, 6, acc.NFields())
+ acc.AssertDoesNotContainMeasurement(t, measurementDevice)
+ acc.AssertContainsTaggedFields(t, measurementPeer, expectFields, expectTags)
+ acc.AssertContainsTaggedFields(t, measurementPeer, expectGauges, expectTags)
+}
diff --git a/plugins/inputs/wireless/wireless.go b/plugins/inputs/wireless/wireless.go
index eb488ef594693..911d7fb097d17 100644
--- a/plugins/inputs/wireless/wireless.go
+++ b/plugins/inputs/wireless/wireless.go
@@ -7,7 +7,8 @@ import (
// Wireless is used to store configuration values.
type Wireless struct {
- HostProc string `toml:"host_proc"`
+ HostProc string `toml:"host_proc"`
+ Log telegraf.Logger `toml:"-"`
}
var sampleConfig = `
diff --git a/plugins/inputs/wireless/wireless_nonlinux.go b/plugins/inputs/wireless/wireless_notlinux.go
similarity index 75%
rename from plugins/inputs/wireless/wireless_nonlinux.go
rename to plugins/inputs/wireless/wireless_notlinux.go
index 0fbe5eb062bb8..4769acc970e42 100644
--- a/plugins/inputs/wireless/wireless_nonlinux.go
+++ b/plugins/inputs/wireless/wireless_notlinux.go
@@ -3,19 +3,21 @@
package wireless
import (
- "log"
-
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
+func (w *Wireless) Init() error {
+ w.Log.Warn("Current platform is not supported")
+ return nil
+}
+
func (w *Wireless) Gather(acc telegraf.Accumulator) error {
return nil
}
func init() {
inputs.Add("wireless", func() telegraf.Input {
- log.Print("W! [inputs.wireless] Current platform is not supported")
return &Wireless{}
})
}
diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md
index a85d05463568e..760813ecc7adb 100644
--- a/plugins/inputs/x509_cert/README.md
+++ b/plugins/inputs/x509_cert/README.md
@@ -1,4 +1,4 @@
-# X509 Cert Input Plugin
+# x509 Certificate Input Plugin
This plugin provides information about X509 certificate accessible via local
file or network connection.
@@ -15,13 +15,14 @@ file or network connection.
## Timeout for SSL connection
# timeout = "5s"
+ ## Pass a different name into the TLS request (Server Name Indication)
+ ## example: server_name = "myhost.example.org"
+ # server_name = "myhost.example.org"
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
-
- ## Use TLS but skip chain & host verification
- # insecure_skip_verify = false
```
@@ -35,7 +36,16 @@ file or network connection.
- country
- province
- locality
+ - verification
+ - serial_number
+ - signature_algorithm
+ - public_key_algorithm
+ - issuer_common_name
+ - issuer_serial_number
+ - san
- fields:
+ - verification_code (int)
+ - verification_error (string)
- expiry (int, seconds)
- age (int, seconds)
- startdate (int, seconds)
@@ -45,6 +55,8 @@ file or network connection.
### Example output
```
-x509_cert,host=myhost,source=https://example.org age=1753627i,expiry=5503972i,startdate=1516092060i,enddate=1523349660i 1517845687000000000
-x509_cert,host=myhost,source=/etc/ssl/certs/ssl-cert-snakeoil.pem age=7522207i,expiry=308002732i,startdate=1510323480i,enddate=1825848420i 1517845687000000000
+x509_cert,common_name=ubuntu,source=/etc/ssl/certs/ssl-cert-snakeoil.pem,verification=valid age=7693222i,enddate=1871249033i,expiry=307666777i,startdate=1555889033i,verification_code=0i 1563582256000000000
+x509_cert,common_name=www.example.org,country=US,locality=Los\ Angeles,organization=Internet\ Corporation\ for\ Assigned\ Names\ and\ Numbers,organizational_unit=Technology,province=California,source=https://example.org:443,verification=invalid age=20219055i,enddate=1606910400i,expiry=43328144i,startdate=1543363200i,verification_code=1i,verification_error="x509: certificate signed by unknown authority" 1563582256000000000
+x509_cert,common_name=DigiCert\ SHA2\ Secure\ Server\ CA,country=US,organization=DigiCert\ Inc,source=https://example.org:443,verification=valid age=200838255i,enddate=1678276800i,expiry=114694544i,startdate=1362744000i,verification_code=0i 1563582256000000000
+x509_cert,common_name=DigiCert\ Global\ Root\ CA,country=US,organization=DigiCert\ Inc,organizational_unit=www.digicert.com,source=https://example.org:443,verification=valid age=400465455i,enddate=1952035200i,expiry=388452944i,startdate=1163116800i,verification_code=0i 1563582256000000000
```
diff --git a/plugins/inputs/x509_cert/dev/telegraf.conf b/plugins/inputs/x509_cert/dev/telegraf.conf
index 1eda94f02b325..7545997a4d394 100644
--- a/plugins/inputs/x509_cert/dev/telegraf.conf
+++ b/plugins/inputs/x509_cert/dev/telegraf.conf
@@ -1,5 +1,4 @@
[[inputs.x509_cert]]
- sources = ["https://www.influxdata.com:443"]
+ sources = ["https://expired.badssl.com:443", "https://wrong.host.badssl.com:443"]
[[outputs.file]]
- files = ["stdout"]
diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go
index 81bcb0d2c15f2..983926af16aeb 100644
--- a/plugins/inputs/x509_cert/x509_cert.go
+++ b/plugins/inputs/x509_cert/x509_cert.go
@@ -2,9 +2,9 @@
package x509_cert
import (
+ "bytes"
"crypto/tls"
"crypto/x509"
- "crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
@@ -15,7 +15,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- _tls "github.com/influxdata/telegraf/internal/tls"
+ _tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -26,20 +26,23 @@ const sampleConfig = `
## Timeout for SSL connection
# timeout = "5s"
+ ## Pass a different name into the TLS request (Server Name Indication)
+ ## example: server_name = "myhost.example.org"
+ # server_name = ""
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
-
- ## Use TLS but skip chain & host verification
- # insecure_skip_verify = false
`
const description = "Reads metrics from a SSL certificate"
// X509Cert holds the configuration of the plugin.
type X509Cert struct {
- Sources []string `toml:"sources"`
- Timeout internal.Duration `toml:"timeout"`
+ Sources []string `toml:"sources"`
+ Timeout internal.Duration `toml:"timeout"`
+ ServerName string `toml:"server_name"`
+ tlsCfg *tls.Config
_tls.ClientConfig
}
@@ -53,16 +56,20 @@ func (c *X509Cert) SampleConfig() string {
return sampleConfig
}
-func (c *X509Cert) getCert(location string, timeout time.Duration) ([]*x509.Certificate, error) {
+func (c *X509Cert) locationToURL(location string) (*url.URL, error) {
if strings.HasPrefix(location, "/") {
location = "file://" + location
}
u, err := url.Parse(location)
if err != nil {
- return nil, fmt.Errorf("failed to parse cert location - %s\n", err.Error())
+ return nil, fmt.Errorf("failed to parse cert location - %s", err.Error())
}
+ return u, nil
+}
+
+func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certificate, error) {
switch u.Scheme {
case "https":
u.Scheme = "tcp"
@@ -70,22 +77,20 @@ func (c *X509Cert) getCert(location string, timeout time.Duration) ([]*x509.Cert
case "udp", "udp4", "udp6":
fallthrough
case "tcp", "tcp4", "tcp6":
- tlsCfg, err := c.ClientConfig.TLSConfig()
- if err != nil {
- return nil, err
- }
-
ipConn, err := net.DialTimeout(u.Scheme, u.Host, timeout)
if err != nil {
return nil, err
}
defer ipConn.Close()
- if tlsCfg == nil {
- tlsCfg = &tls.Config{}
+ if c.ServerName == "" {
+ c.tlsCfg.ServerName = u.Hostname()
+ } else {
+ c.tlsCfg.ServerName = c.ServerName
}
- tlsCfg.ServerName = u.Hostname()
- conn := tls.Client(ipConn, tlsCfg)
+
+ c.tlsCfg.InsecureSkipVerify = true
+ conn := tls.Client(ipConn, c.tlsCfg)
defer conn.Close()
hsErr := conn.Handshake()
@@ -101,20 +106,28 @@ func (c *X509Cert) getCert(location string, timeout time.Duration) ([]*x509.Cert
if err != nil {
return nil, err
}
-
- block, _ := pem.Decode(content)
- if block == nil {
- return nil, fmt.Errorf("failed to parse certificate PEM")
- }
-
- cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return nil, err
+ var certs []*x509.Certificate
+ for {
+ block, rest := pem.Decode(bytes.TrimSpace(content))
+ if block == nil {
+ return nil, fmt.Errorf("failed to parse certificate PEM")
+ }
+
+ if block.Type == "CERTIFICATE" {
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ certs = append(certs, cert)
+ }
+ if rest == nil || len(rest) == 0 {
+ break
+ }
+ content = rest
}
-
- return []*x509.Certificate{cert}, nil
+ return certs, nil
default:
- return nil, fmt.Errorf("unsuported scheme '%s' in location %s\n", u.Scheme, location)
+ return nil, fmt.Errorf("unsupported scheme '%s' in location %s", u.Scheme, u.String())
}
}
@@ -134,28 +147,43 @@ func getFields(cert *x509.Certificate, now time.Time) map[string]interface{} {
return fields
}
-func getTags(subject pkix.Name, location string) map[string]string {
+func getTags(cert *x509.Certificate, location string) map[string]string {
tags := map[string]string{
- "source": location,
- "common_name": subject.CommonName,
+ "source": location,
+ "common_name": cert.Subject.CommonName,
+ "serial_number": cert.SerialNumber.Text(16),
+ "signature_algorithm": cert.SignatureAlgorithm.String(),
+ "public_key_algorithm": cert.PublicKeyAlgorithm.String(),
}
- if len(subject.Organization) > 0 {
- tags["organization"] = subject.Organization[0]
+ if len(cert.Subject.Organization) > 0 {
+ tags["organization"] = cert.Subject.Organization[0]
}
- if len(subject.OrganizationalUnit) > 0 {
- tags["organizational_unit"] = subject.OrganizationalUnit[0]
+ if len(cert.Subject.OrganizationalUnit) > 0 {
+ tags["organizational_unit"] = cert.Subject.OrganizationalUnit[0]
}
- if len(subject.Country) > 0 {
- tags["country"] = subject.Country[0]
+ if len(cert.Subject.Country) > 0 {
+ tags["country"] = cert.Subject.Country[0]
}
- if len(subject.Province) > 0 {
- tags["province"] = subject.Province[0]
+ if len(cert.Subject.Province) > 0 {
+ tags["province"] = cert.Subject.Province[0]
}
- if len(subject.Locality) > 0 {
- tags["locality"] = subject.Locality[0]
+ if len(cert.Subject.Locality) > 0 {
+ tags["locality"] = cert.Subject.Locality[0]
}
+ tags["issuer_common_name"] = cert.Issuer.CommonName
+ tags["issuer_serial_number"] = cert.Issuer.SerialNumber
+
+ san := append(cert.DNSNames, cert.EmailAddresses...)
+ for _, ip := range cert.IPAddresses {
+ san = append(san, ip.String())
+ }
+ for _, uri := range cert.URIs {
+ san = append(san, uri.String())
+ }
+ tags["san"] = strings.Join(san, ",")
+
return tags
}
@@ -164,14 +192,52 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error {
now := time.Now()
for _, location := range c.Sources {
- certs, err := c.getCert(location, c.Timeout.Duration*time.Second)
+ u, err := c.locationToURL(location)
+ if err != nil {
+ acc.AddError(err)
+ return nil
+ }
+
+ certs, err := c.getCert(u, c.Timeout.Duration*time.Second)
if err != nil {
acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error()))
}
- for _, cert := range certs {
+ for i, cert := range certs {
fields := getFields(cert, now)
- tags := getTags(cert.Subject, location)
+ tags := getTags(cert, location)
+
+ // The first certificate is the leaf/end-entity certificate which needs DNS
+ // name validation against the URL hostname.
+ opts := x509.VerifyOptions{
+ Intermediates: x509.NewCertPool(),
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+ }
+ if i == 0 {
+ if c.ServerName == "" {
+ opts.DNSName = u.Hostname()
+ } else {
+ opts.DNSName = c.ServerName
+ }
+ for j, cert := range certs {
+ if j != 0 {
+ opts.Intermediates.AddCert(cert)
+ }
+ }
+ }
+ if c.tlsCfg.RootCAs != nil {
+ opts.Roots = c.tlsCfg.RootCAs
+ }
+
+ _, err = cert.Verify(opts)
+ if err == nil {
+ tags["verification"] = "valid"
+ fields["verification_code"] = 0
+ } else {
+ tags["verification"] = "invalid"
+ fields["verification_code"] = 1
+ fields["verification_error"] = err.Error()
+ }
acc.AddFields("x509_cert", fields, tags)
}
@@ -180,6 +246,20 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error {
return nil
}
+func (c *X509Cert) Init() error {
+ tlsCfg, err := c.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+ if tlsCfg == nil {
+ tlsCfg = &tls.Config{}
+ }
+
+ c.tlsCfg = tlsCfg
+
+ return nil
+}
+
func init() {
inputs.Add("x509_cert", func() telegraf.Input {
return &X509Cert{
diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go
index 933676417cf80..c3452445739f1 100644
--- a/plugins/inputs/x509_cert/x509_cert_test.go
+++ b/plugins/inputs/x509_cert/x509_cert_test.go
@@ -5,6 +5,7 @@ import (
"encoding/base64"
"fmt"
"io/ioutil"
+ "math/big"
"os"
"testing"
"time"
@@ -110,6 +111,7 @@ func TestGatherRemote(t *testing.T) {
Sources: []string{test.server},
Timeout: internal.Duration{Duration: test.timeout},
}
+ sc.Init()
sc.InsecureSkipVerify = true
testErr := false
@@ -140,6 +142,16 @@ func TestGatherLocal(t *testing.T) {
{name: "not a certificate", mode: 0640, content: "test", error: true},
{name: "wrong certificate", mode: 0640, content: wrongCert, error: true},
{name: "correct certificate", mode: 0640, content: pki.ReadServerCert()},
+ {name: "correct client certificate", mode: 0640, content: pki.ReadClientCert()},
+ {name: "correct certificate and extra trailing space", mode: 0640, content: pki.ReadServerCert() + " "},
+ {name: "correct certificate and extra leading space", mode: 0640, content: " " + pki.ReadServerCert()},
+ {name: "correct multiple certificates", mode: 0640, content: pki.ReadServerCert() + pki.ReadCACert()},
+ {name: "correct multiple certificates and key", mode: 0640, content: pki.ReadServerCert() + pki.ReadCACert() + pki.ReadServerKey()},
+ {name: "correct certificate and wrong certificate", mode: 0640, content: pki.ReadServerCert() + "\n" + wrongCert, error: true},
+ {name: "correct certificate and not a certificate", mode: 0640, content: pki.ReadServerCert() + "\ntest", error: true},
+ {name: "correct multiple certificates and extra trailing space", mode: 0640, content: pki.ReadServerCert() + pki.ReadServerCert() + " "},
+ {name: "correct multiple certificates and extra leading space", mode: 0640, content: " " + pki.ReadServerCert() + pki.ReadServerCert()},
+ {name: "correct multiple certificates and extra middle space", mode: 0640, content: pki.ReadServerCert() + " " + pki.ReadServerCert()},
}
for _, test := range tests {
@@ -169,6 +181,7 @@ func TestGatherLocal(t *testing.T) {
sc := X509Cert{
Sources: []string{f.Name()},
}
+ sc.Init()
error := false
@@ -185,6 +198,61 @@ func TestGatherLocal(t *testing.T) {
}
}
+func TestTags(t *testing.T) {
+ cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert())
+
+ f, err := ioutil.TempFile("", "x509_cert")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = f.Write([]byte(cert))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = f.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer os.Remove(f.Name())
+
+ sc := X509Cert{
+ Sources: []string{f.Name()},
+ }
+ sc.Init()
+
+ acc := testutil.Accumulator{}
+ err = sc.Gather(&acc)
+ require.NoError(t, err)
+
+ assert.True(t, acc.HasMeasurement("x509_cert"))
+
+ assert.True(t, acc.HasTag("x509_cert", "common_name"))
+ assert.Equal(t, "server.localdomain", acc.TagValue("x509_cert", "common_name"))
+
+ assert.True(t, acc.HasTag("x509_cert", "signature_algorithm"))
+ assert.Equal(t, "SHA256-RSA", acc.TagValue("x509_cert", "signature_algorithm"))
+
+ assert.True(t, acc.HasTag("x509_cert", "public_key_algorithm"))
+ assert.Equal(t, "RSA", acc.TagValue("x509_cert", "public_key_algorithm"))
+
+ assert.True(t, acc.HasTag("x509_cert", "issuer_common_name"))
+ assert.Equal(t, "Telegraf Test CA", acc.TagValue("x509_cert", "issuer_common_name"))
+
+ assert.True(t, acc.HasTag("x509_cert", "san"))
+ assert.Equal(t, "localhost,127.0.0.1", acc.TagValue("x509_cert", "san"))
+
+ assert.True(t, acc.HasTag("x509_cert", "serial_number"))
+ serialNumber := new(big.Int)
+ _, validSerialNumber := serialNumber.SetString(acc.TagValue("x509_cert", "serial_number"), 16)
+ if !validSerialNumber {
+ t.Errorf("Expected a valid Hex serial number but got %s", acc.TagValue("x509_cert", "serial_number"))
+ }
+ assert.Equal(t, big.NewInt(1), serialNumber)
+}
+
func TestGatherChain(t *testing.T) {
cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert())
@@ -218,6 +286,7 @@ func TestGatherChain(t *testing.T) {
sc := X509Cert{
Sources: []string{f.Name()},
}
+ sc.Init()
error := false
@@ -237,6 +306,7 @@ func TestGatherChain(t *testing.T) {
func TestStrings(t *testing.T) {
sc := X509Cert{}
+ sc.Init()
tests := []struct {
name string
@@ -265,6 +335,7 @@ func TestGatherCert(t *testing.T) {
m := &X509Cert{
Sources: []string{"https://www.influxdata.com:443"},
}
+ m.Init()
var acc testutil.Accumulator
err := m.Gather(&acc)
diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md
index b60711e305e69..f0e71a47d714c 100644
--- a/plugins/inputs/zfs/README.md
+++ b/plugins/inputs/zfs/README.md
@@ -1,4 +1,4 @@
-# ZFS plugin
+# ZFS Input Plugin
This ZFS plugin provides metrics from your ZFS filesystems. It supports ZFS on
Linux and FreeBSD. It gets ZFS stat from `/proc/spl/kstat/zfs` on Linux and
@@ -268,7 +268,7 @@ A short description for some of the metrics.
`arcstats_evict_l2_ineligible` We evicted something which cannot be stored in the l2.
Reasons could be:
- - We have multiple pools, we evicted something from a pool whithout an l2 device.
+ - We have multiple pools, we evicted something from a pool without an l2 device.
- The zfs property secondary cache.
`arcstats_c` Arc target size, this is the size the system thinks the arc should have.
diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go
index dba135cfd0bc8..87f21f67245f4 100644
--- a/plugins/inputs/zfs/zfs_freebsd_test.go
+++ b/plugins/inputs/zfs/zfs_freebsd_test.go
@@ -155,7 +155,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {
err = z.Gather(&acc)
require.NoError(t, err)
- //four pool, vdev_cache_stats and zfetchstatus metrics
+ //four pool, vdev_cache_stats and zfetchstats metrics
intMetrics = getKstatMetricsVdevAndZfetch()
acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags)
diff --git a/plugins/inputs/zipkin/README.md b/plugins/inputs/zipkin/README.md
index b3ff82c1306f6..f07ca6e55afad 100644
--- a/plugins/inputs/zipkin/README.md
+++ b/plugins/inputs/zipkin/README.md
@@ -1,4 +1,4 @@
-# Zipkin Plugin
+# Zipkin Input Plugin
This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
diff --git a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go
index ddc0d4918f498..ea25b49a0fcca 100644
--- a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go
+++ b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go
@@ -55,7 +55,7 @@ func main() {
zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second))
defer collector.Close()
if err != nil {
- log.Fatalf("Error intializing zipkin http collector: %v\n", err)
+ log.Fatalf("Error initializing zipkin http collector: %v\n", err)
}
tracer, err := zipkin.NewTracer(
diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go
index 60bf1b51ae6e5..dde89570b8969 100644
--- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go
+++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go
@@ -5,7 +5,7 @@ vice versa.
To convert from json to thrift,
the json is unmarshalled, converted to zipkincore.Span structures, and
marshalled into thrift binary protocol. The json must be in an array format (even if it only has one object),
-because the tool automatically tries to unmarshall the json into an array of structs.
+because the tool automatically tries to unmarshal the json into an array of structs.
To convert from thrift to json,
the opposite process must happen. The thrift binary data must be read into an array of
diff --git a/plugins/inputs/zipkin/convert_test.go b/plugins/inputs/zipkin/convert_test.go
index 92c1ba3ff33db..23a951594da1a 100644
--- a/plugins/inputs/zipkin/convert_test.go
+++ b/plugins/inputs/zipkin/convert_test.go
@@ -121,6 +121,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851331000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -138,6 +139,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851331000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -152,6 +154,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360904552000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -169,6 +172,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360904552000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -183,6 +187,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -199,6 +204,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -215,6 +221,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -231,6 +238,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -248,6 +256,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
},
wantErr: false,
@@ -296,6 +305,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(1) * time.Nanosecond).Nanoseconds(),
},
Time: time.Unix(1, 0).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -312,6 +322,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
"duration_ns": (time.Duration(1) * time.Nanosecond).Nanoseconds(),
},
Time: time.Unix(1, 0).UTC(),
+ Type: telegraf.Untyped,
},
},
},
diff --git a/plugins/inputs/zipkin/zipkin.go b/plugins/inputs/zipkin/zipkin.go
index 18a63dccd4cb1..4224fea3d2928 100644
--- a/plugins/inputs/zipkin/zipkin.go
+++ b/plugins/inputs/zipkin/zipkin.go
@@ -3,7 +3,6 @@ package zipkin
import (
"context"
"fmt"
- "log"
"net"
"net/http"
"strconv"
@@ -60,6 +59,8 @@ type Zipkin struct {
Port int
Path string
+ Log telegraf.Logger
+
address string
handler Handler
server *http.Server
@@ -105,7 +106,7 @@ func (z *Zipkin) Start(acc telegraf.Accumulator) error {
}
z.address = ln.Addr().String()
- log.Printf("I! Started the zipkin listener on %s", z.address)
+ z.Log.Infof("Started the zipkin listener on %s", z.address)
go func() {
wg.Add(1)
diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go
index 2ac269db19693..77bef853b7e52 100644
--- a/plugins/inputs/zipkin/zipkin_test.go
+++ b/plugins/inputs/zipkin/zipkin_test.go
@@ -9,6 +9,7 @@ import (
"time"
"github.com/google/go-cmp/cmp"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
@@ -40,6 +41,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851331000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -57,6 +59,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851331000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -71,6 +74,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360904552000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -88,6 +92,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360904552000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -102,6 +107,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -118,6 +124,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -134,6 +141,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -150,6 +158,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -167,6 +176,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1498688360851318000).UTC(),
+ Type: telegraf.Untyped,
},
},
wantErr: false,
@@ -189,6 +199,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -205,6 +216,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -221,6 +233,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
},
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
},
},
@@ -240,7 +253,9 @@ func TestZipkinPlugin(t *testing.T) {
},
Fields: map[string]interface{}{
"duration_ns": int64(3000000),
- }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+ },
+ Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -257,6 +272,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -273,6 +289,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -290,6 +307,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -307,6 +325,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -324,6 +343,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -338,6 +358,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -354,6 +375,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -370,6 +392,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -387,6 +410,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -404,6 +428,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -421,6 +446,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -438,6 +464,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -455,6 +482,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -469,6 +497,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -485,6 +514,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -501,6 +531,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -518,6 +549,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -535,6 +567,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
{
Measurement: "zipkin",
@@ -552,6 +585,7 @@ func TestZipkinPlugin(t *testing.T) {
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+ Type: telegraf.Untyped,
},
},
},
@@ -562,6 +596,7 @@ func TestZipkinPlugin(t *testing.T) {
DefaultNetwork = "tcp4"
z := &Zipkin{
+ Log: testutil.Logger{},
Path: "/api/v1/spans",
Port: 0,
}
diff --git a/plugins/inputs/zookeeper/README.md b/plugins/inputs/zookeeper/README.md
index d54caae44471b..0ce7f442a4bba 100644
--- a/plugins/inputs/zookeeper/README.md
+++ b/plugins/inputs/zookeeper/README.md
@@ -1,7 +1,7 @@
-## Zookeeper Input Plugin
+# Zookeeper Input Plugin
The zookeeper plugin collects variables outputted from the 'mntr' command
-[Zookeeper Admin](https://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html).
+[Zookeeper Admin](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html).
### Configuration
@@ -19,7 +19,7 @@ The zookeeper plugin collects variables outputted from the 'mntr' command
# timeout = "5s"
## Optional TLS Config
- # enable_ssl = true
+ # enable_tls = true
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go
index ad990f28cf784..5259e25b7163e 100644
--- a/plugins/inputs/zookeeper/zookeeper.go
+++ b/plugins/inputs/zookeeper/zookeeper.go
@@ -13,10 +13,12 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
+var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)
+
// Zookeeper is a zookeeper plugin
type Zookeeper struct {
Servers []string
@@ -136,9 +138,7 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr
fields := make(map[string]interface{})
for scanner.Scan() {
line := scanner.Text()
-
- re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)
- parts := re.FindStringSubmatch(string(line))
+ parts := zookeeperFormatRE.FindStringSubmatch(string(line))
if len(parts) != 3 {
return fmt.Errorf("unexpected line in mntr response: %q", line)
diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go
index a5d2a44daad59..6e8a5457fad60 100644
--- a/plugins/outputs/all/all.go
+++ b/plugins/outputs/all/all.go
@@ -10,10 +10,14 @@ import (
_ "github.com/influxdata/telegraf/plugins/outputs/cratedb"
_ "github.com/influxdata/telegraf/plugins/outputs/datadog"
_ "github.com/influxdata/telegraf/plugins/outputs/discard"
+ _ "github.com/influxdata/telegraf/plugins/outputs/dynatrace"
_ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch"
+ _ "github.com/influxdata/telegraf/plugins/outputs/exec"
+ _ "github.com/influxdata/telegraf/plugins/outputs/execd"
_ "github.com/influxdata/telegraf/plugins/outputs/file"
_ "github.com/influxdata/telegraf/plugins/outputs/graphite"
_ "github.com/influxdata/telegraf/plugins/outputs/graylog"
+ _ "github.com/influxdata/telegraf/plugins/outputs/health"
_ "github.com/influxdata/telegraf/plugins/outputs/http"
_ "github.com/influxdata/telegraf/plugins/outputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2"
@@ -21,8 +25,10 @@ import (
_ "github.com/influxdata/telegraf/plugins/outputs/kafka"
_ "github.com/influxdata/telegraf/plugins/outputs/kinesis"
_ "github.com/influxdata/telegraf/plugins/outputs/librato"
+ _ "github.com/influxdata/telegraf/plugins/outputs/logzio"
_ "github.com/influxdata/telegraf/plugins/outputs/mqtt"
_ "github.com/influxdata/telegraf/plugins/outputs/nats"
+ _ "github.com/influxdata/telegraf/plugins/outputs/newrelic"
_ "github.com/influxdata/telegraf/plugins/outputs/nsq"
_ "github.com/influxdata/telegraf/plugins/outputs/opentsdb"
_ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client"
@@ -30,5 +36,8 @@ import (
_ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy"
_ "github.com/influxdata/telegraf/plugins/outputs/socket_writer"
_ "github.com/influxdata/telegraf/plugins/outputs/stackdriver"
+ _ "github.com/influxdata/telegraf/plugins/outputs/sumologic"
+ _ "github.com/influxdata/telegraf/plugins/outputs/syslog"
+ _ "github.com/influxdata/telegraf/plugins/outputs/warp10"
_ "github.com/influxdata/telegraf/plugins/outputs/wavefront"
)
diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md
index fe44ea4edccd8..04715f8e352ad 100644
--- a/plugins/outputs/amqp/README.md
+++ b/plugins/outputs/amqp/README.md
@@ -1,6 +1,6 @@
# AMQP Output Plugin
-This plugin writes to a AMQP 0-9-1 Exchange, a promenent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).
+This plugin writes to a AMQP 0-9-1 Exchange, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).
This plugin does not bind the exchange to a queue.
@@ -33,14 +33,14 @@ For an introduction to AMQP see:
# exchange_type = "topic"
## If true, exchange will be passively declared.
- # exchange_declare_passive = false
+ # exchange_passive = false
## Exchange durability can be either "transient" or "durable".
# exchange_durability = "durable"
## Additional exchange arguments.
# exchange_arguments = { }
- # exchange_arguments = {"hash_propery" = "timestamp"}
+ # exchange_arguments = {"hash_property" = "timestamp"}
## Authentication credentials for the PLAIN auth_method.
# username = ""
@@ -92,6 +92,14 @@ For an introduction to AMQP see:
## Recommended to set to true.
# use_batch_format = false
+ ## Content encoding for message payloads, can be set to "gzip" to or
+ ## "identity" to apply no encoding.
+ ##
+ ## Please note that when use_batch_format = false each amqp message contains only
+ ## a single metric, it is recommended to use compression with batch format
+ ## for best results.
+ # content_encoding = "identity"
+
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go
index dd45f72dce83d..0c7e04da7e14d 100644
--- a/plugins/outputs/amqp/amqp.go
+++ b/plugins/outputs/amqp/amqp.go
@@ -9,10 +9,9 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
-
"github.com/streadway/amqp"
)
@@ -55,6 +54,7 @@ type AMQP struct {
Headers map[string]string `toml:"headers"`
Timeout internal.Duration `toml:"timeout"`
UseBatchFormat bool `toml:"use_batch_format"`
+ ContentEncoding string `toml:"content_encoding"`
tls.ClientConfig
serializer serializers.Serializer
@@ -62,6 +62,7 @@ type AMQP struct {
client Client
config *ClientConfig
sentMessages int
+ encoder internal.ContentEncoder
}
type Client interface {
@@ -91,14 +92,14 @@ var sampleConfig = `
# exchange_type = "topic"
## If true, exchange will be passively declared.
- # exchange_declare_passive = false
+ # exchange_passive = false
## Exchange durability can be either "transient" or "durable".
# exchange_durability = "durable"
## Additional exchange arguments.
# exchange_arguments = { }
- # exchange_arguments = {"hash_propery" = "timestamp"}
+ # exchange_arguments = {"hash_property" = "timestamp"}
## Authentication credentials for the PLAIN auth_method.
# username = ""
@@ -150,6 +151,14 @@ var sampleConfig = `
## Recommended to set to true.
# use_batch_format = false
+ ## Content encoding for message payloads, can be set to "gzip" to or
+ ## "identity" to apply no encoding.
+ ##
+ ## Please note that when use_batch_format = false each amqp message contains only
+ ## a single metric, it is recommended to use compression with batch format
+ ## for best results.
+ # content_encoding = "identity"
+
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -178,11 +187,16 @@ func (q *AMQP) Connect() error {
q.config = config
}
- client, err := q.connect(q.config)
+ var err error
+ q.encoder, err = internal.NewContentEncoder(q.ContentEncoding)
+ if err != nil {
+ return err
+ }
+
+ q.client, err = q.connect(q.config)
if err != nil {
return err
}
- q.client = client
return nil
}
@@ -206,8 +220,8 @@ func (q *AMQP) routingKey(metric telegraf.Metric) string {
func (q *AMQP) Write(metrics []telegraf.Metric) error {
batches := make(map[string][]telegraf.Metric)
- if q.ExchangeType == "direct" || q.ExchangeType == "header" {
- // Since the routing_key is ignored for these exchange types send as a
+ if q.ExchangeType == "header" {
+ // Since the routing_key is ignored for this exchange type send as a
// single batch.
batches[""] = metrics
} else {
@@ -228,6 +242,11 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error {
return err
}
+ body, err = q.encoder.Encode(body)
+ if err != nil {
+ return err
+ }
+
err = q.publish(key, body)
if err != nil {
// If this is the first attempt to publish and the connection is
@@ -282,7 +301,8 @@ func (q *AMQP) serialize(metrics []telegraf.Metric) ([]byte, error) {
for _, metric := range metrics {
octets, err := q.serializer.Serialize(metric)
if err != nil {
- return nil, err
+ log.Printf("D! [outputs.amqp] Could not serialize metric: %v", err)
+ continue
}
_, err = buf.Write(octets)
if err != nil {
@@ -299,6 +319,7 @@ func (q *AMQP) makeClientConfig() (*ClientConfig, error) {
exchange: q.Exchange,
exchangeType: q.ExchangeType,
exchangePassive: q.ExchangePassive,
+ encoding: q.ContentEncoding,
timeout: q.Timeout.Duration,
}
diff --git a/plugins/outputs/amqp/client.go b/plugins/outputs/amqp/client.go
index 0ee45d9500261..8c230b706b09a 100644
--- a/plugins/outputs/amqp/client.go
+++ b/plugins/outputs/amqp/client.go
@@ -19,6 +19,7 @@ type ClientConfig struct {
exchangePassive bool
exchangeDurable bool
exchangeArguments amqp.Table
+ encoding string
headers amqp.Table
deliveryMode uint8
tlsConfig *tls.Config
@@ -77,6 +78,10 @@ func Connect(config *ClientConfig) (*client, error) {
}
func (c *client) DeclareExchange() error {
+ if c.config.exchange == "" {
+ return nil
+ }
+
var err error
if c.config.exchangePassive {
err = c.channel.ExchangeDeclarePassive(
@@ -114,10 +119,11 @@ func (c *client) Publish(key string, body []byte) error {
false, // mandatory
false, // immediate
amqp.Publishing{
- Headers: c.config.headers,
- ContentType: "text/plain",
- Body: body,
- DeliveryMode: c.config.deliveryMode,
+ Headers: c.config.headers,
+ ContentType: "text/plain",
+ ContentEncoding: c.config.encoding,
+ Body: body,
+ DeliveryMode: c.config.deliveryMode,
})
}
diff --git a/plugins/outputs/application_insights/README.md b/plugins/outputs/application_insights/README.md
index c64e844885565..34017a89f0bab 100644
--- a/plugins/outputs/application_insights/README.md
+++ b/plugins/outputs/application_insights/README.md
@@ -7,6 +7,9 @@ This plugin writes telegraf metrics to [Azure Application Insights](https://azur
[[outputs.application_insights]]
## Instrumentation key of the Application Insights resource.
instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
+
+ ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
+ # endpoint_url = "https://dc.services.visualstudio.com/v2/track"
## Timeout for closing (default: 5s).
# timeout = "5s"
diff --git a/plugins/outputs/application_insights/application_insights.go b/plugins/outputs/application_insights/application_insights.go
index 3da4202187126..3ab16af6fc313 100644
--- a/plugins/outputs/application_insights/application_insights.go
+++ b/plugins/outputs/application_insights/application_insights.go
@@ -24,6 +24,7 @@ type DiagnosticsMessageSubscriber interface {
type ApplicationInsights struct {
InstrumentationKey string
+ EndpointURL string
Timeout internal.Duration
EnableDiagnosticLogging bool
ContextTagSources map[string]string
@@ -43,6 +44,9 @@ var (
sampleConfig = `
## Instrumentation key of the Application Insights resource.
instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
+
+ ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
+ # endpoint_url = "https://dc.services.visualstudio.com/v2/track"
## Timeout for closing (default: 5s).
# timeout = "5s"
@@ -76,7 +80,7 @@ func (a *ApplicationInsights) Connect() error {
}
if a.transmitter == nil {
- a.transmitter = NewTransmitter(a.InstrumentationKey)
+ a.transmitter = NewTransmitter(a.InstrumentationKey, a.EndpointURL)
}
if a.EnableDiagnosticLogging && a.diagMsgSubscriber != nil {
diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go
index 561e6c9f93fea..5a017823c02db 100644
--- a/plugins/outputs/application_insights/application_insights_test.go
+++ b/plugins/outputs/application_insights/application_insights_test.go
@@ -184,7 +184,7 @@ func TestSimpleMetricCreated(t *testing.T) {
{"neither value nor count", map[string]interface{}{"v1": "alpha", "v2": 45.8}, "", []string{"v2"}},
{"value is of wrong type", map[string]interface{}{"value": "alpha", "count": 15}, "", []string{"count"}},
{"count is of wrong type", map[string]interface{}{"value": 23.77, "count": 7.5}, "", []string{"count", "value"}},
- {"count is out of range", map[string]interface{}{"value": -98.45E4, "count": math.MaxUint64 - uint64(20)}, "", []string{"value", "count"}},
+ {"count is out of range", map[string]interface{}{"value": -98.45e4, "count": math.MaxUint64 - uint64(20)}, "", []string{"value", "count"}},
{"several additional fields", map[string]interface{}{"alpha": 10, "bravo": "bravo", "charlie": 30, "delta": 40.7}, "", []string{"alpha", "charlie", "delta"}},
}
@@ -288,7 +288,7 @@ func TestTagsAppliedToTelemetry(t *testing.T) {
transmitter.AssertNumberOfCalls(t, "Track", len(tt.metricValueFields))
transmitter.AssertCalled(t, "Track", mock.AnythingOfType("*appinsights.MetricTelemetry"))
- // Will verify that all original tags are present in telemetry.Properies map
+ // Will verify that all original tags are present in telemetry.Properties map
verifyAdditionalTelemetry(assert, m, transmitter, tt.metricValueFields, metricName)
}
diff --git a/plugins/outputs/application_insights/transmitter.go b/plugins/outputs/application_insights/transmitter.go
index 44bc1b80626b5..024ea32809fb0 100644
--- a/plugins/outputs/application_insights/transmitter.go
+++ b/plugins/outputs/application_insights/transmitter.go
@@ -1,13 +1,21 @@
package application_insights
-import "github.com/Microsoft/ApplicationInsights-Go/appinsights"
+import (
+ "github.com/Microsoft/ApplicationInsights-Go/appinsights"
+)
type Transmitter struct {
client appinsights.TelemetryClient
}
-func NewTransmitter(ikey string) *Transmitter {
- return &Transmitter{client: appinsights.NewTelemetryClient(ikey)}
+func NewTransmitter(ikey string, endpointURL string) *Transmitter {
+ if len(endpointURL) == 0 {
+ return &Transmitter{client: appinsights.NewTelemetryClient(ikey)}
+ } else {
+ telemetryConfig := appinsights.NewTelemetryConfiguration(ikey)
+ telemetryConfig.EndpointUrl = endpointURL
+ return &Transmitter{client: appinsights.NewTelemetryClientFromConfig(telemetryConfig)}
+ }
}
func (t *Transmitter) Track(telemetry appinsights.Telemetry) {
diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go
index 408976c536338..f2b1db1dd6868 100644
--- a/plugins/outputs/azure_monitor/azure_monitor.go
+++ b/plugins/outputs/azure_monitor/azure_monitor.go
@@ -43,6 +43,35 @@ type AzureMonitor struct {
MetricOutsideWindow selfstat.Stat
}
+// VirtualMachineMetadata contains information about a VM from the metadata service
+type virtualMachineMetadata struct {
+ Compute struct {
+ Location string `json:"location"`
+ Name string `json:"name"`
+ ResourceGroupName string `json:"resourceGroupName"`
+ SubscriptionID string `json:"subscriptionId"`
+ VMScaleSetName string `json:"vmScaleSetName"`
+ } `json:"compute"`
+}
+
+func (m *virtualMachineMetadata) ResourceID() string {
+ if m.Compute.VMScaleSetName != "" {
+ return fmt.Sprintf(
+ resourceIDScaleSetTemplate,
+ m.Compute.SubscriptionID,
+ m.Compute.ResourceGroupName,
+ m.Compute.VMScaleSetName,
+ )
+ } else {
+ return fmt.Sprintf(
+ resourceIDTemplate,
+ m.Compute.SubscriptionID,
+ m.Compute.ResourceGroupName,
+ m.Compute.Name,
+ )
+ }
+}
+
type dimension struct {
name string
value string
@@ -63,11 +92,12 @@ const (
defaultNamespacePrefix = "Telegraf/"
defaultAuthResource = "https://monitoring.azure.com/"
- vmInstanceMetadataURL = "http://169.254.169.254/metadata/instance?api-version=2017-12-01"
- resourceIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"
- urlTemplate = "https://%s.monitoring.azure.com%s/metrics"
- urlOverrideTemplate = "%s%s/metrics"
- maxRequestBodySize = 4000000
+ vmInstanceMetadataURL = "http://169.254.169.254/metadata/instance?api-version=2017-12-01"
+ resourceIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"
+ resourceIDScaleSetTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s"
+ urlTemplate = "https://%s.monitoring.azure.com%s/metrics"
+ urlOverrideTemplate = "%s%s/metrics"
+ maxRequestBodySize = 4000000
)
var sampleConfig = `
@@ -200,31 +230,17 @@ func vmInstanceMetadata(c *http.Client) (string, string, error) {
return "", "", err
}
if resp.StatusCode >= 300 || resp.StatusCode < 200 {
- return "", "", fmt.Errorf("unable to fetch instance metadata: [%v] %s", resp.StatusCode, body)
- }
-
- // VirtualMachineMetadata contains information about a VM from the metadata service
- type VirtualMachineMetadata struct {
- Compute struct {
- Location string `json:"location"`
- Name string `json:"name"`
- ResourceGroupName string `json:"resourceGroupName"`
- SubscriptionID string `json:"subscriptionId"`
- } `json:"compute"`
+ return "", "", fmt.Errorf("unable to fetch instance metadata: [%s] %d",
+ vmInstanceMetadataURL, resp.StatusCode)
}
- var metadata VirtualMachineMetadata
+ var metadata virtualMachineMetadata
if err := json.Unmarshal(body, &metadata); err != nil {
return "", "", err
}
region := metadata.Compute.Location
- resourceID := fmt.Sprintf(
- resourceIDTemplate,
- metadata.Compute.SubscriptionID,
- metadata.Compute.ResourceGroupName,
- metadata.Compute.Name,
- )
+ resourceID := metadata.ResourceID()
return region, resourceID, nil
}
@@ -376,7 +392,7 @@ func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) {
var dimensionValues []string
for _, tag := range m.TagList() {
// Azure custom metrics service supports up to 10 dimensions
- if len(dimensionNames) > 10 {
+ if len(dimensionNames) >= 10 {
continue
}
diff --git a/plugins/outputs/cloud_pubsub/README.md b/plugins/outputs/cloud_pubsub/README.md
index 3a4088b61b92e..d3d5e2fe30063 100644
--- a/plugins/outputs/cloud_pubsub/README.md
+++ b/plugins/outputs/cloud_pubsub/README.md
@@ -51,12 +51,12 @@ generate it using `telegraf --usage cloud_pubsub`.
## Optional. Specifies a timeout for requests to the PubSub API.
# publish_timeout = "30s"
-
+
## Optional. If true, published PubSub message data will be base64-encoded.
# base64_data = false
-
+
## Optional. PubSub attributes to add to metrics.
- # [[inputs.pubsub.attributes]]
+ # [outputs.cloud_pubsub.attributes]
# my_attr = "tag_value"
```
diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go
index c8fbf242da21c..826a75e1c1c68 100644
--- a/plugins/outputs/cloud_pubsub/pubsub.go
+++ b/plugins/outputs/cloud_pubsub/pubsub.go
@@ -2,11 +2,12 @@ package cloud_pubsub
import (
"context"
+ "encoding/base64"
"fmt"
+ "log"
"sync"
"cloud.google.com/go/pubsub"
- "encoding/base64"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/outputs"
@@ -61,7 +62,7 @@ const sampleConfig = `
# base64_data = false
## Optional. PubSub attributes to add to metrics.
- # [[inputs.pubsub.attributes]]
+ # [outputs.cloud_pubsub.attributes]
# my_attr = "tag_value"
`
@@ -229,7 +230,8 @@ func (ps *PubSub) toMessages(metrics []telegraf.Metric) ([]*pubsub.Message, erro
for i, m := range metrics {
b, err := ps.serializer.Serialize(m)
if err != nil {
- return nil, err
+ log.Printf("D! [outputs.cloud_pubsub] Could not serialize metric: %v", err)
+ continue
}
if ps.Base64Data {
diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/pubsub_test.go
index 76eb518d79383..6911ef139cb1e 100644
--- a/plugins/outputs/cloud_pubsub/pubsub_test.go
+++ b/plugins/outputs/cloud_pubsub/pubsub_test.go
@@ -3,8 +3,10 @@ package cloud_pubsub
import (
"testing"
- "cloud.google.com/go/pubsub"
"encoding/base64"
+
+ "cloud.google.com/go/pubsub"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/testutil"
@@ -73,7 +75,7 @@ func TestPubSub_WriteMultiple(t *testing.T) {
for _, testM := range testMetrics {
verifyRawMetricPublished(t, testM.m, topic.published)
}
- assert.Equalf(t, 1, topic.bundleCount, "unexpected bundle count")
+ assert.Equalf(t, 1, topic.getBundleCount(), "unexpected bundle count")
}
func TestPubSub_WriteOverCountThreshold(t *testing.T) {
@@ -97,7 +99,7 @@ func TestPubSub_WriteOverCountThreshold(t *testing.T) {
for _, testM := range testMetrics {
verifyRawMetricPublished(t, testM.m, topic.published)
}
- assert.Equalf(t, 2, topic.bundleCount, "unexpected bundle count")
+ assert.Equalf(t, 2, topic.getBundleCount(), "unexpected bundle count")
}
func TestPubSub_WriteOverByteThreshold(t *testing.T) {
@@ -120,7 +122,7 @@ func TestPubSub_WriteOverByteThreshold(t *testing.T) {
for _, testM := range testMetrics {
verifyRawMetricPublished(t, testM.m, topic.published)
}
- assert.Equalf(t, 2, topic.bundleCount, "unexpected bundle count")
+ assert.Equalf(t, 2, topic.getBundleCount(), "unexpected bundle count")
}
func TestPubSub_WriteBase64Single(t *testing.T) {
diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go
index d78d4fbd42b80..e23a765366704 100644
--- a/plugins/outputs/cloud_pubsub/topic_stubbed.go
+++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go
@@ -9,8 +9,9 @@ import (
"testing"
"time"
- "cloud.google.com/go/pubsub"
"encoding/base64"
+
+ "cloud.google.com/go/pubsub"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/parsers"
@@ -123,8 +124,7 @@ func (t *stubTopic) Publish(ctx context.Context, msg *pubsub.Message) publishRes
}
bundled := &bundledMsg{msg, r}
- err := t.bundler.Add(bundled, len(msg.Data))
- if err != nil {
+ if err := t.bundler.Add(bundled, len(msg.Data)); err != nil {
t.Fatalf("unexpected error while adding to bundle: %v", err)
}
return r
@@ -210,3 +210,9 @@ func (r *stubResult) Get(ctx context.Context) (string, error) {
return fmt.Sprintf("id-%s", r.metricIds[0]), nil
}
}
+
+func (t *stubTopic) getBundleCount() int {
+ t.bLock.Lock()
+ defer t.bLock.Unlock()
+ return t.bundleCount
+}
diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md
index 31619263f26f9..418fe86ffa489 100644
--- a/plugins/outputs/cloudwatch/README.md
+++ b/plugins/outputs/cloudwatch/README.md
@@ -45,4 +45,7 @@ also save AWS API cost. If enable this flag, this plugin would parse the require
[CloudWatch statistic fields](https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatch/#StatisticSet)
(count, min, max, and sum) and send them to CloudWatch. You could use `basicstats`
aggregator to calculate those fields. If not all statistic fields are available,
-all fields would still be sent as raw metrics.
\ No newline at end of file
+all fields would still be sent as raw metrics.
+
+### high_resolution_metrics
+Enable high resolution metrics (1 second precision) instead of standard ones (60 seconds precision)
\ No newline at end of file
diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go
index aaefa89ec2383..5e59ba2aaec1d 100644
--- a/plugins/outputs/cloudwatch/cloudwatch.go
+++ b/plugins/outputs/cloudwatch/cloudwatch.go
@@ -11,7 +11,7 @@ import (
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/influxdata/telegraf"
- internalaws "github.com/influxdata/telegraf/internal/config/aws"
+ internalaws "github.com/influxdata/telegraf/config/aws"
"github.com/influxdata/telegraf/plugins/outputs"
)
@@ -25,8 +25,9 @@ type CloudWatch struct {
Token string `toml:"token"`
EndpointURL string `toml:"endpoint_url"`
- Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace
- svc *cloudwatch.CloudWatch
+ Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace
+ HighResolutionMetrics bool `toml:"high_resolution_metrics"`
+ svc *cloudwatch.CloudWatch
WriteStatistics bool `toml:"write_statistics"`
}
@@ -47,11 +48,12 @@ type cloudwatchField interface {
}
type statisticField struct {
- metricName string
- fieldName string
- tags map[string]string
- values map[statisticType]float64
- timestamp time.Time
+ metricName string
+ fieldName string
+ tags map[string]string
+ values map[statisticType]float64
+ timestamp time.Time
+ storageResolution int64
}
func (f *statisticField) addValue(sType statisticType, value float64) {
@@ -81,6 +83,7 @@ func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum {
Sum: aws.Float64(sum),
SampleCount: aws.Float64(count),
},
+ StorageResolution: aws.Int64(f.storageResolution),
}
datums = append(datums, datum)
@@ -126,11 +129,12 @@ func (f *statisticField) hasAllFields() bool {
}
type valueField struct {
- metricName string
- fieldName string
- tags map[string]string
- value float64
- timestamp time.Time
+ metricName string
+ fieldName string
+ tags map[string]string
+ value float64
+ timestamp time.Time
+ storageResolution int64
}
func (f *valueField) addValue(sType statisticType, value float64) {
@@ -143,10 +147,11 @@ func (f *valueField) buildDatum() []*cloudwatch.MetricDatum {
return []*cloudwatch.MetricDatum{
{
- MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")),
- Value: aws.Float64(f.value),
- Dimensions: BuildDimensions(f.tags),
- Timestamp: aws.Time(f.timestamp),
+ MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")),
+ Value: aws.Float64(f.value),
+ Dimensions: BuildDimensions(f.tags),
+ Timestamp: aws.Time(f.timestamp),
+ StorageResolution: aws.Int64(f.storageResolution),
},
}
}
@@ -186,6 +191,9 @@ var sampleConfig = `
## You could use basicstats aggregator to calculate those fields. If not all statistic
## fields are available, all fields would still be sent as raw metrics.
# write_statistics = false
+
+ ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision)
+ # high_resolution_metrics = false
`
func (c *CloudWatch) SampleConfig() string {
@@ -220,7 +228,7 @@ func (c *CloudWatch) Write(metrics []telegraf.Metric) error {
var datums []*cloudwatch.MetricDatum
for _, m := range metrics {
- d := BuildMetricDatum(c.WriteStatistics, m)
+ d := BuildMetricDatum(c.WriteStatistics, c.HighResolutionMetrics, m)
datums = append(datums, d...)
}
@@ -278,10 +286,14 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch
// Make a MetricDatum from telegraf.Metric. It would check if all required fields of
// cloudwatch.StatisticSet are available. If so, it would build MetricDatum from statistic values.
// Otherwise, fields would still been built independently.
-func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch.MetricDatum {
+func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []*cloudwatch.MetricDatum {
fields := make(map[string]cloudwatchField)
tags := point.Tags()
+ storageResolution := int64(60)
+ if highResolutionMetrics {
+ storageResolution = 1
+ }
for k, v := range point.Fields() {
@@ -297,11 +309,12 @@ func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch.
// If statistic metric is not enabled or non-statistic type, just take current field as a value field.
if !buildStatistic || sType == statisticTypeNone {
fields[k] = &valueField{
- metricName: point.Name(),
- fieldName: k,
- tags: tags,
- timestamp: point.Time(),
- value: val,
+ metricName: point.Name(),
+ fieldName: k,
+ tags: tags,
+ timestamp: point.Time(),
+ value: val,
+ storageResolution: storageResolution,
}
continue
}
@@ -317,6 +330,7 @@ func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch.
values: map[statisticType]float64{
sType: val,
},
+ storageResolution: storageResolution,
}
} else {
// Add new statistic value to this field
diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go
index acadca8424f8b..b2466e4d046d4 100644
--- a/plugins/outputs/cloudwatch/cloudwatch_test.go
+++ b/plugins/outputs/cloudwatch/cloudwatch_test.go
@@ -75,11 +75,11 @@ func TestBuildMetricDatums(t *testing.T) {
testutil.TestMetric(float64(1.174272e+108)), // largest should be 1.174271e+108
}
for _, point := range validMetrics {
- datums := BuildMetricDatum(false, point)
+ datums := BuildMetricDatum(false, false, point)
assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point))
}
for _, point := range invalidMetrics {
- datums := BuildMetricDatum(false, point)
+ datums := BuildMetricDatum(false, false, point)
assert.Equal(0, len(datums), fmt.Sprintf("Valid point should not create a Datum {value: %v}", point))
}
@@ -89,7 +89,7 @@ func TestBuildMetricDatums(t *testing.T) {
map[string]interface{}{"value_max": float64(10), "value_min": float64(0), "value_sum": float64(100), "value_count": float64(20)},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
)
- datums := BuildMetricDatum(true, statisticMetric)
+ datums := BuildMetricDatum(true, false, statisticMetric)
assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", statisticMetric))
multiFieldsMetric, _ := metric.New(
@@ -98,7 +98,7 @@ func TestBuildMetricDatums(t *testing.T) {
map[string]interface{}{"valueA": float64(10), "valueB": float64(0), "valueC": float64(100), "valueD": float64(20)},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
)
- datums = BuildMetricDatum(true, multiFieldsMetric)
+ datums = BuildMetricDatum(true, false, multiFieldsMetric)
assert.Equal(4, len(datums), fmt.Sprintf("Each field should create a Datum {value: %v}", multiFieldsMetric))
multiStatisticMetric, _ := metric.New(
@@ -112,10 +112,27 @@ func TestBuildMetricDatums(t *testing.T) {
},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
)
- datums = BuildMetricDatum(true, multiStatisticMetric)
+ datums = BuildMetricDatum(true, false, multiStatisticMetric)
assert.Equal(7, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", multiStatisticMetric))
}
+func TestMetricDatumResolution(t *testing.T) {
+ const expectedStandardResolutionValue = int64(60)
+ const expectedHighResolutionValue = int64(1)
+
+ assert := assert.New(t)
+
+ metric := testutil.TestMetric(1)
+
+ standardResolutionDatum := BuildMetricDatum(false, false, metric)
+ actualStandardResolutionValue := *standardResolutionDatum[0].StorageResolution
+ assert.Equal(expectedStandardResolutionValue, actualStandardResolutionValue)
+
+ highResolutionDatum := BuildMetricDatum(false, true, metric)
+ actualHighResolutionValue := *highResolutionDatum[0].StorageResolution
+ assert.Equal(expectedHighResolutionValue, actualHighResolutionValue)
+}
+
func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) {
input := testutil.MustMetric(
"cpu",
@@ -129,7 +146,7 @@ func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) {
time.Unix(0, 0),
)
- datums := BuildMetricDatum(true, input)
+ datums := BuildMetricDatum(true, false, input)
require.Len(t, datums[0].Dimensions, 1)
}
diff --git a/plugins/outputs/datadog/README.md b/plugins/outputs/datadog/README.md
index 0563d64449743..ad1c7a02592e1 100644
--- a/plugins/outputs/datadog/README.md
+++ b/plugins/outputs/datadog/README.md
@@ -1,9 +1,30 @@
# Datadog Output Plugin
-This plugin writes to the [Datadog Metrics API](http://docs.datadoghq.com/api/#metrics)
-and requires an `apikey` which can be obtained [here](https://app.datadoghq.com/account/settings#api)
-for the account.
+This plugin writes to the [Datadog Metrics API][metrics] and requires an
+`apikey` which can be obtained [here][apikey] for the account.
-If the point value being sent cannot be converted to a float64, the metric is skipped.
-Metrics are grouped by converting any `_` characters to `.` in the Point Name.
\ No newline at end of file
+### Configuration
+
+```toml
+[[outputs.datadog]]
+ ## Datadog API key
+ apikey = "my-secret-key"
+
+ ## Connection timeout.
+ # timeout = "5s"
+
+ ## Write URL override; useful for debugging.
+ # url = "https://app.datadoghq.com/api/v1/series"
+```
+
+### Metrics
+
+Datadog metric names are formed by joining the Telegraf metric name and the field
+key with a `.` character.
+
+Field values are converted to floating point numbers. Strings and floats that
+cannot be sent over JSON, namely NaN and Inf, are ignored.
+
+[metrics]: https://docs.datadoghq.com/api/v1/metrics/#submit-metrics
+[apikey]: https://app.datadoghq.com/account/settings#api
diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go
index 62e73f11548a6..2d1a937883655 100644
--- a/plugins/outputs/datadog/datadog.go
+++ b/plugins/outputs/datadog/datadog.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"log"
+ "math"
"net/http"
"net/url"
"strings"
@@ -24,13 +25,13 @@ type Datadog struct {
var sampleConfig = `
## Datadog API key
- apikey = "my-secret-key" # required.
-
- # The base endpoint URL can optionally be specified but it defaults to:
- #url = "https://app.datadoghq.com/api/v1/series"
+ apikey = "my-secret-key"
## Connection timeout.
# timeout = "5s"
+
+ ## Write URL override; useful for debugging.
+ # url = "https://app.datadoghq.com/api/v1/series"
`
type TimeSeries struct {
@@ -63,9 +64,6 @@ func (d *Datadog) Connect() error {
}
func (d *Datadog) Write(metrics []telegraf.Metric) error {
- if len(metrics) == 0 {
- return nil
- }
ts := TimeSeries{}
tempSeries := []*Metric{}
metricCounter := 0
@@ -75,6 +73,10 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error {
metricTags := buildTags(m.TagList())
host, _ := m.GetTag("host")
+ if len(dogMs) == 0 {
+ continue
+ }
+
for fieldName, dogM := range dogMs {
// name of the datadog measurement
var dname string
@@ -98,6 +100,10 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error {
}
}
+ if len(tempSeries) == 0 {
+ return nil
+ }
+
redactedApiKey := "****************"
ts.Series = make([]*Metric, metricCounter)
copy(ts.Series, tempSeries[0:])
@@ -166,9 +172,12 @@ func buildTags(tagList []*telegraf.Tag) []string {
}
func verifyValue(v interface{}) bool {
- switch v.(type) {
+ switch v := v.(type) {
case string:
return false
+ case float64:
+ // The payload will be encoded as JSON, which does not allow NaN or Inf.
+ return !math.IsNaN(v) && !math.IsInf(v, 0)
}
return true
}
diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go
index 7bbc91254efdd..be8541ee8a92d 100644
--- a/plugins/outputs/datadog/datadog_test.go
+++ b/plugins/outputs/datadog/datadog_test.go
@@ -3,15 +3,15 @@ package datadog
import (
"encoding/json"
"fmt"
+ "math"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
- "github.com/influxdata/telegraf/testutil"
-
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -249,3 +249,45 @@ func TestVerifyValue(t *testing.T) {
}
}
}
+
+func TestNaNIsSkipped(t *testing.T) {
+ plugin := &Datadog{
+ Apikey: "testing",
+ URL: "", // No request will be sent because all fields are skipped
+ }
+
+ err := plugin.Connect()
+ require.NoError(t, err)
+
+ err = plugin.Write([]telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": math.NaN(),
+ },
+ time.Now()),
+ })
+ require.NoError(t, err)
+}
+
+func TestInfIsSkipped(t *testing.T) {
+ plugin := &Datadog{
+ Apikey: "testing",
+ URL: "", // No request will be sent because all fields are skipped
+ }
+
+ err := plugin.Connect()
+ require.NoError(t, err)
+
+ err = plugin.Write([]telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": math.Inf(0),
+ },
+ time.Now()),
+ })
+ require.NoError(t, err)
+}
diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md
new file mode 100644
index 0000000000000..522d8e4a85024
--- /dev/null
+++ b/plugins/outputs/dynatrace/README.md
@@ -0,0 +1,43 @@
+# Dynatrace Output Plugin
+
+This plugin is sending telegraf metrics to [Dynatrace](www.dynatrace.com). It has two operational modes.
+
+## Running alongside Dynatrace OneAgent
+
+if you run the Telegraf agent on a host or VM that is monitored by the Dynatrace OneAgent then you only need to enable the plugin but need no further configuration. The Dynatrace telegraf output plugin will send all metrics to the OneAgent which will use its secure and load balanced connection to send the metrics to your Dynatrace SaaS or Managed environment.
+
+## Running standalone
+
+If you run the Telegraf agent on a host or VM without a OneAgent you will need to configure the environment API endpoint to send the metrics to and an API token for security.
+
+The endpoint for the Dynatrace Metrics API is
+
+* Managed https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest
+* SaaS https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest
+
+You can learn more about how to use the Dynatrace API [here](https://www.dynatrace.com/support/help/dynatrace-api/)
+
+You will also need to configure an API token for secure access. Find out how to create a token [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/tokens/) or simply navigate to **Settings > Integration > Dynatrace API** in your Dynatrace environment and create a token with Dynatrace API and create a new token with
+'Ingest metrics data points' access scope enabled.
+
+## Configuration
+
+```toml
+[[outputs.dynatrace]]
+ ## Leave empty or use the local ingest endpoint of your OneAgent monitored host (e.g.: http://127.0.0.1:14499/metrics/ingest).
+ ## Set Dynatrace environment URL (e.g.: https://YOUR_DOMAIN/api/v2/metrics/ingest) if you do not use a OneAgent
+ url = ""
+ api_token = ""
+ ## Optional prefix for metric names (e.g.: "telegraf.")
+ prefix = "telegraf."
+ ## Flag for skipping the tls certificate check, just for testing purposes, should be false by default
+ insecure_skip_verify = false
+
+```
+
+## Requirements
+
+You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher
+
+## Limitations
+Telegraf measurements which can't be converted to a float64 are skipped.
diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go
new file mode 100644
index 0000000000000..cbab667464dac
--- /dev/null
+++ b/plugins/outputs/dynatrace/dynatrace.go
@@ -0,0 +1,273 @@
+package dynatrace
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/outputs"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ oneAgentMetricsUrl = "http://127.0.0.1:14499/metrics/ingest"
+)
+
+var (
+ reNameAllowedCharList = regexp.MustCompile("[^A-Za-z0-9.]+")
+ maxDimKeyLen = 100
+ maxMetricKeyLen = 250
+)
+
+// Dynatrace Configuration for the Dynatrace output plugin
+type Dynatrace struct {
+ URL string `toml:"url"`
+ APIToken string `toml:"api_token"`
+ Prefix string `toml:"prefix"`
+ Log telegraf.Logger `toml:"-"`
+ Timeout internal.Duration `toml:"timeout"`
+
+ tls.ClientConfig
+
+ client *http.Client
+}
+
+const sampleConfig = `
+ ## For usage with the Dynatrace OneAgent you can omit any configuration,
+ ## the only requirement is that the OneAgent is running on the same host.
+ ## Only setup environment url and token if you want to monitor a Host without the OneAgent present.
+ ##
+ ## Your Dynatrace environment URL.
+ ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default)
+ ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest"
+ ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest"
+ url = ""
+
+ ## Your Dynatrace API token.
+ ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API
+ ## The API token needs data ingest scope permission. When using OneAgent, no API token is required.
+ api_token = ""
+
+ ## Optional prefix for metric names (e.g.: "telegraf.")
+ prefix = "telegraf."
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## Optional flag for ignoring tls certificate check
+ # insecure_skip_verify = false
+
+
+ ## Connection timeout, defaults to "5s" if not set.
+ timeout = "5s"
+`
+
+// Connect Connects the Dynatrace output plugin to the Telegraf stream
+func (d *Dynatrace) Connect() error {
+ return nil
+}
+
+// Close Closes the Dynatrace output plugin
+func (d *Dynatrace) Close() error {
+ d.client = nil
+ return nil
+}
+
+// SampleConfig Returns a sample configuration for the Dynatrace output plugin
+func (d *Dynatrace) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description returns the description for the Dynatrace output plugin
+func (d *Dynatrace) Description() string {
+ return "Send telegraf metrics to a Dynatrace environment"
+}
+
+// Normalizes a metric keys or metric dimension identifiers
+// according to Dynatrace format.
+func (d *Dynatrace) normalize(s string, max int) (string, error) {
+ s = reNameAllowedCharList.ReplaceAllString(s, "_")
+
+ // Strip Digits and underscores if they are at the beginning of the string
+ normalizedString := strings.TrimLeft(s, "_0123456789")
+
+ for strings.HasPrefix(normalizedString, "_") {
+ normalizedString = normalizedString[1:]
+ }
+
+ if len(normalizedString) > max {
+ normalizedString = normalizedString[:max]
+ }
+
+ for strings.HasSuffix(normalizedString, "_") {
+ normalizedString = normalizedString[:len(normalizedString)-1]
+ }
+
+ if len(normalizedString) == 0 {
+ return "", fmt.Errorf("error normalizing the string: %s", s)
+ }
+ return normalizedString, nil
+}
+
+func (d *Dynatrace) escape(v string) string {
+ return strconv.Quote(v)
+}
+
+func (d *Dynatrace) Write(metrics []telegraf.Metric) error {
+ var buf bytes.Buffer
+ var tagb bytes.Buffer
+ if len(metrics) == 0 {
+ return nil
+ }
+
+ for _, metric := range metrics {
+ // first write the tags into a buffer
+ tagb.Reset()
+ if len(metric.Tags()) > 0 {
+ keys := make([]string, 0, len(metric.Tags()))
+ for k := range metric.Tags() {
+ keys = append(keys, k)
+ }
+ // sort tag keys to expect the same order in ech run
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ tagKey, err := d.normalize(k, maxDimKeyLen)
+ if err != nil {
+ continue
+ }
+ fmt.Fprintf(&tagb, ",%s=%s", strings.ToLower(tagKey), d.escape(metric.Tags()[k]))
+
+ }
+ }
+ if len(metric.Fields()) > 0 {
+ for k, v := range metric.Fields() {
+ var value string
+ switch v := v.(type) {
+ case string:
+ continue
+ case float64:
+ if !math.IsNaN(v) && !math.IsInf(v, 0) {
+ value = fmt.Sprintf("%f", v)
+ } else {
+ continue
+ }
+ case uint64:
+ value = strconv.FormatUint(v, 10)
+ case int64:
+ value = strconv.FormatInt(v, 10)
+ case bool:
+ if v {
+ value = "1"
+ } else {
+ value = "0"
+ }
+ default:
+ d.Log.Debugf("Dynatrace type not supported! %s", v)
+ continue
+ }
+
+ // metric name
+ metricKey, err := d.normalize(k, maxMetricKeyLen)
+ if err != nil {
+ continue
+ }
+
+ metricID, err := d.normalize(d.Prefix+metric.Name()+"."+metricKey, maxMetricKeyLen)
+ // write metric name combined with its field
+ if err != nil {
+ continue
+ }
+ fmt.Fprintf(&buf, "%s", metricID)
+ // add the tag string
+ fmt.Fprintf(&buf, "%s", tagb.String())
+
+ // write measured value
+ fmt.Fprintf(&buf, " %v\n", value)
+ }
+ }
+ }
+
+ return d.send(buf.Bytes())
+}
+
+func (d *Dynatrace) send(msg []byte) error {
+ var err error
+ req, err := http.NewRequest("POST", d.URL, bytes.NewBuffer(msg))
+ if err != nil {
+ d.Log.Errorf("Dynatrace error: %s", err.Error())
+ return fmt.Errorf("Dynatrace error while creating HTTP request:, %s", err.Error())
+ }
+ req.Header.Add("Content-Type", "text/plain; charset=UTF-8")
+
+ if len(d.APIToken) != 0 {
+ req.Header.Add("Authorization", "Api-Token "+d.APIToken)
+ }
+ // add user-agent header to identify metric source
+ req.Header.Add("User-Agent", "telegraf")
+
+ resp, err := d.client.Do(req)
+ if err != nil {
+ d.Log.Errorf("Dynatrace error: %s", err.Error())
+ fmt.Println(req)
+ return fmt.Errorf("Dynatrace error while sending HTTP request:, %s", err.Error())
+ }
+ defer resp.Body.Close()
+
+ // print metric line results as info log
+ if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusAccepted {
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ d.Log.Errorf("Dynatrace error reading response")
+ }
+ bodyString := string(bodyBytes)
+ d.Log.Debugf("Dynatrace returned: %s", bodyString)
+ } else {
+ return fmt.Errorf("Dynatrace request failed with response code:, %d", resp.StatusCode)
+ }
+
+ return nil
+}
+
+func (d *Dynatrace) Init() error {
+ if len(d.URL) == 0 {
+ d.Log.Infof("Dynatrace URL is empty, defaulting to OneAgent metrics interface")
+ d.URL = oneAgentMetricsUrl
+ }
+ if d.URL != oneAgentMetricsUrl && len(d.APIToken) == 0 {
+ d.Log.Errorf("Dynatrace api_token is a required field for Dynatrace output")
+ return fmt.Errorf("api_token is a required field for Dynatrace output")
+ }
+
+ tlsCfg, err := d.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ d.client = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSClientConfig: tlsCfg,
+ },
+ Timeout: d.Timeout.Duration,
+ }
+ return nil
+}
+
+func init() {
+ outputs.Add("dynatrace", func() telegraf.Output {
+ return &Dynatrace{
+ Timeout: internal.Duration{Duration: time.Second * 5},
+ }
+ })
+}
diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go
new file mode 100644
index 0000000000000..cf6549c72ff11
--- /dev/null
+++ b/plugins/outputs/dynatrace/dynatrace_test.go
@@ -0,0 +1,331 @@
+package dynatrace
+
+import (
+ "encoding/json"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+)
+
+func TestNilMetrics(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`)
+ }))
+ defer ts.Close()
+
+ d := &Dynatrace{
+ Timeout: internal.Duration{Duration: time.Second * 5},
+ }
+
+ d.URL = ts.URL
+ d.APIToken = "123"
+ d.Log = testutil.Logger{}
+ err := d.Init()
+ require.NoError(t, err)
+
+ err = d.Connect()
+ require.NoError(t, err)
+
+ err = d.Write(nil)
+ require.NoError(t, err)
+}
+
+func TestEmptyMetricsSlice(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`)
+ }))
+ defer ts.Close()
+
+ d := &Dynatrace{}
+
+ d.URL = ts.URL
+ d.APIToken = "123"
+ d.Log = testutil.Logger{}
+
+ err := d.Init()
+ require.NoError(t, err)
+
+ err = d.Connect()
+ require.NoError(t, err)
+ empty := []telegraf.Metric{}
+ err = d.Write(empty)
+ require.NoError(t, err)
+}
+
+func TestMockURL(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`)
+ }))
+ defer ts.Close()
+
+ d := &Dynatrace{}
+
+ d.URL = ts.URL
+ d.APIToken = "123"
+ d.Log = testutil.Logger{}
+
+ err := d.Init()
+ require.NoError(t, err)
+ err = d.Connect()
+ require.NoError(t, err)
+ err = d.Write(testutil.MockMetrics())
+ require.NoError(t, err)
+}
+
+func TestMissingURL(t *testing.T) {
+ d := &Dynatrace{}
+
+ d.Log = testutil.Logger{}
+ err := d.Init()
+ require.Equal(t, oneAgentMetricsUrl, d.URL)
+ err = d.Connect()
+ require.Equal(t, oneAgentMetricsUrl, d.URL)
+ require.NoError(t, err)
+}
+
+func TestMissingAPITokenMissingURL(t *testing.T) {
+ d := &Dynatrace{}
+
+ d.Log = testutil.Logger{}
+ err := d.Init()
+ require.Equal(t, oneAgentMetricsUrl, d.URL)
+ err = d.Connect()
+ require.Equal(t, oneAgentMetricsUrl, d.URL)
+ require.NoError(t, err)
+}
+
+func TestMissingAPIToken(t *testing.T) {
+ d := &Dynatrace{}
+
+ d.URL = "test"
+ d.Log = testutil.Logger{}
+ err := d.Init()
+ require.Error(t, err)
+}
+
+func TestSendMetric(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // check the encoded result
+ bodyBytes, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ require.NoError(t, err)
+ }
+ bodyString := string(bodyBytes)
+ expected := "mymeasurement.myfield,host=\"192.168.0.1\",nix=\"nix\" 3.140000\nmymeasurement.value,host=\"192.168.0.1\" 3.140000\n"
+ if bodyString != expected {
+ t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString)
+ }
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`)
+ }))
+ defer ts.Close()
+
+ d := &Dynatrace{}
+
+ d.URL = ts.URL
+ d.APIToken = "123"
+ d.Log = testutil.Logger{}
+ err := d.Init()
+ require.NoError(t, err)
+ err = d.Connect()
+ require.NoError(t, err)
+
+ // Init metrics
+
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1", "nix": "nix"},
+ map[string]interface{}{"myfield": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ m2, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ metrics := []telegraf.Metric{m1, m2}
+
+ err = d.Write(metrics)
+ require.NoError(t, err)
+}
+
+func TestSendSingleMetricWithUnorderedTags(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // check the encoded result
+ bodyBytes, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ require.NoError(t, err)
+ }
+ bodyString := string(bodyBytes)
+ expected := "mymeasurement.myfield,a=\"test\",b=\"test\",c=\"test\" 3.140000\n"
+ if bodyString != expected {
+ t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString)
+ }
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`)
+ }))
+ defer ts.Close()
+
+ d := &Dynatrace{}
+
+ d.URL = ts.URL
+ d.APIToken = "123"
+ d.Log = testutil.Logger{}
+ err := d.Init()
+ require.NoError(t, err)
+ err = d.Connect()
+ require.NoError(t, err)
+
+ // Init metrics
+
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"a": "test", "c": "test", "b": "test"},
+ map[string]interface{}{"myfield": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ metrics := []telegraf.Metric{m1}
+
+ err = d.Write(metrics)
+ require.NoError(t, err)
+}
+
+func TestSendMetricWithoutTags(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ // check the encoded result
+ bodyBytes, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ require.NoError(t, err)
+ }
+ bodyString := string(bodyBytes)
+ expected := "mymeasurement.myfield 3.140000\n"
+ if bodyString != expected {
+ t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString)
+ }
+ json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`)
+ }))
+ defer ts.Close()
+
+ d := &Dynatrace{}
+
+ d.URL = ts.URL
+ d.APIToken = "123"
+ d.Log = testutil.Logger{}
+ err := d.Init()
+ require.NoError(t, err)
+ err = d.Connect()
+ require.NoError(t, err)
+
+ // Init metrics
+
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{},
+ map[string]interface{}{"myfield": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ metrics := []telegraf.Metric{m1}
+
+ err = d.Write(metrics)
+ require.NoError(t, err)
+}
+
+func TestSendMetricWithUpperCaseTagKeys(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ // check the encoded result
+ bodyBytes, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ require.NoError(t, err)
+ }
+ bodyString := string(bodyBytes)
+ expected := "mymeasurement.myfield,aaa=\"test\",b_b=\"test\",ccc=\"test\" 3.140000\n"
+ if bodyString != expected {
+ t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString)
+ }
+ json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`)
+ }))
+ defer ts.Close()
+
+ d := &Dynatrace{}
+
+ d.URL = ts.URL
+ d.APIToken = "123"
+ d.Log = testutil.Logger{}
+ err := d.Init()
+ require.NoError(t, err)
+ err = d.Connect()
+ require.NoError(t, err)
+
+ // Init metrics
+
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"AAA": "test", "CcC": "test", "B B": "test"},
+ map[string]interface{}{"myfield": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ metrics := []telegraf.Metric{m1}
+
+ err = d.Write(metrics)
+ require.NoError(t, err)
+}
+
+func TestSendBooleanMetricWithoutTags(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ // check the encoded result
+ bodyBytes, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ require.NoError(t, err)
+ }
+ bodyString := string(bodyBytes)
+ expected := "mymeasurement.myfield 1\n"
+ if bodyString != expected {
+ t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString)
+ }
+ json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`)
+ }))
+ defer ts.Close()
+
+ d := &Dynatrace{}
+
+ d.URL = ts.URL
+ d.APIToken = "123"
+ d.Log = testutil.Logger{}
+ err := d.Init()
+ require.NoError(t, err)
+ err = d.Connect()
+ require.NoError(t, err)
+
+ // Init metrics
+
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{},
+ map[string]interface{}{"myfield": bool(true)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ metrics := []telegraf.Metric{m1}
+
+ err = d.Write(metrics)
+ require.NoError(t, err)
+}
diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md
index 11f3c1385fd5c..2616ff1a64d08 100644
--- a/plugins/outputs/elasticsearch/README.md
+++ b/plugins/outputs/elasticsearch/README.md
@@ -1,10 +1,10 @@
-## Elasticsearch Output Plugin for Telegraf
+# Elasticsearch Output Plugin
-This plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using Elastic (http://olivere.github.io/elastic/).
+This plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using Elastic (
-Currently it only supports Elasticsearch 5.x series.
+It supports Elasticsearch releases from 5.x up to 7.x.
-## Elasticsearch indexes and templates
+### Elasticsearch indexes and templates
### Indexes per time-frame
@@ -22,7 +22,7 @@ For more information on how this works, see https://www.elastic.co/guide/en/elas
This plugin can create a working template for use with telegraf metrics. It uses Elasticsearch dynamic templates feature to set proper types for the tags and metrics fields.
If the template specified already exists, it will not overwrite unless you configure this plugin to do so. Thus you can customize this template after its creation if necessary.
-Example of an index template created by telegraf:
+Example of an index template created by telegraf on Elasticsearch 5.x:
```json
{
@@ -35,6 +35,8 @@ Example of an index template created by telegraf:
"limit": "5000"
}
},
+ "auto_expand_replicas" : "0-1",
+ "codec" : "best_compression",
"refresh_interval": "10s"
}
},
@@ -142,10 +144,9 @@ This plugin will format the events in the following way:
}
```
-### Configuration:
+### Configuration
```toml
-# Configuration for Elasticsearch to send metrics to.
[[outputs.elasticsearch]]
## The full HTTP endpoint URL for your Elasticsearch instance
## Multiple urls can be specified as part of the same cluster,
@@ -159,7 +160,7 @@ This plugin will format the events in the following way:
## Set the interval to check if the Elasticsearch nodes are available
## Setting to "0s" will disable the health check (not recommended in production)
health_check_interval = "10s"
- ## HTTP basic authentication details (eg. when using Shield)
+ ## HTTP basic authentication details.
# username = "telegraf"
# password = "mypassword"
@@ -195,9 +196,21 @@ This plugin will format the events in the following way:
template_name = "telegraf"
## Set to true if you want telegraf to overwrite an existing template
overwrite_template = false
+ ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string
+ ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's
+ force_document_id = false
```
-### Required parameters:
+#### Permissions
+
+If you are using authentication within your Elasticsearch cluster, you need
+to create a account and create a role with at least the manage role in the
+Cluster Privileges category. Overwise, your account will not be able to
+connect to your Elasticsearch cluster and send logs to your cluster. After
+that, you need to add "create_indice" and "write" permission to your specific
+index pattern.
+
+#### Required parameters:
* `urls`: A list containing the full HTTP URL of one or more nodes from your Elasticsearch instance.
* `index_name`: The target index for metrics. You can use the date specifiers below to create indexes per time frame.
@@ -209,9 +222,10 @@ This plugin will format the events in the following way:
%H - hour (00..23)
%V - week of the year (ISO week) (01..53)
```
+
Additionally, you can specify dynamic index names by using tags with the notation ```{{tag_name}}```. This will store the metrics with different tag values in different indices. If the tag does not exist in a particular metric, the `default_tag_value` will be used instead.
-### Optional parameters:
+#### Optional parameters:
* `timeout`: Elasticsearch client timeout, defaults to "5s" if not set.
* `enable_sniffer`: Set to true to ask Elasticsearch a list of all cluster nodes, thus it is not necessary to list all nodes in the urls config option.
@@ -221,8 +235,9 @@ Additionally, you can specify dynamic index names by using tags with the notatio
* `manage_template`: Set to true if you want telegraf to manage its index template. If enabled it will create a recommended index template for telegraf indexes.
* `template_name`: The template name used for telegraf indexes.
* `overwrite_template`: Set to true if you want telegraf to overwrite an existing template.
+* `force_document_id`: Set to true will compute a unique hash from as sha256(concat(timestamp,measurement,series-hash)),enables resend or update data withoud ES duplicated documents.
-## Known issues
+### Known issues
Integer values collected that are bigger than 2^63 and smaller than 1e21 (or in this exact same window of their negative counterparts) are encoded by golang JSON encoder in decimal format and that is not fully supported by Elasticsearch dynamic field mapping. This causes the metrics with such values to be dropped in case a field mapping has not been created yet on the telegraf index. If that's the case you will see an exception on Elasticsearch side like this:
diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go
index 56169135ac3be..b17a945b3523c 100644
--- a/plugins/outputs/elasticsearch/elasticsearch.go
+++ b/plugins/outputs/elasticsearch/elasticsearch.go
@@ -1,17 +1,21 @@
package elasticsearch
import (
+ "bytes"
"context"
"fmt"
"log"
"net/http"
"strconv"
"strings"
+ "text/template"
"time"
+ "crypto/sha256"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"gopkg.in/olivere/elastic.v5"
)
@@ -29,6 +33,8 @@ type Elasticsearch struct {
ManageTemplate bool
TemplateName string
OverwriteTemplate bool
+ ForceDocumentId bool
+ MajorReleaseNumber int
tls.ClientConfig
Client *elastic.Client
@@ -47,7 +53,7 @@ var sampleConfig = `
## Set the interval to check if the Elasticsearch nodes are available
## Setting to "0s" will disable the health check (not recommended in production)
health_check_interval = "10s"
- ## HTTP basic authentication details (eg. when using Shield)
+ ## HTTP basic authentication details
# username = "telegraf"
# password = "mypassword"
@@ -83,8 +89,86 @@ var sampleConfig = `
template_name = "telegraf"
## Set to true if you want telegraf to overwrite an existing template
overwrite_template = false
+ ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string
+ ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's
+ force_document_id = false
`
+const telegrafTemplate = `
+{
+ {{ if (lt .Version 6) }}
+ "template": "{{.TemplatePattern}}",
+ {{ else }}
+ "index_patterns" : [ "{{.TemplatePattern}}" ],
+ {{ end }}
+ "settings": {
+ "index": {
+ "refresh_interval": "10s",
+ "mapping.total_fields.limit": 5000,
+ "auto_expand_replicas" : "0-1",
+ "codec" : "best_compression"
+ }
+ },
+ "mappings" : {
+ {{ if (lt .Version 7) }}
+ "metrics" : {
+ {{ if (lt .Version 6) }}
+ "_all": { "enabled": false },
+ {{ end }}
+ {{ end }}
+ "properties" : {
+ "@timestamp" : { "type" : "date" },
+ "measurement_name" : { "type" : "keyword" }
+ },
+ "dynamic_templates": [
+ {
+ "tags": {
+ "match_mapping_type": "string",
+ "path_match": "tag.*",
+ "mapping": {
+ "ignore_above": 512,
+ "type": "keyword"
+ }
+ }
+ },
+ {
+ "metrics_long": {
+ "match_mapping_type": "long",
+ "mapping": {
+ "type": "float",
+ "index": false
+ }
+ }
+ },
+ {
+ "metrics_double": {
+ "match_mapping_type": "double",
+ "mapping": {
+ "type": "float",
+ "index": false
+ }
+ }
+ },
+ {
+ "text_fields": {
+ "match": "*",
+ "mapping": {
+ "norms": false
+ }
+ }
+ }
+ ]
+ {{ if (lt .Version 7) }}
+ }
+ {{ end }}
+ }
+}`
+
+type templatePart struct {
+ TemplatePattern string
+ Version int
+}
+
func (a *Elasticsearch) Connect() error {
if a.URLs == nil || a.IndexName == "" {
return fmt.Errorf("Elasticsearch urls or index_name is not defined")
@@ -142,14 +226,15 @@ func (a *Elasticsearch) Connect() error {
}
// quit if ES version is not supported
- i, err := strconv.Atoi(strings.Split(esVersion, ".")[0])
- if err != nil || i < 5 {
+ majorReleaseNumber, err := strconv.Atoi(strings.Split(esVersion, ".")[0])
+ if err != nil || majorReleaseNumber < 5 {
return fmt.Errorf("Elasticsearch version not supported: %s", esVersion)
}
log.Println("I! Elasticsearch version: " + esVersion)
a.Client = client
+ a.MajorReleaseNumber = majorReleaseNumber
if a.ManageTemplate {
err := a.manageTemplate(ctx)
@@ -163,6 +248,19 @@ func (a *Elasticsearch) Connect() error {
return nil
}
+// GetPointID generates a unique ID for a Metric Point
+func GetPointID(m telegraf.Metric) string {
+
+ var buffer bytes.Buffer
+ //Timestamp(ns),measurement name and Series Hash for compute the final SHA256 based hash ID
+
+ buffer.WriteString(strconv.FormatInt(m.Time().Local().UnixNano(), 10))
+ buffer.WriteString(m.Name())
+ buffer.WriteString(strconv.FormatUint(m.HashID(), 10))
+
+ return fmt.Sprintf("%x", sha256.Sum256(buffer.Bytes()))
+}
+
func (a *Elasticsearch) Write(metrics []telegraf.Metric) error {
if len(metrics) == 0 {
return nil
@@ -184,10 +282,18 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error {
m["tag"] = metric.Tags()
m[name] = metric.Fields()
- bulkRequest.Add(elastic.NewBulkIndexRequest().
- Index(indexName).
- Type("metrics").
- Doc(m))
+ br := elastic.NewBulkIndexRequest().Index(indexName).Doc(m)
+
+ if a.ForceDocumentId {
+ id := GetPointID(metric)
+ br.Id(id)
+ }
+
+ if a.MajorReleaseNumber <= 6 {
+ br.Type("metrics")
+ }
+
+ bulkRequest.Add(br)
}
@@ -237,65 +343,16 @@ func (a *Elasticsearch) manageTemplate(ctx context.Context) error {
}
if (a.OverwriteTemplate) || (!templateExists) || (templatePattern != "") {
- // Create or update the template
- tmpl := fmt.Sprintf(`
- {
- "template":"%s",
- "settings": {
- "index": {
- "refresh_interval": "10s",
- "mapping.total_fields.limit": 5000
- }
- },
- "mappings" : {
- "_default_" : {
- "_all": { "enabled": false },
- "properties" : {
- "@timestamp" : { "type" : "date" },
- "measurement_name" : { "type" : "keyword" }
- },
- "dynamic_templates": [
- {
- "tags": {
- "match_mapping_type": "string",
- "path_match": "tag.*",
- "mapping": {
- "ignore_above": 512,
- "type": "keyword"
- }
- }
- },
- {
- "metrics_long": {
- "match_mapping_type": "long",
- "mapping": {
- "type": "float",
- "index": false
- }
- }
- },
- {
- "metrics_double": {
- "match_mapping_type": "double",
- "mapping": {
- "type": "float",
- "index": false
- }
- }
- },
- {
- "text_fields": {
- "match": "*",
- "mapping": {
- "norms": false
- }
- }
- }
- ]
- }
- }
- }`, templatePattern+"*")
- _, errCreateTemplate := a.Client.IndexPutTemplate(a.TemplateName).BodyString(tmpl).Do(ctx)
+ tp := templatePart{
+ TemplatePattern: templatePattern + "*",
+ Version: a.MajorReleaseNumber,
+ }
+
+ t := template.Must(template.New("template").Parse(telegrafTemplate))
+ var tmpl bytes.Buffer
+
+ t.Execute(&tmpl, tp)
+ _, errCreateTemplate := a.Client.IndexPutTemplate(a.TemplateName).BodyString(tmpl.String()).Do(ctx)
if errCreateTemplate != nil {
return fmt.Errorf("Elasticsearch failed to create index template %s : %s", a.TemplateName, errCreateTemplate)
diff --git a/plugins/outputs/exec/README.md b/plugins/outputs/exec/README.md
new file mode 100644
index 0000000000000..d82676a251e4e
--- /dev/null
+++ b/plugins/outputs/exec/README.md
@@ -0,0 +1,26 @@
+# Exec Output Plugin
+
+This plugin sends telegraf metrics to an external application over stdin.
+
+The command should be defined similar to docker's `exec` form:
+
+ ["executable", "param1", "param2"]
+
+On non-zero exit stderr will be logged at error level.
+
+### Configuration
+
+```toml
+[[outputs.exec]]
+ ## Command to ingest metrics via stdin.
+ command = ["tee", "-a", "/dev/null"]
+
+ ## Timeout for command to complete.
+ # timeout = "5s"
+
+ ## Data format to output.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+ # data_format = "influx"
+```
diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go
new file mode 100644
index 0000000000000..d3697627e5f92
--- /dev/null
+++ b/plugins/outputs/exec/exec.go
@@ -0,0 +1,151 @@
+package exec
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os/exec"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/outputs"
+ "github.com/influxdata/telegraf/plugins/serializers"
+)
+
+const maxStderrBytes = 512
+
+// Exec defines the exec output plugin.
+type Exec struct {
+ Command []string `toml:"command"`
+ Timeout internal.Duration `toml:"timeout"`
+
+ runner Runner
+ serializer serializers.Serializer
+}
+
+var sampleConfig = `
+ ## Command to ingest metrics via stdin.
+ command = ["tee", "-a", "/dev/null"]
+
+ ## Timeout for command to complete.
+ # timeout = "5s"
+
+ ## Data format to output.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+ # data_format = "influx"
+`
+
+// SetSerializer sets the serializer for the output.
+func (e *Exec) SetSerializer(serializer serializers.Serializer) {
+ e.serializer = serializer
+}
+
+// Connect satisfies the Output interface.
+func (e *Exec) Connect() error {
+ return nil
+}
+
+// Close satisfies the Output interface.
+func (e *Exec) Close() error {
+ return nil
+}
+
+// Description describes the plugin.
+func (e *Exec) Description() string {
+ return "Send metrics to command as input over stdin"
+}
+
+// SampleConfig returns a sample configuration.
+func (e *Exec) SampleConfig() string {
+ return sampleConfig
+}
+
+// Write writes the metrics to the configured command.
+func (e *Exec) Write(metrics []telegraf.Metric) error {
+ var buffer bytes.Buffer
+ serializedMetrics, err := e.serializer.SerializeBatch(metrics)
+ if err != nil {
+ return err
+ }
+ buffer.Write(serializedMetrics)
+
+ if buffer.Len() <= 0 {
+ return nil
+ }
+
+ return e.runner.Run(e.Timeout.Duration, e.Command, &buffer)
+}
+
+// Runner provides an interface for running exec.Cmd.
+type Runner interface {
+ Run(time.Duration, []string, io.Reader) error
+}
+
+// CommandRunner runs a command with the ability to kill the process before the timeout.
+type CommandRunner struct {
+ cmd *exec.Cmd
+}
+
+// Run runs the command.
+func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.Reader) error {
+ cmd := exec.Command(command[0], command[1:]...)
+ cmd.Stdin = buffer
+ var stderr bytes.Buffer
+ cmd.Stderr = &stderr
+
+ err := internal.RunTimeout(cmd, timeout)
+ s := stderr
+
+ if err != nil {
+ if err == internal.TimeoutErr {
+ return fmt.Errorf("%q timed out and was killed", command)
+ }
+
+ if s.Len() > 0 {
+ log.Printf("E! [outputs.exec] Command error: %q", truncate(s))
+ }
+
+ if status, ok := internal.ExitStatus(err); ok {
+ return fmt.Errorf("%q exited %d with %s", command, status, err.Error())
+ }
+
+ return fmt.Errorf("%q failed with %s", command, err.Error())
+ }
+
+ c.cmd = cmd
+
+ return nil
+}
+
+func truncate(buf bytes.Buffer) string {
+ // Limit the number of bytes.
+ didTruncate := false
+ if buf.Len() > maxStderrBytes {
+ buf.Truncate(maxStderrBytes)
+ didTruncate = true
+ }
+ if i := bytes.IndexByte(buf.Bytes(), '\n'); i > 0 {
+ // Only show truncation if the newline wasn't the last character.
+ if i < buf.Len()-1 {
+ didTruncate = true
+ }
+ buf.Truncate(i)
+ }
+ if didTruncate {
+ buf.WriteString("...")
+ }
+ return buf.String()
+}
+
+func init() {
+ outputs.Add("exec", func() telegraf.Output {
+ return &Exec{
+ runner: &CommandRunner{},
+ Timeout: internal.Duration{Duration: time.Second * 5},
+ }
+ })
+}
diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go
new file mode 100644
index 0000000000000..850ba7328a03b
--- /dev/null
+++ b/plugins/outputs/exec/exec_test.go
@@ -0,0 +1,105 @@
+package exec
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/serializers"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func TestExec(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test due to OS/executable dependencies")
+ }
+
+ tests := []struct {
+ name string
+ command []string
+ err bool
+ metrics []telegraf.Metric
+ }{
+ {
+ name: "test success",
+ command: []string{"tee"},
+ err: false,
+ metrics: testutil.MockMetrics(),
+ },
+ {
+ name: "test doesn't accept stdin",
+ command: []string{"sleep", "5s"},
+ err: true,
+ metrics: testutil.MockMetrics(),
+ },
+ {
+ name: "test command not found",
+ command: []string{"/no/exist", "-h"},
+ err: true,
+ metrics: testutil.MockMetrics(),
+ },
+ {
+ name: "test no metrics output",
+ command: []string{"tee"},
+ err: false,
+ metrics: []telegraf.Metric{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ e := &Exec{
+ Command: tt.command,
+ Timeout: internal.Duration{Duration: time.Second},
+ runner: &CommandRunner{},
+ }
+
+ s, _ := serializers.NewInfluxSerializer()
+ e.SetSerializer(s)
+
+ e.Connect()
+
+ require.Equal(t, tt.err, e.Write(tt.metrics) != nil)
+ })
+ }
+}
+
+func TestTruncate(t *testing.T) {
+ tests := []struct {
+ name string
+ buf *bytes.Buffer
+ len int
+ }{
+ {
+ name: "long out",
+ buf: bytes.NewBufferString(strings.Repeat("a", maxStderrBytes+100)),
+ len: maxStderrBytes + len("..."),
+ },
+ {
+ name: "multiline out",
+ buf: bytes.NewBufferString("hola\ngato\n"),
+ len: len("hola") + len("..."),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := truncate(*tt.buf)
+ require.Equal(t, tt.len, len(s))
+ })
+ }
+}
+
+func TestExecDocs(t *testing.T) {
+ e := &Exec{}
+ e.Description()
+ e.SampleConfig()
+ require.NoError(t, e.Close())
+
+ e = &Exec{runner: &CommandRunner{}}
+ require.NoError(t, e.Close())
+}
diff --git a/plugins/outputs/execd/README.md b/plugins/outputs/execd/README.md
new file mode 100644
index 0000000000000..8569c1033fcea
--- /dev/null
+++ b/plugins/outputs/execd/README.md
@@ -0,0 +1,29 @@
+# Execd Output Plugin
+
+The `execd` plugin runs an external program as a daemon.
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration:
+
+```toml
+[[outputs.execd]]
+ ## One program to run as daemon.
+ ## NOTE: process and each argument should each be their own string
+ command = ["my-telegraf-output", "--some-flag", "value"]
+
+ ## Delay before the process is restarted after an unexpected termination
+ restart_delay = "10s"
+
+ ## Data format to export.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+ data_format = "influx"
+```
+
+### Example
+
+see [examples][]
+
+[examples]: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/execd/examples/
diff --git a/plugins/outputs/execd/examples/file/file.sh b/plugins/outputs/execd/examples/file/file.sh
new file mode 100644
index 0000000000000..4cdd0279b6d2a
--- /dev/null
+++ b/plugins/outputs/execd/examples/file/file.sh
@@ -0,0 +1,5 @@
+# Usage: sh file.sh output_filename.ext
+# reads from stdin and writes out to a file named on the command line.
+while read line; do
+ echo "$line" >> $1
+done < /dev/stdin
diff --git a/plugins/outputs/execd/examples/file/telegraf.conf b/plugins/outputs/execd/examples/file/telegraf.conf
new file mode 100644
index 0000000000000..0259e95f5cb85
--- /dev/null
+++ b/plugins/outputs/execd/examples/file/telegraf.conf
@@ -0,0 +1,9 @@
+[agent]
+ interval = "1s"
+
+[[inputs.execd]]
+ command = ["ruby", "plugins/inputs/execd/examples/count.rb"]
+
+[[outputs.execd]]
+ command = ["sh", "plugins/outputs/execd/examples/file/file.sh"]
+ data_format = "json"
diff --git a/plugins/outputs/execd/examples/redis/redis_influx.rb b/plugins/outputs/execd/examples/redis/redis_influx.rb
new file mode 100644
index 0000000000000..9bac10e67b802
--- /dev/null
+++ b/plugins/outputs/execd/examples/redis/redis_influx.rb
@@ -0,0 +1,19 @@
+#!/usr/bin/env ruby
+#
+# An example of funneling metrics to Redis pub/sub.
+#
+# to run this, you may need to:
+# gem install redis
+#
+require 'redis'
+
+r = Redis.new(host: "127.0.0.1", port: 6379, db: 1)
+
+loop do
+ # example input: "counter_ruby count=0 1591741648101185000"
+ line = STDIN.readline.chomp
+
+ key = line.split(" ")[0]
+ key = key.split(",")[0]
+ r.publish(key, line)
+end
diff --git a/plugins/outputs/execd/examples/redis/redis_json.rb b/plugins/outputs/execd/examples/redis/redis_json.rb
new file mode 100644
index 0000000000000..e0939634b6575
--- /dev/null
+++ b/plugins/outputs/execd/examples/redis/redis_json.rb
@@ -0,0 +1,21 @@
+#!/usr/bin/env ruby
+#
+# An example of funneling metrics to Redis pub/sub.
+#
+# to run this, you may need to:
+# gem install redis
+#
+require 'redis'
+require 'json'
+
+r = Redis.new(host: "127.0.0.1", port: 6379, db: 1)
+
+loop do
+ # example input: "{"fields":{"count":0},"name":"counter_ruby","tags":{"host":"localhost"},"timestamp":1586374982}"
+ line = STDIN.readline.chomp
+
+ l = JSON.parse(line)
+
+ key = l["name"]
+ r.publish(key, line)
+end
diff --git a/plugins/outputs/execd/examples/redis/telegraf.conf b/plugins/outputs/execd/examples/redis/telegraf.conf
new file mode 100644
index 0000000000000..765930c7a9dc7
--- /dev/null
+++ b/plugins/outputs/execd/examples/redis/telegraf.conf
@@ -0,0 +1,15 @@
+[agent]
+ flush_interval = "1s"
+ interval = "1s"
+
+[[inputs.execd]]
+ command = ["ruby", "plugins/inputs/execd/examples/count.rb"]
+ signal = "none"
+
+[[outputs.execd]]
+ command = ["ruby", "plugins/outputs/execd/examples/redis/redis_influx.rb"]
+ data_format = "influx"
+
+# [[outputs.file]]
+# files = ["stdout"]
+# data_format = "influx"
diff --git a/plugins/outputs/execd/execd.go b/plugins/outputs/execd/execd.go
new file mode 100644
index 0000000000000..acace77ada0df
--- /dev/null
+++ b/plugins/outputs/execd/execd.go
@@ -0,0 +1,129 @@
+package execd
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/internal/process"
+ "github.com/influxdata/telegraf/plugins/outputs"
+ "github.com/influxdata/telegraf/plugins/serializers"
+)
+
+const sampleConfig = `
+ ## Program to run as daemon
+ command = ["my-telegraf-output", "--some-flag", "value"]
+
+ ## Delay before the process is restarted after an unexpected termination
+ restart_delay = "10s"
+
+ ## Data format to export.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+ data_format = "influx"
+`
+
+type Execd struct {
+ Command []string `toml:"command"`
+ RestartDelay config.Duration `toml:"restart_delay"`
+ Log telegraf.Logger
+
+ process *process.Process
+ serializer serializers.Serializer
+}
+
+func (e *Execd) SampleConfig() string {
+ return sampleConfig
+}
+
+func (e *Execd) Description() string {
+ return "Run executable as long-running output plugin"
+}
+
+func (e *Execd) SetSerializer(s serializers.Serializer) {
+ e.serializer = s
+}
+
+func (e *Execd) Init() error {
+ if len(e.Command) == 0 {
+ return fmt.Errorf("no command specified")
+ }
+
+ var err error
+
+ e.process, err = process.New(e.Command)
+ if err != nil {
+ return fmt.Errorf("error creating process %s: %w", e.Command, err)
+ }
+ e.process.Log = e.Log
+ e.process.RestartDelay = time.Duration(e.RestartDelay)
+ e.process.ReadStdoutFn = e.cmdReadOut
+ e.process.ReadStderrFn = e.cmdReadErr
+
+ return nil
+}
+
+func (e *Execd) Connect() error {
+ if err := e.process.Start(); err != nil {
+ // if there was only one argument, and it contained spaces, warn the user
+ // that they may have configured it wrong.
+ if len(e.Command) == 1 && strings.Contains(e.Command[0], " ") {
+ e.Log.Warn("The outputs.execd Command contained spaces but no arguments. " +
+ "This setting expects the program and arguments as an array of strings, " +
+ "not as a space-delimited string. See the plugin readme for an example.")
+ }
+ return fmt.Errorf("failed to start process %s: %w", e.Command, err)
+ }
+
+ return nil
+}
+
+func (e *Execd) Close() error {
+ e.process.Stop()
+ return nil
+}
+
+func (e *Execd) Write(metrics []telegraf.Metric) error {
+ for _, m := range metrics {
+ b, err := e.serializer.Serialize(m)
+ if err != nil {
+ return fmt.Errorf("error serializing metrics: %s", err)
+ }
+
+ if _, err = e.process.Stdin.Write(b); err != nil {
+ return fmt.Errorf("error writing metrics %s", err)
+ }
+ }
+ return nil
+}
+
+func (e *Execd) cmdReadErr(out io.Reader) {
+ scanner := bufio.NewScanner(out)
+
+ for scanner.Scan() {
+ e.Log.Errorf("stderr: %s", scanner.Text())
+ }
+
+ if err := scanner.Err(); err != nil {
+ e.Log.Errorf("Error reading stderr: %s", err)
+ }
+}
+
+func (e *Execd) cmdReadOut(out io.Reader) {
+ scanner := bufio.NewScanner(out)
+
+ for scanner.Scan() {
+ e.Log.Info(scanner.Text())
+ }
+}
+
+func init() {
+ outputs.Add("execd", func() telegraf.Output {
+ return &Execd{}
+ })
+}
diff --git a/plugins/outputs/execd/execd_test.go b/plugins/outputs/execd/execd_test.go
new file mode 100644
index 0000000000000..46bde795ec2ed
--- /dev/null
+++ b/plugins/outputs/execd/execd_test.go
@@ -0,0 +1,113 @@
+package execd
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/parsers/influx"
+ "github.com/influxdata/telegraf/plugins/serializers"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+var now = time.Date(2020, 6, 30, 16, 16, 0, 0, time.UTC)
+
+func TestExternalOutputWorks(t *testing.T) {
+ influxSerializer, err := serializers.NewInfluxSerializer()
+ require.NoError(t, err)
+
+ exe, err := os.Executable()
+ require.NoError(t, err)
+
+ e := &Execd{
+ Command: []string{exe, "-testoutput"},
+ RestartDelay: config.Duration(5 * time.Second),
+ serializer: influxSerializer,
+ Log: testutil.Logger{},
+ }
+
+ require.NoError(t, e.Init())
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ e.process.ReadStderrFn = func(rstderr io.Reader) {
+ scanner := bufio.NewScanner(rstderr)
+
+ for scanner.Scan() {
+ t.Errorf("stderr: %q", scanner.Text())
+ }
+
+ if err := scanner.Err(); err != nil {
+ if !strings.HasSuffix(err.Error(), "already closed") {
+ t.Errorf("error reading stderr: %v", err)
+ }
+ }
+ wg.Done()
+ }
+
+ m, err := metric.New(
+ "cpu",
+ map[string]string{"name": "cpu1"},
+ map[string]interface{}{"idle": 50, "sys": 30},
+ now,
+ )
+ require.NoError(t, err)
+
+ require.NoError(t, e.Connect())
+ require.NoError(t, e.Write([]telegraf.Metric{m}))
+ require.NoError(t, e.Close())
+ wg.Wait()
+}
+
+var testoutput = flag.Bool("testoutput", false,
+ "if true, act like line input program instead of test")
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if *testoutput {
+ runOutputConsumerProgram()
+ os.Exit(0)
+ }
+ code := m.Run()
+ os.Exit(code)
+}
+
+func runOutputConsumerProgram() {
+ parser := influx.NewStreamParser(os.Stdin)
+
+ for {
+ metric, err := parser.Next()
+ if err != nil {
+ if err == influx.EOF {
+ return // stream ended
+ }
+ if parseErr, isParseError := err.(*influx.ParseError); isParseError {
+ fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
+ os.Exit(1)
+ }
+ fmt.Fprintf(os.Stderr, "ERR %v\n", err)
+ os.Exit(1)
+ }
+
+ expected := testutil.MustMetric("cpu",
+ map[string]string{"name": "cpu1"},
+ map[string]interface{}{"idle": 50, "sys": 30},
+ now,
+ )
+
+ if !testutil.MetricEqual(expected, metric) {
+ fmt.Fprintf(os.Stderr, "metric doesn't match expected\n")
+ os.Exit(1)
+ }
+ }
+}
diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md
index de577eacfe605..45d0ac1556c47 100644
--- a/plugins/outputs/file/README.md
+++ b/plugins/outputs/file/README.md
@@ -1,13 +1,31 @@
-# file Output Plugin
+# File Output Plugin
This plugin writes telegraf metrics to files
### Configuration
-```
+
+```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
+ ## Use batch serialization format instead of line based delimiting. The
+ ## batch format allows for the production of non line based output formats and
+ ## may more efficiently encode and write metrics.
+ # use_batch_format = false
+
+ ## The file will be rotated after the time interval specified. When set
+ ## to 0 no time based rotation is performed.
+ # rotation_interval = "0h"
+
+ ## The logfile will be rotated when it becomes larger than the specified
+ ## size. When set to 0 no size based rotation is performed.
+ # rotation_max_size = "0MB"
+
+ ## Maximum number of rotated archives to keep, any older logs are deleted.
+ ## If set to -1, no archives are removed.
+ # rotation_max_archives = 5
+
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go
index 0ef61df519202..3798f107aa157 100644
--- a/plugins/outputs/file/file.go
+++ b/plugins/outputs/file/file.go
@@ -6,16 +6,22 @@ import (
"os"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/rotate"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
)
type File struct {
- Files []string
-
- writers []io.Writer
- closers []io.Closer
-
+ Files []string `toml:"files"`
+ RotationInterval internal.Duration `toml:"rotation_interval"`
+ RotationMaxSize internal.Size `toml:"rotation_max_size"`
+ RotationMaxArchives int `toml:"rotation_max_archives"`
+ UseBatchFormat bool `toml:"use_batch_format"`
+ Log telegraf.Logger `toml:"-"`
+
+ writer io.Writer
+ closers []io.Closer
serializer serializers.Serializer
}
@@ -23,6 +29,23 @@ var sampleConfig = `
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
+ ## Use batch serialization format instead of line based delimiting. The
+ ## batch format allows for the production of non line based output formats and
+ ## may more efficiently encode metric groups.
+ # use_batch_format = false
+
+ ## The file will be rotated after the time interval specified. When set
+ ## to 0 no time based rotation is performed.
+ # rotation_interval = "0d"
+
+ ## The logfile will be rotated when it becomes larger than the specified
+ ## size. When set to 0 no size based rotation is performed.
+ # rotation_max_size = "0MB"
+
+ ## Maximum number of rotated archives to keep, any older logs are deleted.
+ ## If set to -1, no archives are removed.
+ # rotation_max_archives = 5
+
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -35,23 +58,27 @@ func (f *File) SetSerializer(serializer serializers.Serializer) {
}
func (f *File) Connect() error {
+ writers := []io.Writer{}
+
if len(f.Files) == 0 {
f.Files = []string{"stdout"}
}
for _, file := range f.Files {
if file == "stdout" {
- f.writers = append(f.writers, os.Stdout)
+ writers = append(writers, os.Stdout)
} else {
- of, err := os.OpenFile(file, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModeAppend|0644)
+ of, err := rotate.NewFileWriter(
+ file, f.RotationInterval.Duration, f.RotationMaxSize.Size, f.RotationMaxArchives)
if err != nil {
return err
}
- f.writers = append(f.writers, of)
+ writers = append(writers, of)
f.closers = append(f.closers, of)
}
}
+ f.writer = io.MultiWriter(writers...)
return nil
}
@@ -76,19 +103,31 @@ func (f *File) Description() string {
func (f *File) Write(metrics []telegraf.Metric) error {
var writeErr error = nil
- for _, metric := range metrics {
- b, err := f.serializer.Serialize(metric)
+
+ if f.UseBatchFormat {
+ octets, err := f.serializer.SerializeBatch(metrics)
if err != nil {
- return fmt.Errorf("failed to serialize message: %s", err)
+ f.Log.Errorf("Could not serialize metric: %v", err)
}
- for _, writer := range f.writers {
- _, err = writer.Write(b)
- if err != nil && writer != os.Stdout {
- writeErr = fmt.Errorf("E! failed to write message: %s, %s", b, err)
+ _, err = f.writer.Write(octets)
+ if err != nil {
+ f.Log.Errorf("Error writing to file: %v", err)
+ }
+ } else {
+ for _, metric := range metrics {
+ b, err := f.serializer.Serialize(metric)
+ if err != nil {
+ f.Log.Debugf("Could not serialize metric: %v", err)
+ }
+
+ _, err = f.writer.Write(b)
+ if err != nil {
+ writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err)
}
}
}
+
return writeErr
}
diff --git a/plugins/outputs/graphite/README.md b/plugins/outputs/graphite/README.md
index 878eb8048f485..b6b36cfcab4a0 100644
--- a/plugins/outputs/graphite/README.md
+++ b/plugins/outputs/graphite/README.md
@@ -21,9 +21,22 @@ see the [Graphite Data Format](../../../docs/DATA_FORMATS_OUTPUT.md)
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
template = "host.tags.measurement.field"
+ ## Graphite templates patterns
+ ## 1. Template for cpu
+ ## 2. Template for disk*
+ ## 3. Default template
+ # templates = [
+ # "cpu tags.measurement.host.field",
+ # "disk* measurement.field",
+ # "host.measurement.tags.field"
+ #]
+
## Enable Graphite tags support
# graphite_tag_support = false
+ ## Character for separating metric name and field for Graphite tags
+ # graphite_separator = "."
+
## timeout in seconds for the write connection to graphite
timeout = 2
diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go
index 09cdbe0809d6c..6c871ae174580 100644
--- a/plugins/outputs/graphite/graphite.go
+++ b/plugins/outputs/graphite/graphite.go
@@ -10,19 +10,21 @@ import (
"time"
"github.com/influxdata/telegraf"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
)
type Graphite struct {
GraphiteTagSupport bool
+ GraphiteSeparator string
// URL is only for backwards compatibility
- Servers []string
- Prefix string
- Template string
- Timeout int
- conns []net.Conn
+ Servers []string
+ Prefix string
+ Template string
+ Templates []string
+ Timeout int
+ conns []net.Conn
tlsint.ClientConfig
}
@@ -40,6 +42,19 @@ var sampleConfig = `
## Enable Graphite tags support
# graphite_tag_support = false
+ ## Character for separating metric name and field for Graphite tags
+ # graphite_separator = "."
+
+ ## Graphite templates patterns
+ ## 1. Template for cpu
+ ## 2. Template for disk*
+ ## 3. Default template
+ # templates = [
+ # "cpu tags.measurement.host.field",
+ # "disk* measurement.field",
+ # "host.measurement.tags.field"
+ #]
+
## timeout in seconds for the write connection to graphite
timeout = 2
@@ -134,7 +149,7 @@ func checkEOF(conn net.Conn) {
func (g *Graphite) Write(metrics []telegraf.Metric) error {
// Prepare data
var batch []byte
- s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport)
+ s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport, g.GraphiteSeparator, g.Templates)
if err != nil {
return err
}
diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go
index 3857236e50d37..82aad0d7d6ee6 100644
--- a/plugins/outputs/graphite/graphite_test.go
+++ b/plugins/outputs/graphite/graphite_test.go
@@ -98,6 +98,190 @@ func TestGraphiteOK(t *testing.T) {
g.Close()
}
+func TestGraphiteOkWithSeparatorDot(t *testing.T) {
+ var wg sync.WaitGroup
+ // Start TCP server
+ wg.Add(1)
+ t.Log("Starting server")
+ TCPServer1(t, &wg)
+
+ // Init plugin
+ g := Graphite{
+ Prefix: "my.prefix",
+ GraphiteSeparator: ".",
+ }
+
+ // Init metrics
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"myfield": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m2, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m3, _ := metric.New(
+ "my_measurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ // Prepare point list
+ metrics := []telegraf.Metric{m1}
+ metrics2 := []telegraf.Metric{m2, m3}
+ err1 := g.Connect()
+ require.NoError(t, err1)
+ // Send Data
+ t.Log("Send first data")
+ err2 := g.Write(metrics)
+ require.NoError(t, err2)
+
+ // Waiting TCPserver, should reconnect and resend
+ wg.Wait()
+ t.Log("Finished Waiting for first data")
+ var wg2 sync.WaitGroup
+ // Start TCP server
+ wg2.Add(1)
+ TCPServer2(t, &wg2)
+ //Write but expect an error, but reconnect
+ err3 := g.Write(metrics2)
+ t.Log("Finished writing second data, it should have reconnected automatically")
+
+ require.NoError(t, err3)
+ t.Log("Finished writing third data")
+ wg2.Wait()
+ g.Close()
+}
+
+func TestGraphiteOkWithSeparatorUnderscore(t *testing.T) {
+ var wg sync.WaitGroup
+ // Start TCP server
+ wg.Add(1)
+ t.Log("Starting server")
+ TCPServer1(t, &wg)
+
+ // Init plugin
+ g := Graphite{
+ Prefix: "my.prefix",
+ GraphiteSeparator: "_",
+ }
+
+ // Init metrics
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"myfield": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m2, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m3, _ := metric.New(
+ "my_measurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ // Prepare point list
+ metrics := []telegraf.Metric{m1}
+ metrics2 := []telegraf.Metric{m2, m3}
+ err1 := g.Connect()
+ require.NoError(t, err1)
+ // Send Data
+ t.Log("Send first data")
+ err2 := g.Write(metrics)
+ require.NoError(t, err2)
+
+ // Waiting TCPserver, should reconnect and resend
+ wg.Wait()
+ t.Log("Finished Waiting for first data")
+ var wg2 sync.WaitGroup
+ // Start TCP server
+ wg2.Add(1)
+ TCPServer2(t, &wg2)
+ //Write but expect an error, but reconnect
+ err3 := g.Write(metrics2)
+ t.Log("Finished writing second data, it should have reconnected automatically")
+
+ require.NoError(t, err3)
+ t.Log("Finished writing third data")
+ wg2.Wait()
+ g.Close()
+}
+
+func TestGraphiteOKWithMultipleTemplates(t *testing.T) {
+ var wg sync.WaitGroup
+ // Start TCP server
+ wg.Add(1)
+ t.Log("Starting server")
+ TCPServer1WithMultipleTemplates(t, &wg)
+
+ // Init plugin
+ g := Graphite{
+ Prefix: "my.prefix",
+ Template: "measurement.host.tags.field",
+ Templates: []string{
+ "my_* host.measurement.tags.field",
+ "measurement.tags.host.field",
+ },
+ }
+
+ // Init metrics
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1", "mytag": "valuetag"},
+ map[string]interface{}{"myfield": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m2, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1", "mytag": "valuetag"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m3, _ := metric.New(
+ "my_measurement",
+ map[string]string{"host": "192.168.0.1", "mytag": "valuetag"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ // Prepare point list
+ metrics := []telegraf.Metric{m1}
+ metrics2 := []telegraf.Metric{m2, m3}
+ err1 := g.Connect()
+ require.NoError(t, err1)
+ // Send Data
+ t.Log("Send first data")
+ err2 := g.Write(metrics)
+ require.NoError(t, err2)
+
+ // Waiting TCPserver, should reconnect and resend
+ wg.Wait()
+ t.Log("Finished Waiting for first data")
+ var wg2 sync.WaitGroup
+ // Start TCP server
+ wg2.Add(1)
+ TCPServer2WithMultipleTemplates(t, &wg2)
+ //Write but expect an error, but reconnect
+ err3 := g.Write(metrics2)
+ t.Log("Finished writing second data, it should have reconnected automatically")
+
+ require.NoError(t, err3)
+ t.Log("Finished writing third data")
+ wg2.Wait()
+ g.Close()
+}
+
func TestGraphiteOkWithTags(t *testing.T) {
var wg sync.WaitGroup
// Start TCP server
@@ -158,6 +342,128 @@ func TestGraphiteOkWithTags(t *testing.T) {
g.Close()
}
+func TestGraphiteOkWithTagsAndSeparatorDot(t *testing.T) {
+ var wg sync.WaitGroup
+ // Start TCP server
+ wg.Add(1)
+ t.Log("Starting server")
+ TCPServer1WithTags(t, &wg)
+
+ // Init plugin
+ g := Graphite{
+ Prefix: "my.prefix",
+ GraphiteTagSupport: true,
+ GraphiteSeparator: ".",
+ }
+
+ // Init metrics
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"myfield": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m2, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m3, _ := metric.New(
+ "my_measurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ // Prepare point list
+ metrics := []telegraf.Metric{m1}
+ metrics2 := []telegraf.Metric{m2, m3}
+ err1 := g.Connect()
+ require.NoError(t, err1)
+ // Send Data
+ t.Log("Send first data")
+ err2 := g.Write(metrics)
+ require.NoError(t, err2)
+
+ // Waiting TCPserver, should reconnect and resend
+ wg.Wait()
+ t.Log("Finished Waiting for first data")
+ var wg2 sync.WaitGroup
+ // Start TCP server
+ wg2.Add(1)
+ TCPServer2WithTags(t, &wg2)
+ //Write but expect an error, but reconnect
+ err3 := g.Write(metrics2)
+ t.Log("Finished writing second data, it should have reconnected automatically")
+
+ require.NoError(t, err3)
+ t.Log("Finished writing third data")
+ wg2.Wait()
+ g.Close()
+}
+
+func TestGraphiteOkWithTagsAndSeparatorUnderscore(t *testing.T) {
+ var wg sync.WaitGroup
+ // Start TCP server
+ wg.Add(1)
+ t.Log("Starting server")
+ TCPServer1WithTagsSeparatorUnderscore(t, &wg)
+
+ // Init plugin
+ g := Graphite{
+ Prefix: "my_prefix",
+ GraphiteTagSupport: true,
+ GraphiteSeparator: "_",
+ }
+
+ // Init metrics
+ m1, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"myfield": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m2, _ := metric.New(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ m3, _ := metric.New(
+ "my_measurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ // Prepare point list
+ metrics := []telegraf.Metric{m1}
+ metrics2 := []telegraf.Metric{m2, m3}
+ err1 := g.Connect()
+ require.NoError(t, err1)
+ // Send Data
+ t.Log("Send first data")
+ err2 := g.Write(metrics)
+ require.NoError(t, err2)
+
+ // Waiting TCPserver, should reconnect and resend
+ wg.Wait()
+ t.Log("Finished Waiting for first data")
+ var wg2 sync.WaitGroup
+ // Start TCP server
+ wg2.Add(1)
+ TCPServer2WithTagsSeparatorUnderscore(t, &wg2)
+ //Write but expect an error, but reconnect
+ err3 := g.Write(metrics2)
+ t.Log("Finished writing second data, it should have reconnected automatically")
+
+ require.NoError(t, err3)
+ t.Log("Finished writing third data")
+ wg2.Wait()
+ g.Close()
+}
+
func TCPServer1(t *testing.T, wg *sync.WaitGroup) {
tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003")
go func() {
@@ -188,6 +494,36 @@ func TCPServer2(t *testing.T, wg *sync.WaitGroup) {
}()
}
+func TCPServer1WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) {
+ tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003")
+ go func() {
+ defer wg.Done()
+ conn, _ := (tcpServer).Accept()
+ reader := bufio.NewReader(conn)
+ tp := textproto.NewReader(reader)
+ data1, _ := tp.ReadLine()
+ assert.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1.myfield 3.14 1289430000", data1)
+ conn.Close()
+ tcpServer.Close()
+ }()
+}
+
+func TCPServer2WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) {
+ tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003")
+ go func() {
+ defer wg.Done()
+ conn2, _ := (tcpServer).Accept()
+ reader := bufio.NewReader(conn2)
+ tp := textproto.NewReader(reader)
+ data2, _ := tp.ReadLine()
+ assert.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1 3.14 1289430000", data2)
+ data3, _ := tp.ReadLine()
+ assert.Equal(t, "my.prefix.192_168_0_1.my_measurement.valuetag 3.14 1289430000", data3)
+ conn2.Close()
+ tcpServer.Close()
+ }()
+}
+
func TCPServer1WithTags(t *testing.T, wg *sync.WaitGroup) {
tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003")
go func() {
@@ -217,3 +553,33 @@ func TCPServer2WithTags(t *testing.T, wg *sync.WaitGroup) {
tcpServer.Close()
}()
}
+
+func TCPServer1WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) {
+ tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003")
+ go func() {
+ defer wg.Done()
+ conn, _ := (tcpServer).Accept()
+ reader := bufio.NewReader(conn)
+ tp := textproto.NewReader(reader)
+ data1, _ := tp.ReadLine()
+ assert.Equal(t, "my_prefix_mymeasurement_myfield;host=192.168.0.1 3.14 1289430000", data1)
+ conn.Close()
+ tcpServer.Close()
+ }()
+}
+
+func TCPServer2WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) {
+ tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003")
+ go func() {
+ defer wg.Done()
+ conn2, _ := (tcpServer).Accept()
+ reader := bufio.NewReader(conn2)
+ tp := textproto.NewReader(reader)
+ data2, _ := tp.ReadLine()
+ assert.Equal(t, "my_prefix_mymeasurement;host=192.168.0.1 3.14 1289430000", data2)
+ data3, _ := tp.ReadLine()
+ assert.Equal(t, "my_prefix_my_measurement;host=192.168.0.1 3.14 1289430000", data3)
+ conn2.Close()
+ tcpServer.Close()
+ }()
+}
diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md
index 39863b5417f94..4945ce46f84f0 100644
--- a/plugins/outputs/graylog/README.md
+++ b/plugins/outputs/graylog/README.md
@@ -1,14 +1,18 @@
# Graylog Output Plugin
-This plugin writes to a Graylog instance using the "gelf" format.
+This plugin writes to a Graylog instance using the "[GELF][]" format.
-It requires a `servers` name.
+[GELF]: https://docs.graylog.org/en/3.1/pages/gelf.html#gelf-payload-specification
### Configuration:
```toml
-# Send telegraf metrics to graylog(s)
[[outputs.graylog]]
- ## UDP endpoint for your graylog instance(s).
- servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
+ ## UDP endpoint for your graylog instances.
+ servers = ["127.0.0.1:12201"]
+
+ ## The field to use as the GELF short_message, if unset the static string
+ ## "telegraf" will be used.
+ ## example: short_message_field = "message"
+ # short_message_field = ""
```
diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go
index 4b2c1693a469a..34f2ec6d93932 100644
--- a/plugins/outputs/graylog/graylog.go
+++ b/plugins/outputs/graylog/graylog.go
@@ -150,13 +150,19 @@ func (g *Gelf) send(b []byte) (n int, err error) {
}
type Graylog struct {
- Servers []string
- writer io.Writer
+ Servers []string `toml:"servers"`
+ ShortMessageField string `toml:"short_message_field"`
+ writer io.Writer
}
var sampleConfig = `
## UDP endpoint for your graylog instance.
- servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
+ servers = ["127.0.0.1:12201"]
+
+ ## The field to use as the GELF short_message, if unset the static string
+ ## "telegraf" will be used.
+ ## example: short_message_field = "message"
+ # short_message_field = ""
`
func (g *Graylog) Connect() error {
@@ -184,16 +190,12 @@ func (g *Graylog) SampleConfig() string {
}
func (g *Graylog) Description() string {
- return "Send telegraf metrics to graylog(s)"
+ return "Send telegraf metrics to graylog"
}
func (g *Graylog) Write(metrics []telegraf.Metric) error {
- if len(metrics) == 0 {
- return nil
- }
-
for _, metric := range metrics {
- values, err := serialize(metric)
+ values, err := g.serialize(metric)
if err != nil {
return err
}
@@ -201,14 +203,14 @@ func (g *Graylog) Write(metrics []telegraf.Metric) error {
for _, value := range values {
_, err := g.writer.Write([]byte(value))
if err != nil {
- return fmt.Errorf("FAILED to write message: %s, %s", value, err)
+ return fmt.Errorf("error writing message: %q, %v", value, err)
}
}
}
return nil
}
-func serialize(metric telegraf.Metric) ([]string, error) {
+func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) {
out := []string{}
m := make(map[string]interface{})
@@ -217,7 +219,7 @@ func serialize(metric telegraf.Metric) ([]string, error) {
m["short_message"] = "telegraf"
m["name"] = metric.Name()
- if host, ok := metric.Tags()["host"]; ok {
+ if host, ok := metric.GetTag("host"); ok {
m["host"] = host
} else {
host, err := os.Hostname()
@@ -227,14 +229,18 @@ func serialize(metric telegraf.Metric) ([]string, error) {
m["host"] = host
}
- for key, value := range metric.Tags() {
- if key != "host" {
- m["_"+key] = value
+ for _, tag := range metric.TagList() {
+ if tag.Key != "host" {
+ m["_"+tag.Key] = tag.Value
}
}
- for key, value := range metric.Fields() {
- m["_"+key] = value
+ for _, field := range metric.FieldList() {
+ if field.Key == g.ShortMessageField {
+ m["short_message"] = field.Value
+ } else {
+ m["_"+field.Key] = field.Value
+ }
}
serialized, err := ejson.Marshal(m)
diff --git a/plugins/outputs/health/README.md b/plugins/outputs/health/README.md
new file mode 100644
index 0000000000000..0a56d51928ff5
--- /dev/null
+++ b/plugins/outputs/health/README.md
@@ -0,0 +1,64 @@
+# Health Output Plugin
+
+The health plugin provides a HTTP health check resource that can be configured
+to return a failure status code based on the value of a metric.
+
+When the plugin is healthy it will return a 200 response; when unhealthy it
+will return a 503 response. The default state is healthy, one or more checks
+must fail in order for the resource to enter the failed state.
+
+### Configuration
+```toml
+[[outputs.health]]
+ ## Address and port to listen on.
+ ## ex: service_address = "http://localhost:8080"
+ ## service_address = "unix:///var/run/telegraf-health.sock"
+ # service_address = "http://:8080"
+
+ ## The maximum duration for reading the entire request.
+ # read_timeout = "5s"
+ ## The maximum duration for writing the entire response.
+ # write_timeout = "5s"
+
+ ## Username and password to accept for HTTP basic authentication.
+ # basic_username = "user1"
+ # basic_password = "secret"
+
+ ## Allowed CA certificates for client certificates.
+ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+ ## TLS server certificate and private key.
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## One or more check sub-tables should be defined, it is also recommended to
+ ## use metric filtering to limit the metrics that flow into this output.
+ ##
+ ## When using the default buffer sizes, this example will fail when the
+ ## metric buffer is half full.
+ ##
+ ## namepass = ["internal_write"]
+ ## tagpass = { output = ["influxdb"] }
+ ##
+ ## [[outputs.health.compares]]
+ ## field = "buffer_size"
+ ## lt = 5000.0
+ ##
+ ## [[outputs.health.contains]]
+ ## field = "buffer_size"
+```
+
+#### compares
+
+The `compares` check is used to assert basic mathematical relationships. Use
+it by choosing a field key and one or more comparisons that must hold true. If
+the field is not found on a metric no comparison will be made.
+
+Comparisons must be hold true on all metrics for the check to pass.
+
+#### contains
+
+The `contains` check can be used to require a field key to exist on at least
+one metric.
+
+If the field is found on any metric the check passes.
diff --git a/plugins/outputs/health/compares.go b/plugins/outputs/health/compares.go
new file mode 100644
index 0000000000000..9228bd2df7187
--- /dev/null
+++ b/plugins/outputs/health/compares.go
@@ -0,0 +1,77 @@
+package health
+
+import (
+ "github.com/influxdata/telegraf"
+)
+
+type Compares struct {
+ Field string `toml:"field"`
+ GT *float64 `toml:"gt"`
+ GE *float64 `toml:"ge"`
+ LT *float64 `toml:"lt"`
+ LE *float64 `toml:"le"`
+ EQ *float64 `toml:"eq"`
+ NE *float64 `toml:"ne"`
+}
+
+func (c *Compares) runChecks(fv float64) bool {
+ if c.GT != nil && !(fv > *c.GT) {
+ return false
+ }
+ if c.GE != nil && !(fv >= *c.GE) {
+ return false
+ }
+ if c.LT != nil && !(fv < *c.LT) {
+ return false
+ }
+ if c.LE != nil && !(fv <= *c.LE) {
+ return false
+ }
+ if c.EQ != nil && !(fv == *c.EQ) {
+ return false
+ }
+ if c.NE != nil && !(fv != *c.NE) {
+ return false
+ }
+ return true
+}
+
+func (c *Compares) Check(metrics []telegraf.Metric) bool {
+ success := true
+ for _, m := range metrics {
+ fv, ok := m.GetField(c.Field)
+ if !ok {
+ continue
+ }
+
+ f, ok := asFloat(fv)
+ if !ok {
+ return false
+ }
+
+ result := c.runChecks(f)
+ if !result {
+ success = false
+ }
+ }
+ return success
+}
+
+func asFloat(fv interface{}) (float64, bool) {
+ switch v := fv.(type) {
+ case int64:
+ return float64(v), true
+ case float64:
+ return v, true
+ case uint64:
+ return float64(v), true
+ case bool:
+ if v {
+ return 1.0, true
+ } else {
+ return 0.0, true
+ }
+ default:
+ return 0.0, false
+ }
+}
diff --git a/plugins/outputs/health/compares_test.go b/plugins/outputs/health/compares_test.go
new file mode 100644
index 0000000000000..26f0dc1e1b7a2
--- /dev/null
+++ b/plugins/outputs/health/compares_test.go
@@ -0,0 +1,268 @@
+package health_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/outputs/health"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func addr(v float64) *float64 {
+ return &v
+}
+
+func TestFieldNotFoundIsSuccess(t *testing.T) {
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Now()),
+ }
+
+ compares := &health.Compares{
+ Field: "time_idle",
+ GT: addr(42.0),
+ }
+ result := compares.Check(metrics)
+ require.True(t, result)
+}
+
+func TestStringFieldIsFailure(t *testing.T) {
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": "foo",
+ },
+ time.Now()),
+ }
+
+ compares := &health.Compares{
+ Field: "time_idle",
+ GT: addr(42.0),
+ }
+ result := compares.Check(metrics)
+ require.False(t, result)
+}
+
+func TestFloatConvert(t *testing.T) {
+ tests := []struct {
+ name string
+ metrics []telegraf.Metric
+ expected bool
+ }{
+ {
+ name: "int64 field",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": int64(42.0),
+ },
+ time.Now()),
+ },
+ expected: true,
+ },
+ {
+ name: "uint64 field",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": uint64(42.0),
+ },
+ time.Now()),
+ },
+ expected: true,
+ },
+ {
+ name: "float64 field",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": float64(42.0),
+ },
+ time.Now()),
+ },
+ expected: true,
+ },
+ {
+ name: "bool field true",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": true,
+ },
+ time.Now()),
+ },
+ expected: true,
+ },
+ {
+ name: "bool field false",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": false,
+ },
+ time.Now()),
+ },
+ expected: false,
+ },
+ {
+ name: "string field",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": "42.0",
+ },
+ time.Now()),
+ },
+ expected: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ compares := &health.Compares{
+ Field: "time_idle",
+ GT: addr(0.0),
+ }
+ actual := compares.Check(tt.metrics)
+ require.Equal(t, tt.expected, actual)
+ })
+ }
+}
+
+func TestOperators(t *testing.T) {
+ tests := []struct {
+ name string
+ compares *health.Compares
+ expected bool
+ }{
+ {
+ name: "gt",
+ compares: &health.Compares{
+ Field: "time_idle",
+ GT: addr(41.0),
+ },
+ expected: true,
+ },
+ {
+ name: "not gt",
+ compares: &health.Compares{
+ Field: "time_idle",
+ GT: addr(42.0),
+ },
+ expected: false,
+ },
+ {
+ name: "ge",
+ compares: &health.Compares{
+ Field: "time_idle",
+ GE: addr(42.0),
+ },
+ expected: true,
+ },
+ {
+ name: "not ge",
+ compares: &health.Compares{
+ Field: "time_idle",
+ GE: addr(43.0),
+ },
+ expected: false,
+ },
+ {
+ name: "lt",
+ compares: &health.Compares{
+ Field: "time_idle",
+ LT: addr(43.0),
+ },
+ expected: true,
+ },
+ {
+ name: "not lt",
+ compares: &health.Compares{
+ Field: "time_idle",
+ LT: addr(42.0),
+ },
+ expected: false,
+ },
+ {
+ name: "le",
+ compares: &health.Compares{
+ Field: "time_idle",
+ LE: addr(42.0),
+ },
+ expected: true,
+ },
+ {
+ name: "not le",
+ compares: &health.Compares{
+ Field: "time_idle",
+ LE: addr(41.0),
+ },
+ expected: false,
+ },
+ {
+ name: "eq",
+ compares: &health.Compares{
+ Field: "time_idle",
+ EQ: addr(42.0),
+ },
+ expected: true,
+ },
+ {
+ name: "not eq",
+ compares: &health.Compares{
+ Field: "time_idle",
+ EQ: addr(41.0),
+ },
+ expected: false,
+ },
+ {
+ name: "ne",
+ compares: &health.Compares{
+ Field: "time_idle",
+ NE: addr(41.0),
+ },
+ expected: true,
+ },
+ {
+ name: "not ne",
+ compares: &health.Compares{
+ Field: "time_idle",
+ NE: addr(42.0),
+ },
+ expected: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Now()),
+ }
+ actual := tt.compares.Check(metrics)
+ require.Equal(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/plugins/outputs/health/contains.go b/plugins/outputs/health/contains.go
new file mode 100644
index 0000000000000..ff03667e032f5
--- /dev/null
+++ b/plugins/outputs/health/contains.go
@@ -0,0 +1,19 @@
+package health
+
+import "github.com/influxdata/telegraf"
+
+type Contains struct {
+ Field string `toml:"field"`
+}
+
+func (c *Contains) Check(metrics []telegraf.Metric) bool {
+ success := false
+ for _, m := range metrics {
+ ok := m.HasField(c.Field)
+ if ok {
+ success = true
+ }
+ }
+
+ return success
+}
diff --git a/plugins/outputs/health/contains_test.go b/plugins/outputs/health/contains_test.go
new file mode 100644
index 0000000000000..2337dd867c907
--- /dev/null
+++ b/plugins/outputs/health/contains_test.go
@@ -0,0 +1,68 @@
+package health_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/outputs/health"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFieldFound(t *testing.T) {
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Now()),
+ }
+
+ contains := &health.Contains{
+ Field: "time_idle",
+ }
+ result := contains.Check(metrics)
+ require.True(t, result)
+}
+
+func TestFieldNotFound(t *testing.T) {
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Now()),
+ }
+
+ contains := &health.Contains{
+ Field: "time_idle",
+ }
+ result := contains.Check(metrics)
+ require.False(t, result)
+}
+
+func TestOneMetricWithFieldIsSuccess(t *testing.T) {
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Now()),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Now()),
+ }
+
+ contains := &health.Contains{
+ Field: "time_idle",
+ }
+ result := contains.Check(metrics)
+ require.True(t, result)
+}
diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go
new file mode 100644
index 0000000000000..f411305616954
--- /dev/null
+++ b/plugins/outputs/health/health.go
@@ -0,0 +1,272 @@
+package health
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/outputs"
+)
+
+const (
+ defaultServiceAddress = "tcp://:8080"
+ defaultReadTimeout = 5 * time.Second
+ defaultWriteTimeout = 5 * time.Second
+)
+
+var sampleConfig = `
+ ## Address and port to listen on.
+ ## ex: service_address = "http://localhost:8080"
+ ## service_address = "unix:///var/run/telegraf-health.sock"
+ # service_address = "http://:8080"
+
+ ## The maximum duration for reading the entire request.
+ # read_timeout = "5s"
+ ## The maximum duration for writing the entire response.
+ # write_timeout = "5s"
+
+ ## Username and password to accept for HTTP basic authentication.
+ # basic_username = "user1"
+ # basic_password = "secret"
+
+ ## Allowed CA certificates for client certificates.
+ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+ ## TLS server certificate and private key.
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## One or more check sub-tables should be defined, it is also recommended to
+ ## use metric filtering to limit the metrics that flow into this output.
+ ##
+ ## When using the default buffer sizes, this example will fail when the
+ ## metric buffer is half full.
+ ##
+ ## namepass = ["internal_write"]
+ ## tagpass = { output = ["influxdb"] }
+ ##
+ ## [[outputs.health.compares]]
+ ## field = "buffer_size"
+ ## lt = 5000.0
+ ##
+ ## [[outputs.health.contains]]
+ ## field = "buffer_size"
+`
+
+type Checker interface {
+ // Check returns true if the metrics meet its criteria.
+ Check(metrics []telegraf.Metric) bool
+}
+
+type Health struct {
+ ServiceAddress string `toml:"service_address"`
+ ReadTimeout internal.Duration `toml:"read_timeout"`
+ WriteTimeout internal.Duration `toml:"write_timeout"`
+ BasicUsername string `toml:"basic_username"`
+ BasicPassword string `toml:"basic_password"`
+ tlsint.ServerConfig
+
+ Compares []*Compares `toml:"compares"`
+ Contains []*Contains `toml:"contains"`
+ checkers []Checker
+
+ wg sync.WaitGroup
+ server *http.Server
+ origin string
+ network string
+ address string
+ tlsConf *tls.Config
+
+ mu sync.Mutex
+ healthy bool
+}
+
+func (h *Health) SampleConfig() string {
+ return sampleConfig
+}
+
+func (h *Health) Description() string {
+ return "Configurable HTTP health check resource based on metrics"
+}
+
+func (h *Health) Init() error {
+ u, err := url.Parse(h.ServiceAddress)
+ if err != nil {
+ return err
+ }
+
+ switch u.Scheme {
+ case "http", "https":
+ h.network = "tcp"
+ h.address = u.Host
+ case "unix":
+ h.network = u.Scheme
+ h.address = u.Path
+ case "tcp4", "tcp6", "tcp":
+ h.network = u.Scheme
+ h.address = u.Host
+ default:
+ return errors.New("service_address contains invalid scheme")
+ }
+
+ h.tlsConf, err = h.ServerConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ h.checkers = make([]Checker, 0)
+ for i := range h.Compares {
+ h.checkers = append(h.checkers, h.Compares[i])
+ }
+ for i := range h.Contains {
+ h.checkers = append(h.checkers, h.Contains[i])
+ }
+
+ return nil
+}
+
+// Connect starts the HTTP server.
+func (h *Health) Connect() error {
+ authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, "health", onAuthError)
+
+ h.server = &http.Server{
+ Addr: h.ServiceAddress,
+ Handler: authHandler(h),
+ ReadTimeout: h.ReadTimeout.Duration,
+ WriteTimeout: h.WriteTimeout.Duration,
+ TLSConfig: h.tlsConf,
+ }
+
+ listener, err := h.listen()
+ if err != nil {
+ return err
+ }
+
+ h.origin = h.getOrigin(listener)
+
+ log.Printf("I! [outputs.health] Listening on %s", h.origin)
+
+ h.wg.Add(1)
+ go func() {
+ defer h.wg.Done()
+ err := h.server.Serve(listener)
+ if err != http.ErrServerClosed {
+ log.Printf("E! [outputs.health] Serve error on %s: %v", h.origin, err)
+ }
+ h.origin = ""
+ }()
+
+ return nil
+}
+
+func onAuthError(_ http.ResponseWriter) {
+}
+
+func (h *Health) listen() (net.Listener, error) {
+ if h.tlsConf != nil {
+ return tls.Listen(h.network, h.address, h.tlsConf)
+ } else {
+ return net.Listen(h.network, h.address)
+ }
+}
+
+func (h *Health) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ var code = http.StatusOK
+ if !h.isHealthy() {
+ code = http.StatusServiceUnavailable
+ }
+
+ rw.Header().Set("Server", internal.ProductToken())
+ http.Error(rw, http.StatusText(code), code)
+}
+
+// Write runs all checks over the metric batch and adjust health state.
+func (h *Health) Write(metrics []telegraf.Metric) error {
+ healthy := true
+ for _, checker := range h.checkers {
+ success := checker.Check(metrics)
+ if !success {
+ healthy = false
+ }
+ }
+
+ h.setHealthy(healthy)
+ return nil
+}
+
+// Close shuts down the HTTP server.
+func (h *Health) Close() error {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ h.server.Shutdown(ctx)
+ h.wg.Wait()
+ return nil
+}
+
+// Origin returns the URL of the HTTP server.
+func (h *Health) Origin() string {
+ return h.origin
+}
+
+func (h *Health) getOrigin(listener net.Listener) string {
+ scheme := "http"
+ if h.tlsConf != nil {
+ scheme = "https"
+ }
+ if h.network == "unix" {
+ scheme = "unix"
+ }
+
+ switch h.network {
+ case "unix":
+ origin := &url.URL{
+ Scheme: scheme,
+ Path: listener.Addr().String(),
+ }
+ return origin.String()
+ default:
+ origin := &url.URL{
+ Scheme: scheme,
+ Host: listener.Addr().String(),
+ }
+ return origin.String()
+ }
+
+}
+
+func (h *Health) setHealthy(healthy bool) {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.healthy = healthy
+}
+
+func (h *Health) isHealthy() bool {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.healthy
+}
+
+func NewHealth() *Health {
+ return &Health{
+ ServiceAddress: defaultServiceAddress,
+ ReadTimeout: internal.Duration{Duration: defaultReadTimeout},
+ WriteTimeout: internal.Duration{Duration: defaultWriteTimeout},
+ healthy: true,
+ }
+}
+
+func init() {
+ outputs.Add("health", func() telegraf.Output {
+ return NewHealth()
+ })
+}
diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go
new file mode 100644
index 0000000000000..5bf35ad8320e4
--- /dev/null
+++ b/plugins/outputs/health/health_test.go
@@ -0,0 +1,204 @@
+package health_test
+
+import (
+ "io/ioutil"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/outputs/health"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+var pki = testutil.NewPKI("../../../testutil/pki")
+
+func TestHealth(t *testing.T) {
+ type Options struct {
+ Compares []*health.Compares `toml:"compares"`
+ Contains []*health.Contains `toml:"contains"`
+ }
+
+ now := time.Now()
+ tests := []struct {
+ name string
+ options Options
+ metrics []telegraf.Metric
+ expectedCode int
+ }{
+ {
+ name: "healthy on startup",
+ expectedCode: 200,
+ },
+ {
+ name: "check passes",
+ options: Options{
+ Compares: []*health.Compares{
+ {
+ Field: "time_idle",
+ GT: func() *float64 { v := 0.0; return &v }(),
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ now),
+ },
+ expectedCode: 200,
+ },
+ {
+ name: "check fails",
+ options: Options{
+ Compares: []*health.Compares{
+ {
+ Field: "time_idle",
+ LT: func() *float64 { v := 0.0; return &v }(),
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ now),
+ },
+ expectedCode: 503,
+ },
+ {
+ name: "mixed check fails",
+ options: Options{
+ Compares: []*health.Compares{
+ {
+ Field: "time_idle",
+ LT: func() *float64 { v := 0.0; return &v }(),
+ },
+ },
+ Contains: []*health.Contains{
+ {
+ Field: "foo",
+ },
+ },
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ now),
+ },
+ expectedCode: 503,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ output := health.NewHealth()
+ output.ServiceAddress = "tcp://127.0.0.1:0"
+ output.Compares = tt.options.Compares
+ output.Contains = tt.options.Contains
+
+ err := output.Init()
+ require.NoError(t, err)
+
+ err = output.Connect()
+ require.NoError(t, err)
+
+ err = output.Write(tt.metrics)
+ require.NoError(t, err)
+
+ resp, err := http.Get(output.Origin())
+ require.NoError(t, err)
+ require.Equal(t, tt.expectedCode, resp.StatusCode)
+
+ _, err = ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ err = output.Close()
+ require.NoError(t, err)
+ })
+ }
+}
+
+func TestInitServiceAddress(t *testing.T) {
+ tests := []struct {
+ name string
+ plugin *health.Health
+ err bool
+ origin string
+ }{
+ {
+ name: "port without scheme is not allowed",
+ plugin: &health.Health{
+ ServiceAddress: ":8080",
+ },
+ err: true,
+ },
+ {
+ name: "path without scheme is not allowed",
+ plugin: &health.Health{
+ ServiceAddress: "/tmp/telegraf",
+ },
+ err: true,
+ },
+ {
+ name: "tcp with port maps to http",
+ plugin: &health.Health{
+ ServiceAddress: "tcp://:8080",
+ },
+ },
+ {
+ name: "tcp with tlsconf maps to https",
+ plugin: &health.Health{
+ ServiceAddress: "tcp://:8080",
+ ServerConfig: *pki.TLSServerConfig(),
+ },
+ },
+ {
+ name: "tcp4 is allowed",
+ plugin: &health.Health{
+ ServiceAddress: "tcp4://:8080",
+ },
+ },
+ {
+ name: "tcp6 is allowed",
+ plugin: &health.Health{
+ ServiceAddress: "tcp6://:8080",
+ },
+ },
+ {
+ name: "http scheme",
+ plugin: &health.Health{
+ ServiceAddress: "http://:8080",
+ },
+ },
+ {
+ name: "https scheme",
+ plugin: &health.Health{
+ ServiceAddress: "https://:8080",
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ output := health.NewHealth()
+ output.ServiceAddress = tt.plugin.ServiceAddress
+
+ err := output.Init()
+ if tt.err {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ })
+ }
+}
diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md
index 5697b603076de..0229c0e6ada7f 100644
--- a/plugins/outputs/http/README.md
+++ b/plugins/outputs/http/README.md
@@ -9,7 +9,7 @@ data formats. For data_formats that support batching, metrics are sent in batch
# A plugin that can transmit metrics over HTTP
[[outputs.http]]
## URL is the address to send metrics to
- url = "http://127.0.0.1:8080/metric"
+ url = "http://127.0.0.1:8080/telegraf"
## Timeout for HTTP message
# timeout = "5s"
@@ -40,12 +40,12 @@ data formats. For data_formats that support batching, metrics are sent in batch
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
+ ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+ ## compress body or "identity" to apply no encoding.
+ # content_encoding = "identity"
+
## Additional HTTP headers
# [outputs.http.headers]
# # Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
-
- ## HTTP Content-Encoding for write request body, can be set to "gzip" to
- ## compress body or "identity" to apply no encoding.
- # content_encoding = "identity"
```
diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go
index abcea74b57d0f..d75d5ef5a4df2 100644
--- a/plugins/outputs/http/http.go
+++ b/plugins/outputs/http/http.go
@@ -12,16 +12,20 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
+const (
+ defaultURL = "http://127.0.0.1:8080/telegraf"
+)
+
var sampleConfig = `
## URL is the address to send metrics to
- url = "http://127.0.0.1:8080/metric"
+ url = "http://127.0.0.1:8080/telegraf"
## Timeout for HTTP message
# timeout = "5s"
@@ -52,14 +56,14 @@ var sampleConfig = `
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
+ ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+ ## compress body or "identity" to apply no encoding.
+ # content_encoding = "identity"
+
## Additional HTTP headers
# [outputs.http.headers]
# # Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
-
- ## HTTP Content-Encoding for write request body, can be set to "gzip" to
- ## compress body or "identity" to apply no encoding.
- # content_encoding = "identity"
`
const (
@@ -172,10 +176,12 @@ func (h *HTTP) write(reqBody []byte) error {
var err error
if h.ContentEncoding == "gzip" {
- reqBodyBuffer, err = internal.CompressWithGzip(reqBodyBuffer)
+ rc, err := internal.CompressWithGzip(reqBodyBuffer)
if err != nil {
return err
}
+ defer rc.Close()
+ reqBodyBuffer = rc
}
req, err := http.NewRequest(h.Method, h.URL, reqBodyBuffer)
@@ -187,12 +193,15 @@ func (h *HTTP) write(reqBody []byte) error {
req.SetBasicAuth(h.Username, h.Password)
}
- req.Header.Set("User-Agent", "Telegraf/"+internal.Version())
+ req.Header.Set("User-Agent", internal.ProductToken())
req.Header.Set("Content-Type", defaultContentType)
if h.ContentEncoding == "gzip" {
req.Header.Set("Content-Encoding", "gzip")
}
for k, v := range h.Headers {
+ if strings.ToLower(k) == "host" {
+ req.Host = v
+ }
req.Header.Set(k, v)
}
@@ -215,6 +224,7 @@ func init() {
return &HTTP{
Timeout: internal.Duration{Duration: defaultClientTimeout},
Method: defaultMethod,
+ URL: defaultURL,
}
})
}
diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go
index 0decdf0246c02..abcf2db33dabc 100644
--- a/plugins/outputs/http/http_test.go
+++ b/plugins/outputs/http/http_test.go
@@ -431,11 +431,9 @@ func TestDefaultUserAgent(t *testing.T) {
u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
require.NoError(t, err)
- internal.SetVersion("1.2.3")
-
t.Run("default-user-agent", func(t *testing.T) {
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- require.Equal(t, "Telegraf/1.2.3", r.Header.Get("User-Agent"))
+ require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent"))
w.WriteHeader(http.StatusOK)
})
diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md
index 48ab3d51b92c1..aefc03690a8da 100644
--- a/plugins/outputs/influxdb/README.md
+++ b/plugins/outputs/influxdb/README.md
@@ -23,6 +23,9 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser
## tag is not set the 'database' option is used as the default.
# database_tag = ""
+ ## If true, the 'database_tag' will not be included in the written metric.
+ # exclude_database_tag = false
+
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
@@ -32,6 +35,13 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
+ ## The value of this tag will be used to determine the retention policy. If this
+ ## tag is not set the 'retention_policy' option is used as the default.
+ # retention_policy_tag = ""
+
+ ## If true, the 'retention_policy_tag' will not be included in the written metric.
+ # exclude_retention_policy_tag = false
+
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
@@ -74,4 +84,9 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser
# influx_uint_support = false
```
+### Metrics
+
+Reference the [influx serializer][] for details about metric production.
+
[InfluxDB v1.x]: https://github.com/influxdata/influxdb
+[influx serializer]: /plugins/serializers/influx/README.md#Metrics
diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go
index 43aa55ea86736..19ae6f31f45c6 100644
--- a/plugins/outputs/influxdb/http.go
+++ b/plugins/outputs/influxdb/http.go
@@ -6,7 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
- "log"
+ "io/ioutil"
"net"
"net/http"
"net/url"
@@ -83,29 +83,39 @@ func (r WriteResponse) Error() string {
}
type HTTPConfig struct {
- URL *url.URL
- UserAgent string
- Timeout time.Duration
- Username string
- Password string
- TLSConfig *tls.Config
- Proxy *url.URL
- Headers map[string]string
- ContentEncoding string
- Database string
- DatabaseTag string
- RetentionPolicy string
- Consistency string
- SkipDatabaseCreation bool
+ URL *url.URL
+ UserAgent string
+ Timeout time.Duration
+ Username string
+ Password string
+ TLSConfig *tls.Config
+ Proxy *url.URL
+ Headers map[string]string
+ ContentEncoding string
+ Database string
+ DatabaseTag string
+ ExcludeDatabaseTag bool
+ RetentionPolicy string
+ RetentionPolicyTag string
+ ExcludeRetentionPolicyTag bool
+ Consistency string
+ SkipDatabaseCreation bool
InfluxUintSupport bool `toml:"influx_uint_support"`
Serializer *influx.Serializer
+ Log telegraf.Logger
}
type httpClient struct {
- client *http.Client
- config HTTPConfig
- createdDatabases map[string]bool
+ client *http.Client
+ config HTTPConfig
+ // Tracks that the 'create database` statement was executed for the
+ // database. An attempt to create the database is made each time a new
+ // database is encountered in the database_tag and after a "database not
+ // found" error occurs.
+ createDatabaseExecuted map[string]bool
+
+ log telegraf.Logger
}
func NewHTTPClient(config HTTPConfig) (*httpClient, error) {
@@ -123,7 +133,7 @@ func NewHTTPClient(config HTTPConfig) (*httpClient, error) {
userAgent := config.UserAgent
if userAgent == "" {
- userAgent = "Telegraf/" + internal.Version()
+ userAgent = internal.ProductToken()
}
if config.Headers == nil {
@@ -171,8 +181,9 @@ func NewHTTPClient(config HTTPConfig) (*httpClient, error) {
Timeout: config.Timeout,
Transport: transport,
},
- createdDatabases: make(map[string]bool),
- config: config,
+ createDatabaseExecuted: make(map[string]bool),
+ config: config,
+ log: config.Log,
}
return client, nil
}
@@ -198,6 +209,7 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error
resp, err := c.client.Do(req.WithContext(ctx))
if err != nil {
+ internal.OnClientError(c.client, err)
return err
}
defer resp.Body.Close()
@@ -208,7 +220,6 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error
if err != nil {
if resp.StatusCode == 200 {
- c.createdDatabases[database] = true
return nil
}
@@ -218,12 +229,19 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error
}
}
- // Even with a 200 response there can be an error
+ // Even with a 200 status code there can be an error in the response body.
+ // If there is also no error string then the operation was successful.
if resp.StatusCode == http.StatusOK && queryResp.Error() == "" {
- c.createdDatabases[database] = true
+ c.createDatabaseExecuted[database] = true
return nil
}
+ // Don't attempt to recreate the database after a 403 Forbidden error.
+ // This behavior exists only to maintain backwards compatibility.
+ if resp.StatusCode == http.StatusForbidden {
+ c.createDatabaseExecuted[database] = true
+ }
+
return &APIError{
StatusCode: resp.StatusCode,
Title: resp.Status,
@@ -231,60 +249,88 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error
}
}
+type dbrp struct {
+ Database string
+ RetentionPolicy string
+}
+
// Write sends the metrics to InfluxDB
func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error {
- batches := make(map[string][]telegraf.Metric)
- if c.config.DatabaseTag == "" {
- err := c.writeBatch(ctx, c.config.Database, metrics)
- if err != nil {
- return err
+ // If these options are not used, we can skip in plugin batching and send
+ // the full batch in a single request.
+ if c.config.DatabaseTag == "" && c.config.RetentionPolicyTag == "" {
+ return c.writeBatch(ctx, c.config.Database, c.config.RetentionPolicy, metrics)
+ }
+
+ batches := make(map[dbrp][]telegraf.Metric)
+ for _, metric := range metrics {
+ db, ok := metric.GetTag(c.config.DatabaseTag)
+ if !ok {
+ db = c.config.Database
}
- } else {
- for _, metric := range metrics {
- db, ok := metric.GetTag(c.config.DatabaseTag)
- if !ok {
- db = c.config.Database
- }
- if _, ok := batches[db]; !ok {
- batches[db] = make([]telegraf.Metric, 0)
- }
+ rp, ok := metric.GetTag(c.config.RetentionPolicyTag)
+ if !ok {
+ rp = c.config.RetentionPolicy
+ }
- batches[db] = append(batches[db], metric)
+ dbrp := dbrp{
+ Database: db,
+ RetentionPolicy: rp,
}
- for db, batch := range batches {
- if !c.config.SkipDatabaseCreation && !c.createdDatabases[db] {
- err := c.CreateDatabase(ctx, db)
- if err != nil {
- log.Printf("W! [outputs.influxdb] when writing to [%s]: database %q creation failed: %v",
- c.config.URL, db, err)
- }
+ if c.config.ExcludeDatabaseTag || c.config.ExcludeRetentionPolicyTag {
+ // Avoid modifying the metric in case we need to retry the request.
+ metric = metric.Copy()
+ metric.Accept()
+ if c.config.ExcludeDatabaseTag {
+ metric.RemoveTag(c.config.DatabaseTag)
+ }
+ if c.config.ExcludeRetentionPolicyTag {
+ metric.RemoveTag(c.config.RetentionPolicyTag)
}
+ }
- err := c.writeBatch(ctx, db, batch)
+ batches[dbrp] = append(batches[dbrp], metric)
+ }
+
+ for dbrp, batch := range batches {
+ if !c.config.SkipDatabaseCreation && !c.createDatabaseExecuted[dbrp.Database] {
+ err := c.CreateDatabase(ctx, dbrp.Database)
if err != nil {
- return err
+ c.log.Warnf("When writing to [%s]: database %q creation failed: %v",
+ c.config.URL, dbrp.Database, err)
}
}
+
+ err := c.writeBatch(ctx, dbrp.Database, dbrp.RetentionPolicy, batch)
+ if err != nil {
+ return err
+ }
}
return nil
}
-func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegraf.Metric) error {
- url, err := makeWriteURL(c.config.URL, db, c.config.RetentionPolicy, c.config.Consistency)
+func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []telegraf.Metric) error {
+ loc, err := makeWriteURL(c.config.URL, db, rp, c.config.Consistency)
if err != nil {
return err
}
- reader := influx.NewReader(metrics, c.config.Serializer)
- req, err := c.makeWriteRequest(url, reader)
+ reader, err := c.requestBodyReader(metrics)
+ if err != nil {
+ return err
+ }
+ defer reader.Close()
+
+ req, err := c.makeWriteRequest(loc, reader)
if err != nil {
return err
}
resp, err := c.client.Do(req.WithContext(ctx))
if err != nil {
+ internal.OnClientError(c.client, err)
return err
}
defer resp.Body.Close()
@@ -323,7 +369,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr
// discarded for being older than the retention policy. Usually this not
// a cause for concern and we don't want to retry.
if strings.Contains(desc, errStringPointsBeyondRP) {
- log.Printf("W! [outputs.influxdb]: when writing to [%s]: received error %v",
+ c.log.Warnf("When writing to [%s]: received error %v",
c.URL(), desc)
return nil
}
@@ -332,7 +378,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr
// correctable at this point and so the point is dropped instead of
// retrying.
if strings.Contains(desc, errStringPartialWrite) {
- log.Printf("E! [outputs.influxdb]: when writing to [%s]: received error %v; discarding points",
+ c.log.Errorf("When writing to [%s]: received error %v; discarding points",
c.URL(), desc)
return nil
}
@@ -340,7 +386,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr
// This error indicates a bug in either Telegraf line protocol
// serialization, retries would not be successful.
if strings.Contains(desc, errStringUnableToParse) {
- log.Printf("E! [outputs.influxdb]: when writing to [%s]: received error %v; discarding points",
+ c.log.Errorf("When writing to [%s]: received error %v; discarding points",
c.URL(), desc)
return nil
}
@@ -375,12 +421,6 @@ func (c *httpClient) makeQueryRequest(query string) (*http.Request, error) {
func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) {
var err error
- if c.config.ContentEncoding == "gzip" {
- body, err = internal.CompressWithGzip(body)
- if err != nil {
- return nil, err
- }
- }
req, err := http.NewRequest("POST", url, body)
if err != nil {
@@ -397,6 +437,23 @@ func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request
return req, nil
}
+// requestBodyReader warp io.Reader from influx.NewReader to io.ReadCloser, which is usefully to fast close the write
+// side of the connection in case of error
+func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser, error) {
+ reader := influx.NewReader(metrics, c.config.Serializer)
+
+ if c.config.ContentEncoding == "gzip" {
+ rc, err := internal.CompressWithGzip(reader)
+ if err != nil {
+ return nil, err
+ }
+
+ return rc, nil
+ }
+
+ return ioutil.NopCloser(reader), nil
+}
+
func (c *httpClient) addHeaders(req *http.Request) {
if c.config.Username != "" || c.config.Password != "" {
req.SetBasicAuth(c.config.Username, c.config.Password)
@@ -448,3 +505,7 @@ func makeQueryURL(loc *url.URL) (string, error) {
}
return u.String(), nil
}
+
+func (c *httpClient) Close() {
+ c.client.CloseIdleConnections()
+}
diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go
index 2b6b45eefa8d0..1d030d36cd583 100644
--- a/plugins/outputs/influxdb/http_test.go
+++ b/plugins/outputs/influxdb/http_test.go
@@ -21,6 +21,7 @@ import (
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/outputs/influxdb"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@@ -247,8 +248,6 @@ func TestHTTP_Write(t *testing.T) {
u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
require.NoError(t, err)
- internal.SetVersion("1.2.3")
-
tests := []struct {
name string
config influxdb.HTTPConfig
@@ -261,6 +260,7 @@ func TestHTTP_Write(t *testing.T) {
config: influxdb.HTTPConfig{
URL: u,
Database: "telegraf",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, r.FormValue("db"), "telegraf")
@@ -277,6 +277,7 @@ func TestHTTP_Write(t *testing.T) {
Database: "telegraf",
Username: "guy",
Password: "smiley",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
@@ -292,6 +293,7 @@ func TestHTTP_Write(t *testing.T) {
URL: u,
Database: "telegraf",
UserAgent: "telegraf",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, r.Header.Get("User-Agent"), "telegraf")
@@ -303,9 +305,10 @@ func TestHTTP_Write(t *testing.T) {
config: influxdb.HTTPConfig{
URL: u,
Database: "telegraf",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
- require.Equal(t, r.Header.Get("User-Agent"), "Telegraf/1.2.3")
+ require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent"))
w.WriteHeader(http.StatusNoContent)
},
},
@@ -313,6 +316,7 @@ func TestHTTP_Write(t *testing.T) {
name: "default database",
config: influxdb.HTTPConfig{
URL: u,
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, "telegraf", r.FormValue("db"))
@@ -327,6 +331,7 @@ func TestHTTP_Write(t *testing.T) {
"A": "B",
"C": "D",
},
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, r.Header.Get("A"), "B")
@@ -340,6 +345,7 @@ func TestHTTP_Write(t *testing.T) {
URL: u,
Database: "telegraf",
RetentionPolicy: "foo",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, "foo", r.FormValue("rp"))
@@ -352,6 +358,7 @@ func TestHTTP_Write(t *testing.T) {
URL: u,
Database: "telegraf",
Consistency: "all",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, "all", r.FormValue("consistency"))
@@ -363,6 +370,7 @@ func TestHTTP_Write(t *testing.T) {
config: influxdb.HTTPConfig{
URL: u,
Database: "telegraf",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
@@ -377,6 +385,7 @@ func TestHTTP_Write(t *testing.T) {
config: influxdb.HTTPConfig{
URL: u,
Database: "telegraf",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
@@ -391,6 +400,7 @@ func TestHTTP_Write(t *testing.T) {
config: influxdb.HTTPConfig{
URL: u,
Database: "telegraf",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
@@ -405,6 +415,7 @@ func TestHTTP_Write(t *testing.T) {
config: influxdb.HTTPConfig{
URL: u,
Database: "telegraf",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadGateway)
@@ -422,6 +433,7 @@ func TestHTTP_Write(t *testing.T) {
config: influxdb.HTTPConfig{
URL: u,
Database: "telegraf",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusServiceUnavailable)
@@ -523,6 +535,7 @@ func TestHTTP_WritePathPrefix(t *testing.T) {
config := influxdb.HTTPConfig{
URL: u,
Database: "telegraf",
+ Log: testutil.Logger{},
}
client, err := influxdb.NewHTTPClient(config)
@@ -577,6 +590,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) {
URL: u,
Database: "telegraf",
ContentEncoding: "gzip",
+ Log: testutil.Logger{},
}
client, err := influxdb.NewHTTPClient(config)
@@ -616,6 +630,7 @@ func TestHTTP_UnixSocket(t *testing.T) {
config: influxdb.HTTPConfig{
URL: &url.URL{Scheme: "unix", Path: sock},
Database: "xyzzy",
+ Log: testutil.Logger{},
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, `CREATE DATABASE "xyzzy"`, r.FormValue("q"))
@@ -658,3 +673,465 @@ func TestHTTP_UnixSocket(t *testing.T) {
})
}
}
+
+func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/write":
+ r.ParseForm()
+ require.Equal(t, r.Form["db"], []string{"foo"})
+
+ body, err := ioutil.ReadAll(r.Body)
+ require.NoError(t, err)
+ require.Contains(t, string(body), "cpu value=42")
+
+ w.WriteHeader(http.StatusNoContent)
+ return
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+ }),
+ )
+ defer ts.Close()
+
+ addr := &url.URL{
+ Scheme: "http",
+ Host: ts.Listener.Addr().String(),
+ }
+
+ config := influxdb.HTTPConfig{
+ URL: addr,
+ Database: "telegraf",
+ DatabaseTag: "database",
+ ExcludeDatabaseTag: true,
+ Log: testutil.Logger{},
+ }
+
+ client, err := influxdb.NewHTTPClient(config)
+ require.NoError(t, err)
+
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "database": "foo",
+ },
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ ctx := context.Background()
+ err = client.Write(ctx, metrics)
+ require.NoError(t, err)
+ err = client.Write(ctx, metrics)
+ require.NoError(t, err)
+}
+
+func TestDBRPTags(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ config influxdb.HTTPConfig
+ metrics []telegraf.Metric
+ handlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request)
+ url string
+ }{
+ {
+ name: "defaults",
+ config: influxdb.HTTPConfig{
+ URL: u,
+ Database: "telegraf",
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "database": "foo",
+ },
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, r.FormValue("db"), "telegraf")
+ require.Equal(t, r.FormValue("rp"), "")
+ w.WriteHeader(http.StatusNoContent)
+ },
+ },
+ {
+ name: "static retention policy",
+ config: influxdb.HTTPConfig{
+ URL: u,
+ Database: "telegraf",
+ RetentionPolicy: "foo",
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, r.FormValue("db"), "telegraf")
+ require.Equal(t, r.FormValue("rp"), "foo")
+ w.WriteHeader(http.StatusNoContent)
+ },
+ },
+ {
+ name: "retention policy tag",
+ config: influxdb.HTTPConfig{
+ URL: u,
+ SkipDatabaseCreation: true,
+ Database: "telegraf",
+ RetentionPolicyTag: "rp",
+ Log: testutil.Logger{},
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "rp": "foo",
+ },
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, r.FormValue("db"), "telegraf")
+ require.Equal(t, r.FormValue("rp"), "foo")
+ body, err := ioutil.ReadAll(r.Body)
+ require.NoError(t, err)
+ require.Contains(t, string(body), "cpu,rp=foo value=42")
+ w.WriteHeader(http.StatusNoContent)
+ },
+ },
+ {
+ name: "retention policy tag fallback to static rp",
+ config: influxdb.HTTPConfig{
+ URL: u,
+ SkipDatabaseCreation: true,
+ Database: "telegraf",
+ RetentionPolicy: "foo",
+ RetentionPolicyTag: "rp",
+ Log: testutil.Logger{},
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, r.FormValue("db"), "telegraf")
+ require.Equal(t, r.FormValue("rp"), "foo")
+ w.WriteHeader(http.StatusNoContent)
+ },
+ },
+ {
+ name: "retention policy tag fallback to unset rp",
+ config: influxdb.HTTPConfig{
+ URL: u,
+ SkipDatabaseCreation: true,
+ Database: "telegraf",
+ RetentionPolicyTag: "rp",
+ Log: testutil.Logger{},
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, r.FormValue("db"), "telegraf")
+ require.Equal(t, r.FormValue("rp"), "")
+ w.WriteHeader(http.StatusNoContent)
+ },
+ },
+ {
+ name: "exclude retention policy tag",
+ config: influxdb.HTTPConfig{
+ URL: u,
+ SkipDatabaseCreation: true,
+ Database: "telegraf",
+ RetentionPolicyTag: "rp",
+ ExcludeRetentionPolicyTag: true,
+ Log: testutil.Logger{},
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "rp": "foo",
+ },
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, r.FormValue("db"), "telegraf")
+ require.Equal(t, r.FormValue("rp"), "foo")
+ body, err := ioutil.ReadAll(r.Body)
+ require.NoError(t, err)
+ require.Contains(t, string(body), "cpu value=42")
+ w.WriteHeader(http.StatusNoContent)
+ },
+ },
+ {
+ name: "exclude database tag keeps retention policy tag",
+ config: influxdb.HTTPConfig{
+ URL: u,
+ SkipDatabaseCreation: true,
+ Database: "telegraf",
+ RetentionPolicyTag: "rp",
+ ExcludeDatabaseTag: true,
+ Log: testutil.Logger{},
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "rp": "foo",
+ },
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, r.FormValue("db"), "telegraf")
+ require.Equal(t, r.FormValue("rp"), "foo")
+ body, err := ioutil.ReadAll(r.Body)
+ require.NoError(t, err)
+ require.Contains(t, string(body), "cpu,rp=foo value=42")
+ w.WriteHeader(http.StatusNoContent)
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/write":
+ tt.handlerFunc(t, w, r)
+ return
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+ })
+
+ client, err := influxdb.NewHTTPClient(tt.config)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+ err = client.Write(ctx, tt.metrics)
+ require.NoError(t, err)
+ })
+ }
+}
+
+type MockHandlerChain struct {
+ handlers []http.HandlerFunc
+}
+
+func (h *MockHandlerChain) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if len(h.handlers) == 0 {
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ next, rest := h.handlers[0], h.handlers[1:]
+ h.handlers = rest
+ next(w, r)
+}
+
+func (h *MockHandlerChain) Done() bool {
+ return len(h.handlers) == 0
+}
+
+func TestDBRPTagsCreateDatabaseNotCalledOnRetryAfterForbidden(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ handlers := &MockHandlerChain{
+ handlers: []http.HandlerFunc{
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/query":
+ if r.FormValue("q") != `CREATE DATABASE "telegraf"` {
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ w.WriteHeader(http.StatusForbidden)
+ w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`))
+ default:
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ },
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/write":
+ w.WriteHeader(http.StatusNoContent)
+ default:
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ },
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/write":
+ w.WriteHeader(http.StatusNoContent)
+ default:
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ },
+ },
+ }
+ ts.Config.Handler = handlers
+
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ output := influxdb.InfluxDB{
+ URL: u.String(),
+ Database: "telegraf",
+ DatabaseTag: "database",
+ Log: testutil.Logger{},
+ CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) {
+ return influxdb.NewHTTPClient(*config)
+ },
+ }
+ err = output.Connect()
+ require.NoError(t, err)
+ err = output.Write(metrics)
+ require.NoError(t, err)
+ err = output.Write(metrics)
+ require.NoError(t, err)
+
+ require.True(t, handlers.Done(), "all handlers not called")
+}
+
+func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ handlers := &MockHandlerChain{
+ handlers: []http.HandlerFunc{
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/query":
+ if r.FormValue("q") != `CREATE DATABASE "telegraf"` {
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ w.WriteHeader(http.StatusForbidden)
+ w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`))
+ default:
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ },
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/write":
+ w.WriteHeader(http.StatusNotFound)
+ w.Write([]byte(`{"error": "database not found: \"telegraf\""}`))
+ default:
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ },
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/query":
+ if r.FormValue("q") != `CREATE DATABASE "telegraf"` {
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ w.WriteHeader(http.StatusForbidden)
+ default:
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ },
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/write":
+ w.WriteHeader(http.StatusNoContent)
+ default:
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ },
+ },
+ }
+ ts.Config.Handler = handlers
+
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ output := influxdb.InfluxDB{
+ URL: u.String(),
+ Database: "telegraf",
+ DatabaseTag: "database",
+ Log: testutil.Logger{},
+ CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) {
+ return influxdb.NewHTTPClient(*config)
+ },
+ }
+
+ err = output.Connect()
+ require.NoError(t, err)
+ err = output.Write(metrics)
+ require.Error(t, err)
+ err = output.Write(metrics)
+ require.NoError(t, err)
+
+ require.True(t, handlers.Done(), "all handlers not called")
+}
diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go
index 3b3e8020607ab..68e8c93ac4aa5 100644
--- a/plugins/outputs/influxdb/influxdb.go
+++ b/plugins/outputs/influxdb/influxdb.go
@@ -4,14 +4,13 @@ import (
"context"
"errors"
"fmt"
- "log"
"math/rand"
"net/url"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers/influx"
)
@@ -27,26 +26,30 @@ type Client interface {
CreateDatabase(ctx context.Context, database string) error
Database() string
URL() string
+ Close()
}
// InfluxDB struct is the primary data structure for the plugin
type InfluxDB struct {
- URL string // url deprecated in 0.1.9; use urls
- URLs []string `toml:"urls"`
- Username string
- Password string
- Database string
- DatabaseTag string `toml:"database_tag"`
- UserAgent string
- RetentionPolicy string
- WriteConsistency string
- Timeout internal.Duration
- UDPPayload internal.Size `toml:"udp_payload"`
- HTTPProxy string `toml:"http_proxy"`
- HTTPHeaders map[string]string `toml:"http_headers"`
- ContentEncoding string `toml:"content_encoding"`
- SkipDatabaseCreation bool `toml:"skip_database_creation"`
- InfluxUintSupport bool `toml:"influx_uint_support"`
+ URL string // url deprecated in 0.1.9; use urls
+ URLs []string `toml:"urls"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ Database string `toml:"database"`
+ DatabaseTag string `toml:"database_tag"`
+ ExcludeDatabaseTag bool `toml:"exclude_database_tag"`
+ RetentionPolicy string `toml:"retention_policy"`
+ RetentionPolicyTag string `toml:"retention_policy_tag"`
+ ExcludeRetentionPolicyTag bool `toml:"exclude_retention_policy_tag"`
+ UserAgent string `toml:"user_agent"`
+ WriteConsistency string `toml:"write_consistency"`
+ Timeout internal.Duration `toml:"timeout"`
+ UDPPayload internal.Size `toml:"udp_payload"`
+ HTTPProxy string `toml:"http_proxy"`
+ HTTPHeaders map[string]string `toml:"http_headers"`
+ ContentEncoding string `toml:"content_encoding"`
+ SkipDatabaseCreation bool `toml:"skip_database_creation"`
+ InfluxUintSupport bool `toml:"influx_uint_support"`
tls.ClientConfig
Precision string // precision deprecated in 1.0; value is ignored
@@ -56,7 +59,7 @@ type InfluxDB struct {
CreateHTTPClientF func(config *HTTPConfig) (Client, error)
CreateUDPClientF func(config *UDPConfig) (Client, error)
- serializer *influx.Serializer
+ Log telegraf.Logger
}
var sampleConfig = `
@@ -76,6 +79,9 @@ var sampleConfig = `
## tag is not set the 'database' option is used as the default.
# database_tag = ""
+ ## If true, the 'database_tag' will not be included in the written metric.
+ # exclude_database_tag = false
+
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
@@ -85,6 +91,13 @@ var sampleConfig = `
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
+ ## The value of this tag will be used to determine the retention policy. If this
+ ## tag is not set the 'retention_policy' option is used as the default.
+ # retention_policy_tag = ""
+
+ ## If true, the 'retention_policy_tag' will not be included in the written metric.
+ # exclude_retention_policy_tag = false
+
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
@@ -140,11 +153,6 @@ func (i *InfluxDB) Connect() error {
urls = append(urls, defaultURL)
}
- i.serializer = influx.NewSerializer()
- if i.InfluxUintSupport {
- i.serializer.SetFieldTypeSupport(influx.UintSupport)
- }
-
for _, u := range urls {
parts, err := url.Parse(u)
if err != nil {
@@ -183,6 +191,9 @@ func (i *InfluxDB) Connect() error {
}
func (i *InfluxDB) Close() error {
+ for _, client := range i.clients {
+ client.Close()
+ }
return nil
}
@@ -213,13 +224,13 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
if !i.SkipDatabaseCreation {
err := client.CreateDatabase(ctx, apiError.Database)
if err != nil {
- log.Printf("E! [outputs.influxdb] when writing to [%s]: database %q not found and failed to recreate",
+ i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate",
client.URL(), apiError.Database)
}
}
}
- log.Printf("E! [outputs.influxdb] when writing to [%s]: %v", client.URL(), err)
+ i.Log.Errorf("When writing to [%s]: %v", client.URL(), err)
}
return errors.New("could not write any address")
@@ -229,7 +240,8 @@ func (i *InfluxDB) udpClient(url *url.URL) (Client, error) {
config := &UDPConfig{
URL: url,
MaxPayloadSize: int(i.UDPPayload.Size),
- Serializer: i.serializer,
+ Serializer: i.newSerializer(),
+ Log: i.Log,
}
c, err := i.CreateUDPClientF(config)
@@ -247,21 +259,25 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL)
}
config := &HTTPConfig{
- URL: url,
- Timeout: i.Timeout.Duration,
- TLSConfig: tlsConfig,
- UserAgent: i.UserAgent,
- Username: i.Username,
- Password: i.Password,
- Proxy: proxy,
- ContentEncoding: i.ContentEncoding,
- Headers: i.HTTPHeaders,
- Database: i.Database,
- DatabaseTag: i.DatabaseTag,
- SkipDatabaseCreation: i.SkipDatabaseCreation,
- RetentionPolicy: i.RetentionPolicy,
- Consistency: i.WriteConsistency,
- Serializer: i.serializer,
+ URL: url,
+ Timeout: i.Timeout.Duration,
+ TLSConfig: tlsConfig,
+ UserAgent: i.UserAgent,
+ Username: i.Username,
+ Password: i.Password,
+ Proxy: proxy,
+ ContentEncoding: i.ContentEncoding,
+ Headers: i.HTTPHeaders,
+ Database: i.Database,
+ DatabaseTag: i.DatabaseTag,
+ ExcludeDatabaseTag: i.ExcludeDatabaseTag,
+ SkipDatabaseCreation: i.SkipDatabaseCreation,
+ RetentionPolicy: i.RetentionPolicy,
+ RetentionPolicyTag: i.RetentionPolicyTag,
+ ExcludeRetentionPolicyTag: i.ExcludeRetentionPolicyTag,
+ Consistency: i.WriteConsistency,
+ Serializer: i.newSerializer(),
+ Log: i.Log,
}
c, err := i.CreateHTTPClientF(config)
@@ -272,14 +288,23 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL)
if !i.SkipDatabaseCreation {
err = c.CreateDatabase(ctx, c.Database())
if err != nil {
- log.Printf("W! [outputs.influxdb] when writing to [%s]: database %q creation failed: %v",
- c.URL(), i.Database, err)
+ i.Log.Warnf("When writing to [%s]: database %q creation failed: %v",
+ c.URL(), c.Database(), err)
}
}
return c, nil
}
+func (i *InfluxDB) newSerializer() *influx.Serializer {
+ serializer := influx.NewSerializer()
+ if i.InfluxUintSupport {
+ serializer.SetFieldTypeSupport(influx.UintSupport)
+ }
+
+ return serializer
+}
+
func init() {
outputs.Add("influxdb", func() telegraf.Output {
return &InfluxDB{
diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go
index 2f47d8134fcb7..476211069af7b 100644
--- a/plugins/outputs/influxdb/influxdb_test.go
+++ b/plugins/outputs/influxdb/influxdb_test.go
@@ -8,9 +8,10 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs/influxdb"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@@ -19,6 +20,9 @@ type MockClient struct {
WriteF func(context.Context, []telegraf.Metric) error
CreateDatabaseF func(ctx context.Context, database string) error
DatabaseF func() string
+ CloseF func()
+
+ log telegraf.Logger
}
func (c *MockClient) URL() string {
@@ -37,6 +41,14 @@ func (c *MockClient) Database() string {
return c.DatabaseF()
}
+func (c *MockClient) Close() {
+ c.CloseF()
+}
+
+func (c *MockClient) SetLogger(log telegraf.Logger) {
+ c.log = log
+}
+
func TestDeprecatedURLSupport(t *testing.T) {
var actual *influxdb.UDPConfig
output := influxdb.InfluxDB{
@@ -47,6 +59,9 @@ func TestDeprecatedURLSupport(t *testing.T) {
return &MockClient{}, nil
},
}
+
+ output.Log = testutil.Logger{}
+
err := output.Connect()
require.NoError(t, err)
require.Equal(t, "udp://localhost:8089", actual.URL.String())
@@ -67,6 +82,9 @@ func TestDefaultURL(t *testing.T) {
}, nil
},
}
+
+ output.Log = testutil.Logger{}
+
err := output.Connect()
require.NoError(t, err)
require.Equal(t, "http://localhost:8086", actual.URL.String())
@@ -84,6 +102,8 @@ func TestConnectUDPConfig(t *testing.T) {
return &MockClient{}, nil
},
}
+ output.Log = testutil.Logger{}
+
err := output.Connect()
require.NoError(t, err)
@@ -125,6 +145,9 @@ func TestConnectHTTPConfig(t *testing.T) {
}, nil
},
}
+
+ output.Log = testutil.Logger{}
+
err := output.Connect()
require.NoError(t, err)
@@ -148,7 +171,6 @@ func TestConnectHTTPConfig(t *testing.T) {
func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) {
output := influxdb.InfluxDB{
URLs: []string{"http://localhost:8086"},
-
CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) {
return &MockClient{
DatabaseF: func() string {
@@ -168,12 +190,13 @@ func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) {
},
URLF: func() string {
return "http://localhost:8086"
-
},
}, nil
},
}
+ output.Log = testutil.Logger{}
+
err := output.Connect()
require.NoError(t, err)
diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go
index 31c854defd51f..0add3c6c39de6 100644
--- a/plugins/outputs/influxdb/udp.go
+++ b/plugins/outputs/influxdb/udp.go
@@ -5,7 +5,6 @@ import (
"bytes"
"context"
"fmt"
- "log"
"net"
"net/url"
@@ -32,6 +31,7 @@ type UDPConfig struct {
URL *url.URL
Serializer *influx.Serializer
Dialer Dialer
+ Log telegraf.Logger
}
func NewUDPClient(config UDPConfig) (*udpClient, error) {
@@ -60,6 +60,7 @@ func NewUDPClient(config UDPConfig) (*udpClient, error) {
url: config.URL,
serializer: serializer,
dialer: dialer,
+ log: config.Log,
}
return client, nil
}
@@ -69,6 +70,7 @@ type udpClient struct {
dialer Dialer
serializer *influx.Serializer
url *url.URL
+ log telegraf.Logger
}
func (c *udpClient) URL() string {
@@ -93,7 +95,7 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error
if err != nil {
// Since we are serializing multiple metrics, don't fail the
// entire batch just because of one unserializable metric.
- log.Printf("E! [outputs.influxdb] when writing to [%s] could not serialize metric: %v",
+ c.log.Errorf("When writing to [%s] could not serialize metric: %v",
c.URL(), err)
continue
}
@@ -136,3 +138,6 @@ func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
}
return 0, nil, nil
}
+
+func (c *udpClient) Close() {
+}
diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go
index 136ebb787f61b..2e60c586c7a03 100644
--- a/plugins/outputs/influxdb/udp_test.go
+++ b/plugins/outputs/influxdb/udp_test.go
@@ -14,6 +14,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/outputs/influxdb"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@@ -182,6 +183,7 @@ func TestUDP_ErrorLogging(t *testing.T) {
return conn, nil
},
},
+ Log: testutil.Logger{},
},
metrics: []telegraf.Metric{getMetric()},
logContains: `could not serialize metric: "cpu": need more space`,
@@ -196,6 +198,7 @@ func TestUDP_ErrorLogging(t *testing.T) {
return conn, nil
},
},
+ Log: testutil.Logger{},
},
metrics: []telegraf.Metric{
func() telegraf.Metric {
diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md
index 830e70b41e10f..1605bda0c725a 100644
--- a/plugins/outputs/influxdb_v2/README.md
+++ b/plugins/outputs/influxdb_v2/README.md
@@ -11,6 +11,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
+ ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
urls = ["http://127.0.0.1:9999"]
## Token for authentication.
@@ -26,6 +27,9 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service.
## tag is not set the 'bucket' option is used as the default.
# bucket_tag = ""
+ ## If true, the bucket tag will not be added to the metric.
+ # exclude_bucket_tag = false
+
## Timeout for HTTP messages.
# timeout = "5s"
@@ -54,4 +58,9 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service.
# insecure_skip_verify = false
```
+### Metrics
+
+Reference the [influx serializer][] for details about metric production.
+
[InfluxDB v2.x]: https://github.com/influxdata/influxdb
+[influx serializer]: /plugins/serializers/influx/README.md#Metrics
diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go
index cdc40c1486c67..2a32c5f4c60ea 100644
--- a/plugins/outputs/influxdb_v2/http.go
+++ b/plugins/outputs/influxdb_v2/http.go
@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
+ "io/ioutil"
"log"
"net"
"net/http"
@@ -40,28 +41,30 @@ const (
)
type HTTPConfig struct {
- URL *url.URL
- Token string
- Organization string
- Bucket string
- BucketTag string
- Timeout time.Duration
- Headers map[string]string
- Proxy *url.URL
- UserAgent string
- ContentEncoding string
- TLSConfig *tls.Config
+ URL *url.URL
+ Token string
+ Organization string
+ Bucket string
+ BucketTag string
+ ExcludeBucketTag bool
+ Timeout time.Duration
+ Headers map[string]string
+ Proxy *url.URL
+ UserAgent string
+ ContentEncoding string
+ TLSConfig *tls.Config
Serializer *influx.Serializer
}
type httpClient struct {
- ContentEncoding string
- Timeout time.Duration
- Headers map[string]string
- Organization string
- Bucket string
- BucketTag string
+ ContentEncoding string
+ Timeout time.Duration
+ Headers map[string]string
+ Organization string
+ Bucket string
+ BucketTag string
+ ExcludeBucketTag bool
client *http.Client
serializer *influx.Serializer
@@ -81,7 +84,7 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) {
userAgent := config.UserAgent
if userAgent == "" {
- userAgent = "Telegraf/" + internal.Version()
+ userAgent = internal.ProductToken()
}
var headers = make(map[string]string, len(config.Headers)+2)
@@ -130,13 +133,14 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) {
Timeout: timeout,
Transport: transport,
},
- url: config.URL,
- ContentEncoding: config.ContentEncoding,
- Timeout: timeout,
- Headers: headers,
- Organization: config.Organization,
- Bucket: config.Bucket,
- BucketTag: config.BucketTag,
+ url: config.URL,
+ ContentEncoding: config.ContentEncoding,
+ Timeout: timeout,
+ Headers: headers,
+ Organization: config.Organization,
+ Bucket: config.Bucket,
+ BucketTag: config.BucketTag,
+ ExcludeBucketTag: config.ExcludeBucketTag,
}
return client, nil
}
@@ -185,6 +189,13 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error
batches[bucket] = make([]telegraf.Metric, 0)
}
+ if c.ExcludeBucketTag {
+ // Avoid modifying the metric in case we need to retry the request.
+ metric = metric.Copy()
+ metric.Accept()
+ metric.RemoveTag(c.BucketTag)
+ }
+
batches[bucket] = append(batches[bucket], metric)
}
@@ -199,19 +210,25 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error
}
func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []telegraf.Metric) error {
- url, err := makeWriteURL(*c.url, c.Organization, bucket)
+ loc, err := makeWriteURL(*c.url, c.Organization, bucket)
if err != nil {
return err
}
- reader := influx.NewReader(metrics, c.serializer)
- req, err := c.makeWriteRequest(url, reader)
+ reader, err := c.requestBodyReader(metrics)
+ if err != nil {
+ return err
+ }
+ defer reader.Close()
+
+ req, err := c.makeWriteRequest(loc, reader)
if err != nil {
return err
}
resp, err := c.client.Do(req.WithContext(ctx))
if err != nil {
+ internal.OnClientError(c.client, err)
return err
}
defer resp.Body.Close()
@@ -228,21 +245,33 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te
}
switch resp.StatusCode {
- case http.StatusBadRequest, http.StatusUnauthorized,
- http.StatusForbidden, http.StatusRequestEntityTooLarge:
+ case http.StatusBadRequest, http.StatusRequestEntityTooLarge:
log.Printf("E! [outputs.influxdb_v2] Failed to write metric: %s\n", desc)
return nil
- case http.StatusTooManyRequests, http.StatusServiceUnavailable:
+ case http.StatusUnauthorized, http.StatusForbidden:
+ return fmt.Errorf("failed to write metric: %s", desc)
+ case http.StatusTooManyRequests:
retryAfter := resp.Header.Get("Retry-After")
retry, err := strconv.Atoi(retryAfter)
if err != nil {
- retry = 0
+ return errors.New("rate limit exceeded")
}
if retry > defaultMaxWait {
retry = defaultMaxWait
}
c.retryTime = time.Now().Add(time.Duration(retry) * time.Second)
- return fmt.Errorf("Waiting %ds for server before sending metric again", retry)
+ return fmt.Errorf("waiting %ds for server before sending metric again", retry)
+ case http.StatusServiceUnavailable:
+ retryAfter := resp.Header.Get("Retry-After")
+ retry, err := strconv.Atoi(retryAfter)
+ if err != nil {
+ return errors.New("server responded: service unavailable")
+ }
+ if retry > defaultMaxWait {
+ retry = defaultMaxWait
+ }
+ c.retryTime = time.Now().Add(time.Duration(retry) * time.Second)
+ return fmt.Errorf("waiting %ds for server before sending metric again", retry)
}
// This is only until platform spec is fully implemented. As of the
@@ -260,12 +289,6 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te
func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) {
var err error
- if c.ContentEncoding == "gzip" {
- body, err = internal.CompressWithGzip(body)
- if err != nil {
- return nil, err
- }
- }
req, err := http.NewRequest("POST", url, body)
if err != nil {
@@ -282,6 +305,23 @@ func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request
return req, nil
}
+// requestBodyReader warp io.Reader from influx.NewReader to io.ReadCloser, which is usefully to fast close the write
+// side of the connection in case of error
+func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser, error) {
+ reader := influx.NewReader(metrics, c.serializer)
+
+ if c.ContentEncoding == "gzip" {
+ rc, err := internal.CompressWithGzip(reader)
+ if err != nil {
+ return nil, err
+ }
+
+ return rc, nil
+ }
+
+ return ioutil.NopCloser(reader), nil
+}
+
func (c *httpClient) addHeaders(req *http.Request) {
for header, value := range c.Headers {
req.Header.Set(header, value)
@@ -306,3 +346,7 @@ func makeWriteURL(loc url.URL, org, bucket string) (string, error) {
loc.RawQuery = params.Encode()
return loc.String(), nil
}
+
+func (c *httpClient) Close() {
+ c.client.CloseIdleConnections()
+}
diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go
index 33ff9e24b90e3..23c3ff05e17b6 100644
--- a/plugins/outputs/influxdb_v2/http_test.go
+++ b/plugins/outputs/influxdb_v2/http_test.go
@@ -1,10 +1,17 @@
package influxdb_v2_test
import (
+ "context"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
"net/url"
"testing"
+ "time"
+ "github.com/influxdata/telegraf"
influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@@ -47,3 +54,60 @@ func TestNewHTTPClient(t *testing.T) {
}
}
}
+
+func TestWriteBucketTagWorksOnRetry(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/api/v2/write":
+ r.ParseForm()
+ require.Equal(t, r.Form["bucket"], []string{"foo"})
+
+ body, err := ioutil.ReadAll(r.Body)
+ require.NoError(t, err)
+ require.Contains(t, string(body), "cpu value=42")
+
+ w.WriteHeader(http.StatusNoContent)
+ return
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+ }),
+ )
+ defer ts.Close()
+
+ addr := &url.URL{
+ Scheme: "http",
+ Host: ts.Listener.Addr().String(),
+ }
+
+ config := &influxdb.HTTPConfig{
+ URL: addr,
+ Bucket: "telegraf",
+ BucketTag: "bucket",
+ ExcludeBucketTag: true,
+ }
+
+ client, err := influxdb.NewHTTPClient(config)
+ require.NoError(t, err)
+
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "bucket": "foo",
+ },
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ ctx := context.Background()
+ err = client.Write(ctx, metrics)
+ require.NoError(t, err)
+ err = client.Write(ctx, metrics)
+ require.NoError(t, err)
+}
diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go
index dca02b0cb33d9..ccafbc4bd685a 100644
--- a/plugins/outputs/influxdb_v2/influxdb.go
+++ b/plugins/outputs/influxdb_v2/influxdb.go
@@ -11,7 +11,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers/influx"
)
@@ -27,6 +27,7 @@ var sampleConfig = `
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
+ ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
urls = ["http://127.0.0.1:9999"]
## Token for authentication.
@@ -42,6 +43,9 @@ var sampleConfig = `
## tag is not set the 'bucket' option is used as the default.
# bucket_tag = ""
+ ## If true, the bucket tag will not be added to the metric.
+ # exclude_bucket_tag = false
+
## Timeout for HTTP messages.
# timeout = "5s"
@@ -74,24 +78,25 @@ type Client interface {
Write(context.Context, []telegraf.Metric) error
URL() string // for logging
+ Close()
}
type InfluxDB struct {
- URLs []string `toml:"urls"`
- Token string `toml:"token"`
- Organization string `toml:"organization"`
- Bucket string `toml:"bucket"`
- BucketTag string `toml:"bucket_tag"`
- Timeout internal.Duration `toml:"timeout"`
- HTTPHeaders map[string]string `toml:"http_headers"`
- HTTPProxy string `toml:"http_proxy"`
- UserAgent string `toml:"user_agent"`
- ContentEncoding string `toml:"content_encoding"`
- UintSupport bool `toml:"influx_uint_support"`
+ URLs []string `toml:"urls"`
+ Token string `toml:"token"`
+ Organization string `toml:"organization"`
+ Bucket string `toml:"bucket"`
+ BucketTag string `toml:"bucket_tag"`
+ ExcludeBucketTag bool `toml:"exclude_bucket_tag"`
+ Timeout internal.Duration `toml:"timeout"`
+ HTTPHeaders map[string]string `toml:"http_headers"`
+ HTTPProxy string `toml:"http_proxy"`
+ UserAgent string `toml:"user_agent"`
+ ContentEncoding string `toml:"content_encoding"`
+ UintSupport bool `toml:"influx_uint_support"`
tls.ClientConfig
- clients []Client
- serializer *influx.Serializer
+ clients []Client
}
func (i *InfluxDB) Connect() error {
@@ -101,11 +106,6 @@ func (i *InfluxDB) Connect() error {
i.URLs = append(i.URLs, defaultURL)
}
- i.serializer = influx.NewSerializer()
- if i.UintSupport {
- i.serializer.SetFieldTypeSupport(influx.UintSupport)
- }
-
for _, u := range i.URLs {
parts, err := url.Parse(u)
if err != nil {
@@ -137,6 +137,9 @@ func (i *InfluxDB) Connect() error {
}
func (i *InfluxDB) Close() error {
+ for _, client := range i.clients {
+ client.Close()
+ }
return nil
}
@@ -165,7 +168,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
log.Printf("E! [outputs.influxdb_v2] when writing to [%s]: %v", client.URL(), err)
}
- return errors.New("could not write any address")
+ return err
}
func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) {
@@ -175,18 +178,19 @@ func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.U
}
config := &HTTPConfig{
- URL: url,
- Token: i.Token,
- Organization: i.Organization,
- Bucket: i.Bucket,
- BucketTag: i.BucketTag,
- Timeout: i.Timeout.Duration,
- Headers: i.HTTPHeaders,
- Proxy: proxy,
- UserAgent: i.UserAgent,
- ContentEncoding: i.ContentEncoding,
- TLSConfig: tlsConfig,
- Serializer: i.serializer,
+ URL: url,
+ Token: i.Token,
+ Organization: i.Organization,
+ Bucket: i.Bucket,
+ BucketTag: i.BucketTag,
+ ExcludeBucketTag: i.ExcludeBucketTag,
+ Timeout: i.Timeout.Duration,
+ Headers: i.HTTPHeaders,
+ Proxy: proxy,
+ UserAgent: i.UserAgent,
+ ContentEncoding: i.ContentEncoding,
+ TLSConfig: tlsConfig,
+ Serializer: i.newSerializer(),
}
c, err := NewHTTPClient(config)
@@ -197,6 +201,15 @@ func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.U
return c, nil
}
+func (i *InfluxDB) newSerializer() *influx.Serializer {
+ serializer := influx.NewSerializer()
+ if i.UintSupport {
+ serializer.SetFieldTypeSupport(influx.UintSupport)
+ }
+
+ return serializer
+}
+
func init() {
outputs.Add("influxdb_v2", func() telegraf.Output {
return &InfluxDB{
diff --git a/plugins/outputs/influxdb_v2/influxdb_test.go b/plugins/outputs/influxdb_v2/influxdb_test.go
index 3702b4309d774..25af4fe6a4c6e 100644
--- a/plugins/outputs/influxdb_v2/influxdb_test.go
+++ b/plugins/outputs/influxdb_v2/influxdb_test.go
@@ -3,7 +3,7 @@ package influxdb_v2_test
import (
"testing"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2"
"github.com/stretchr/testify/require"
diff --git a/plugins/outputs/instrumental/README.md b/plugins/outputs/instrumental/README.md
index 128599ee82197..f8b48fd1ea124 100644
--- a/plugins/outputs/instrumental/README.md
+++ b/plugins/outputs/instrumental/README.md
@@ -20,6 +20,6 @@ by whitespace. The `increment` type is only used if the metric comes in as a cou
template = "host.tags.measurement.field"
## Timeout in seconds to connect
timeout = "2s"
- ## Debug true - Print communcation to Instrumental
+ ## Debug true - Print communication to Instrumental
debug = false
```
diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go
index 117c9d4348c3e..e5decbf7f065f 100644
--- a/plugins/outputs/instrumental/instrumental.go
+++ b/plugins/outputs/instrumental/instrumental.go
@@ -27,6 +27,7 @@ type Instrumental struct {
Prefix string
DataFormat string
Template string
+ Templates []string
Timeout internal.Duration
Debug bool
@@ -50,7 +51,7 @@ var sampleConfig = `
template = "host.tags.measurement.field"
## Timeout in seconds to connect
timeout = "2s"
- ## Display Communcation to Instrumental
+ ## Display Communication to Instrumental
debug = false
`
@@ -85,7 +86,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
}
}
- s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false)
+ s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false, ".", i.Templates)
if err != nil {
return err
}
@@ -110,7 +111,8 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
buf, err := s.Serialize(m)
if err != nil {
- log.Printf("E! Error serializing a metric to Instrumental: %s", err)
+ log.Printf("D! [outputs.instrumental] Could not serialize metric: %v", err)
+ continue
}
switch metricType {
diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md
index 25b173a0260f7..d1cc9f0cbb18b 100644
--- a/plugins/outputs/kafka/README.md
+++ b/plugins/outputs/kafka/README.md
@@ -10,6 +10,13 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
## Kafka topic for producer messages
topic = "telegraf"
+ ## The value of this tag will be used as the topic. If not set the 'topic'
+ ## option is used.
+ # topic_tag = ""
+
+ ## If true, the 'topic_tag' will be removed from to the metric.
+ # exclude_topic_tag = false
+
## Optional Client id
# client_id = "Telegraf"
@@ -46,13 +53,21 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
# keys = ["foo", "bar"]
# separator = "_"
- ## Telegraf tag to use as a routing key
- ## ie, if this tag exists, its value will be used as the routing key
+ ## The routing tag specifies a tagkey on the metric whose value is used as
+ ## the message key. The message key is used to determine which partition to
+ ## send the message to. This tag is prefered over the routing_key option.
routing_tag = "host"
- ## Static routing key. Used when no routing_tag is set or as a fallback
- ## when the tag specified in routing tag is not found. If set to "random",
- ## a random value will be generated for each message.
+ ## The routing key is set as the message key and used to determine which
+ ## partition to send the message to. This value is only used when no
+ ## routing_tag is set or as a fallback when the tag specified in routing tag
+ ## is not found.
+ ##
+ ## If set to "random", a random value will be generated for each message.
+ ##
+ ## When unset, no message key is added and each message is routed to a random
+ ## partition.
+ ##
## ex: routing_key = "random"
## routing_key = "telegraf"
# routing_key = ""
@@ -96,6 +111,9 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
# sasl_username = "kafka"
# sasl_password = "secret"
+ ## SASL protocol version. When connecting to Azure EventHub set to 0.
+ # sasl_version = 1
+
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go
index f2951e6d5eab8..d7071f257babc 100644
--- a/plugins/outputs/kafka/kafka.go
+++ b/plugins/outputs/kafka/kafka.go
@@ -5,14 +5,15 @@ import (
"fmt"
"log"
"strings"
+ "time"
+ "github.com/Shopify/sarama"
+ "github.com/gofrs/uuid"
"github.com/influxdata/telegraf"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/kafka"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
- uuid "github.com/satori/go.uuid"
-
- "github.com/Shopify/sarama"
)
var ValidTopicSuffixMethods = []string{
@@ -21,18 +22,22 @@ var ValidTopicSuffixMethods = []string{
"tags",
}
+var zeroTime = time.Unix(0, 0)
+
type (
Kafka struct {
- Brokers []string
- Topic string
+ Brokers []string `toml:"brokers"`
+ Topic string `toml:"topic"`
+ TopicTag string `toml:"topic_tag"`
+ ExcludeTopicTag bool `toml:"exclude_topic_tag"`
ClientID string `toml:"client_id"`
TopicSuffix TopicSuffix `toml:"topic_suffix"`
RoutingTag string `toml:"routing_tag"`
RoutingKey string `toml:"routing_key"`
- CompressionCodec int
- RequiredAcks int
- MaxRetry int
- MaxMessageBytes int `toml:"max_message_bytes"`
+ CompressionCodec int `toml:"compression_codec"`
+ RequiredAcks int `toml:"required_acks"`
+ MaxRetry int `toml:"max_retry"`
+ MaxMessageBytes int `toml:"max_message_bytes"`
Version string `toml:"version"`
@@ -44,15 +49,19 @@ type (
// TLS certificate authority
CA string
+ EnableTLS *bool `toml:"enable_tls"`
tlsint.ClientConfig
- // SASL Username
SASLUsername string `toml:"sasl_username"`
- // SASL Password
SASLPassword string `toml:"sasl_password"`
+ SASLVersion *int `toml:"sasl_version"`
+
+ Log telegraf.Logger `toml:"-"`
tlsConfig tls.Config
- producer sarama.SyncProducer
+
+ producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error)
+ producer sarama.SyncProducer
serializer serializers.Serializer
}
@@ -63,12 +72,39 @@ type (
}
)
+// DebugLogger logs messages from sarama at the debug level.
+type DebugLogger struct {
+}
+
+func (*DebugLogger) Print(v ...interface{}) {
+ args := make([]interface{}, 0, len(v)+1)
+ args = append(args, "D! [sarama] ")
+ log.Print(v...)
+}
+
+func (*DebugLogger) Printf(format string, v ...interface{}) {
+ log.Printf("D! [sarama] "+format, v...)
+}
+
+func (*DebugLogger) Println(v ...interface{}) {
+ args := make([]interface{}, 0, len(v)+1)
+ args = append(args, "D! [sarama] ")
+ log.Println(args...)
+}
+
var sampleConfig = `
## URLs of kafka brokers
brokers = ["localhost:9092"]
## Kafka topic for producer messages
topic = "telegraf"
+ ## The value of this tag will be used as the topic. If not set the 'topic'
+ ## option is used.
+ # topic_tag = ""
+
+ ## If true, the 'topic_tag' will be removed from to the metric.
+ # exclude_topic_tag = false
+
## Optional Client id
# client_id = "Telegraf"
@@ -105,13 +141,21 @@ var sampleConfig = `
# keys = ["foo", "bar"]
# separator = "_"
- ## Telegraf tag to use as a routing key
- ## ie, if this tag exists, its value will be used as the routing key
+ ## The routing tag specifies a tagkey on the metric whose value is used as
+ ## the message key. The message key is used to determine which partition to
+ ## send the message to. This tag is prefered over the routing_key option.
routing_tag = "host"
- ## Static routing key. Used when no routing_tag is set or as a fallback
- ## when the tag specified in routing tag is not found. If set to "random",
- ## a random value will be generated for each message.
+ ## The routing key is set as the message key and used to determine which
+ ## partition to send the message to. This value is only used when no
+ ## routing_tag is set or as a fallback when the tag specified in routing tag
+ ## is not found.
+ ##
+ ## If set to "random", a random value will be generated for each message.
+ ##
+ ## When unset, no message key is added and each message is routed to a random
+ ## partition.
+ ##
## ex: routing_key = "random"
## routing_key = "telegraf"
# routing_key = ""
@@ -149,6 +193,7 @@ var sampleConfig = `
# max_message_bytes = 1000000
## Optional TLS Config
+ # enable_tls = true
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
@@ -159,6 +204,9 @@ var sampleConfig = `
# sasl_username = "kafka"
# sasl_password = "secret"
+ ## SASL protocol version. When connecting to Azure EventHub set to 0.
+ # sasl_version = 1
+
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -175,14 +223,29 @@ func ValidateTopicSuffixMethod(method string) error {
return fmt.Errorf("Unknown topic suffix method provided: %s", method)
}
-func (k *Kafka) GetTopicName(metric telegraf.Metric) string {
+func (k *Kafka) GetTopicName(metric telegraf.Metric) (telegraf.Metric, string) {
+ topic := k.Topic
+ if k.TopicTag != "" {
+ if t, ok := metric.GetTag(k.TopicTag); ok {
+ topic = t
+
+ // If excluding the topic tag, a copy is required to avoid modifying
+ // the metric buffer.
+ if k.ExcludeTopicTag {
+ metric = metric.Copy()
+ metric.Accept()
+ metric.RemoveTag(k.TopicTag)
+ }
+ }
+ }
+
var topicName string
switch k.TopicSuffix.Method {
case "measurement":
- topicName = k.Topic + k.TopicSuffix.Separator + metric.Name()
+ topicName = topic + k.TopicSuffix.Separator + metric.Name()
case "tags":
var topicNameComponents []string
- topicNameComponents = append(topicNameComponents, k.Topic)
+ topicNameComponents = append(topicNameComponents, topic)
for _, tag := range k.TopicSuffix.Keys {
tagValue := metric.Tags()[tag]
if tagValue != "" {
@@ -191,9 +254,9 @@ func (k *Kafka) GetTopicName(metric telegraf.Metric) string {
}
topicName = strings.Join(topicNameComponents, k.TopicSuffix.Separator)
default:
- topicName = k.Topic
+ topicName = topic
}
- return topicName
+ return metric, topicName
}
func (k *Kafka) SetSerializer(serializer serializers.Serializer) {
@@ -237,6 +300,10 @@ func (k *Kafka) Connect() error {
k.TLSKey = k.Key
}
+ if k.EnableTLS != nil && *k.EnableTLS {
+ config.Net.TLS.Enable = true
+ }
+
tlsConfig, err := k.ClientConfig.TLSConfig()
if err != nil {
return err
@@ -244,16 +311,28 @@ func (k *Kafka) Connect() error {
if tlsConfig != nil {
config.Net.TLS.Config = tlsConfig
- config.Net.TLS.Enable = true
+
+ // To maintain backwards compatibility, if the enable_tls option is not
+ // set TLS is enabled if a non-default TLS config is used.
+ if k.EnableTLS == nil {
+ k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS")
+ config.Net.TLS.Enable = true
+ }
}
if k.SASLUsername != "" && k.SASLPassword != "" {
config.Net.SASL.User = k.SASLUsername
config.Net.SASL.Password = k.SASLPassword
config.Net.SASL.Enable = true
+
+ version, err := kafka.SASLVersion(config.Version, k.SASLVersion)
+ if err != nil {
+ return err
+ }
+ config.Net.SASL.Version = version
}
- producer, err := sarama.NewSyncProducer(k.Brokers, config)
+ producer, err := k.producerFunc(k.Brokers, config)
if err != nil {
return err
}
@@ -273,35 +352,51 @@ func (k *Kafka) Description() string {
return "Configuration for the Kafka server to send metrics to"
}
-func (k *Kafka) routingKey(metric telegraf.Metric) string {
+func (k *Kafka) routingKey(metric telegraf.Metric) (string, error) {
if k.RoutingTag != "" {
key, ok := metric.GetTag(k.RoutingTag)
if ok {
- return key
+ return key, nil
}
}
if k.RoutingKey == "random" {
- u := uuid.NewV4()
- return u.String()
+ u, err := uuid.NewV4()
+ if err != nil {
+ return "", err
+ }
+ return u.String(), nil
}
- return k.RoutingKey
+ return k.RoutingKey, nil
}
func (k *Kafka) Write(metrics []telegraf.Metric) error {
msgs := make([]*sarama.ProducerMessage, 0, len(metrics))
for _, metric := range metrics {
+ metric, topic := k.GetTopicName(metric)
+
buf, err := k.serializer.Serialize(metric)
if err != nil {
- return err
+ k.Log.Debugf("Could not serialize metric: %v", err)
+ continue
}
m := &sarama.ProducerMessage{
- Topic: k.GetTopicName(metric),
+ Topic: topic,
Value: sarama.ByteEncoder(buf),
}
- key := k.routingKey(metric)
+
+ // Negative timestamps are not allowed by the Kafka protocol.
+ if !metric.Time().Before(zeroTime) {
+ m.Timestamp = metric.Time()
+ }
+
+ key, err := k.routingKey(metric)
+ if err != nil {
+ return fmt.Errorf("could not generate routing key: %v", err)
+ }
+
if key != "" {
m.Key = sarama.StringEncoder(key)
}
@@ -314,7 +409,11 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error {
if errs, ok := err.(sarama.ProducerErrors); ok {
for _, prodErr := range errs {
if prodErr.Err == sarama.ErrMessageSizeTooLarge {
- log.Printf("E! Error writing to output [kafka]: Message too large, consider increasing `max_message_bytes`; dropping batch")
+ k.Log.Error("Message too large, consider increasing `max_message_bytes`; dropping batch")
+ return nil
+ }
+ if prodErr.Err == sarama.ErrInvalidTimestamp {
+ k.Log.Error("The timestamp of the message is out of acceptable range, consider increasing broker `message.timestamp.difference.max.ms`; dropping batch")
return nil
}
return prodErr
@@ -327,10 +426,12 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error {
}
func init() {
+ sarama.Logger = &DebugLogger{}
outputs.Add("kafka", func() telegraf.Output {
return &Kafka{
MaxRetry: 3,
RequiredAcks: -1,
+ producerFunc: sarama.NewSyncProducer,
}
})
}
diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go
index ba900e32c6eaa..070eea3f91d9c 100644
--- a/plugins/outputs/kafka/kafka_test.go
+++ b/plugins/outputs/kafka/kafka_test.go
@@ -4,6 +4,7 @@ import (
"testing"
"time"
+ "github.com/Shopify/sarama"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/serializers"
@@ -81,7 +82,7 @@ func TestTopicSuffixes(t *testing.T) {
TopicSuffix: topicSuffix,
}
- topic := k.GetTopicName(metric)
+ _, topic := k.GetTopicName(metric)
require.Equal(t, expectedTopic, topic)
}
}
@@ -150,8 +151,152 @@ func TestRoutingKey(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- key := tt.kafka.routingKey(tt.metric)
+ key, err := tt.kafka.routingKey(tt.metric)
+ require.NoError(t, err)
tt.check(t, key)
})
}
}
+
+type MockProducer struct {
+ sent []*sarama.ProducerMessage
+}
+
+func (p *MockProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
+ p.sent = append(p.sent, msg)
+ return 0, 0, nil
+}
+
+func (p *MockProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
+ p.sent = append(p.sent, msgs...)
+ return nil
+}
+
+func (p *MockProducer) Close() error {
+ return nil
+}
+
+func NewMockProducer(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) {
+ return &MockProducer{}, nil
+}
+
+func TestTopicTag(t *testing.T) {
+ tests := []struct {
+ name string
+ plugin *Kafka
+ input []telegraf.Metric
+ topic string
+ value string
+ }{
+ {
+ name: "static topic",
+ plugin: &Kafka{
+ Brokers: []string{"127.0.0.1"},
+ Topic: "telegraf",
+ producerFunc: NewMockProducer,
+ },
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ topic: "telegraf",
+ value: "cpu time_idle=42 0\n",
+ },
+ {
+ name: "topic tag overrides static topic",
+ plugin: &Kafka{
+ Brokers: []string{"127.0.0.1"},
+ Topic: "telegraf",
+ TopicTag: "topic",
+ producerFunc: NewMockProducer,
+ },
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "topic": "xyzzy",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ topic: "xyzzy",
+ value: "cpu,topic=xyzzy time_idle=42 0\n",
+ },
+ {
+ name: "missing topic tag falls back to static topic",
+ plugin: &Kafka{
+ Brokers: []string{"127.0.0.1"},
+ Topic: "telegraf",
+ TopicTag: "topic",
+ producerFunc: NewMockProducer,
+ },
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ topic: "telegraf",
+ value: "cpu time_idle=42 0\n",
+ },
+ {
+ name: "exclude topic tag removes tag",
+ plugin: &Kafka{
+ Brokers: []string{"127.0.0.1"},
+ Topic: "telegraf",
+ TopicTag: "topic",
+ ExcludeTopicTag: true,
+ producerFunc: NewMockProducer,
+ },
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "topic": "xyzzy",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ topic: "xyzzy",
+ value: "cpu time_idle=42 0\n",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s, err := serializers.NewInfluxSerializer()
+ require.NoError(t, err)
+ tt.plugin.SetSerializer(s)
+
+ err = tt.plugin.Connect()
+ require.NoError(t, err)
+
+ producer := &MockProducer{}
+ tt.plugin.producer = producer
+
+ err = tt.plugin.Write(tt.input)
+ require.NoError(t, err)
+
+ require.Equal(t, tt.topic, producer.sent[0].Topic)
+
+ encoded, err := producer.sent[0].Value.Encode()
+ require.NoError(t, err)
+ require.Equal(t, tt.value, string(encoded))
+ })
+ }
+}
diff --git a/plugins/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md
index 12b6178fd9197..1931dacb91f89 100644
--- a/plugins/outputs/kinesis/README.md
+++ b/plugins/outputs/kinesis/README.md
@@ -51,7 +51,7 @@ solution to scale out.
### use_random_partitionkey [DEPRECATED]
-When true a random UUID will be generated and used as the partitionkey when sending data to Kinesis. This allows data to evenly spread across multiple shards in the stream. Due to using a random paritionKey there can be no guarantee of ordering when consuming the data off the shards.
+When true a random UUID will be generated and used as the partitionkey when sending data to Kinesis. This allows data to evenly spread across multiple shards in the stream. Due to using a random partitionKey there can be no guarantee of ordering when consuming the data off the shards.
If true then the partitionkey option will be ignored.
### partition
@@ -70,7 +70,7 @@ All metrics will be mapped to the same shard which may limit throughput.
#### tag
-This will take the value of the specified tag from each metric as the paritionKey.
+This will take the value of the specified tag from each metric as the partitionKey.
If the tag is not found the `default` value will be used or `telegraf` if unspecified
#### measurement
diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go
index 497676486293c..88620fa70d3f9 100644
--- a/plugins/outputs/kinesis/kinesis.go
+++ b/plugins/outputs/kinesis/kinesis.go
@@ -6,10 +6,9 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/kinesis"
- "github.com/satori/go.uuid"
-
+ "github.com/gofrs/uuid"
"github.com/influxdata/telegraf"
- internalaws "github.com/influxdata/telegraf/internal/config/aws"
+ internalaws "github.com/influxdata/telegraf/config/aws"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
)
@@ -71,7 +70,7 @@ var sampleConfig = `
streamname = "StreamName"
## DEPRECATED: PartitionKey as used for sharding data.
partitionkey = "PartitionKey"
- ## DEPRECATED: If set the paritionKey will be a random UUID on every put.
+ ## DEPRECATED: If set the partitionKey will be a random UUID on every put.
## This allows for scaling across multiple shards in a stream.
## This will cause issues with ordering.
use_random_partitionkey = false
@@ -118,7 +117,7 @@ func (k *KinesisOutput) Description() string {
func (k *KinesisOutput) Connect() error {
if k.Partition == nil {
- log.Print("E! kinesis : Deprecated paritionkey configuration in use, please consider using outputs.kinesis.partition")
+ log.Print("E! kinesis : Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition")
}
// We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using
@@ -184,7 +183,10 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string {
case "static":
return k.Partition.Key
case "random":
- u := uuid.NewV4()
+ u, err := uuid.NewV4()
+ if err != nil {
+ return k.Partition.Default
+ }
return u.String()
case "measurement":
return metric.Name()
@@ -201,7 +203,10 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string {
}
}
if k.RandomPartitionKey {
- u := uuid.NewV4()
+ u, err := uuid.NewV4()
+ if err != nil {
+ return k.Partition.Default
+ }
return u.String()
}
return k.PartitionKey
@@ -221,7 +226,8 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
values, err := k.serializer.Serialize(metric)
if err != nil {
- return err
+ log.Printf("D! [outputs.kinesis] Could not serialize metric: %v", err)
+ continue
}
partitionKey := k.getPartitionKey(metric)
diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go
index 627a459dbd582..9d4f6729be53c 100644
--- a/plugins/outputs/kinesis/kinesis_test.go
+++ b/plugins/outputs/kinesis/kinesis_test.go
@@ -3,8 +3,8 @@ package kinesis
import (
"testing"
+ "github.com/gofrs/uuid"
"github.com/influxdata/telegraf/testutil"
- uuid "github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
)
diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go
index 0603394ec1871..53bb8c1249188 100644
--- a/plugins/outputs/librato/librato.go
+++ b/plugins/outputs/librato/librato.go
@@ -32,7 +32,7 @@ type Librato struct {
var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]")
var sampleConfig = `
- ## Librator API Docs
+ ## Librato API Docs
## http://dev.librato.com/v1/metrics-authentication
## Librato API user
api_user = "telegraf@influxdb.com" # required.
diff --git a/plugins/outputs/logzio/README.md b/plugins/outputs/logzio/README.md
new file mode 100644
index 0000000000000..b8e2829e4faad
--- /dev/null
+++ b/plugins/outputs/logzio/README.md
@@ -0,0 +1,24 @@
+# Logz.io Output Plugin
+
+This plugin sends metrics to [Logz.io](https://logz.io/) over HTTPs.
+
+### Configuration:
+
+```toml
+[[outputs.logzio]]
+ ## Logz.io account token
+ token = "your Logz.io token" # required
+
+ ## Use your listener URL for your Logz.io account region.
+ # url = "https://listener.logz.io:8071"
+
+ ## Timeout for HTTP requests
+ # timeout = "5s"
+
+ ## Optional TLS Config for use on HTTP connections
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+```
\ No newline at end of file
diff --git a/plugins/outputs/logzio/logzio.go b/plugins/outputs/logzio/logzio.go
new file mode 100644
index 0000000000000..e557a6e26fdf3
--- /dev/null
+++ b/plugins/outputs/logzio/logzio.go
@@ -0,0 +1,186 @@
+package logzio
+
+import (
+ . "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+
+ "io/ioutil"
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/outputs"
+)
+
+const (
+ defaultLogzioRequestTimeout = time.Second * 5
+ defaultLogzioURL = "https://listener.logz.io:8071"
+
+ logzioDescription = "Send aggregate metrics to Logz.io"
+ logzioType = "telegraf"
+ logzioMaxRequestBodySize = 9 * 1024 * 1024 // 9MB
+)
+
+var sampleConfig = `
+ ## Logz.io account token
+ token = "your logz.io token" # required
+
+ ## Use your listener URL for your Logz.io account region.
+ # url = "https://listener.logz.io:8071"
+
+ ## Timeout for HTTP requests
+ # timeout = "5s"
+
+ ## Optional TLS Config for use on HTTP connections
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+`
+
+type Logzio struct {
+ Token string `toml:"token"`
+ URL string `toml:"url"`
+ Timeout internal.Duration `toml:"timeout"`
+ tls.ClientConfig
+
+ client *http.Client
+}
+
+// Connect to the Output
+func (l *Logzio) Connect() error {
+ log.Printf("D! [outputs.logzio] Connecting to logz.io output...\n")
+ if l.Token == "" || l.Token == "your logz.io token" {
+ return fmt.Errorf("token is required")
+ }
+
+ if l.URL == "" {
+ l.URL = defaultLogzioURL
+ }
+
+ if l.Timeout.Duration <= 0 {
+ l.Timeout.Duration = defaultLogzioRequestTimeout
+ }
+
+ tlsCfg, err := l.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ l.client = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ Proxy: http.ProxyFromEnvironment,
+ },
+ Timeout: l.Timeout.Duration,
+ }
+
+ log.Printf("I! [outputs.logzio] Successfuly created Logz.io sender: %s\n", l.URL)
+ return nil
+}
+
+// Close any connections to the Output
+func (l *Logzio) Close() error {
+ log.Printf("D! [outputs.logzio] Closing logz.io output\n")
+ return nil
+}
+
+// Description returns a one-sentence description on the Output
+func (l *Logzio) Description() string {
+ return logzioDescription
+}
+
+// SampleConfig returns the default configuration of the Output
+func (l *Logzio) SampleConfig() string {
+ return sampleConfig
+}
+
+// Write takes in group of points to be written to the Output
+func (l *Logzio) Write(metrics []telegraf.Metric) error {
+ if len(metrics) == 0 {
+ return nil
+ }
+
+ log.Printf("D! [outputs.logzio] Recived %d metrics\n", len(metrics))
+ var body []byte
+ for _, metric := range metrics {
+ var name = metric.Name()
+ m := make(map[string]interface{})
+
+ m["@timestamp"] = metric.Time()
+ m["measurement_name"] = name
+ if len(metric.Tags()) != 0 {
+ m["telegraf_tags"] = metric.Tags()
+ }
+ m["value_type"] = metric.Type()
+ m["type"] = logzioType
+ m[name] = metric.Fields()
+
+ serialized, err := json.Marshal(m)
+ if err != nil {
+ return fmt.Errorf("failed to marshal: %+v\n", m)
+ }
+ // Logz.io maximum request body size of 10MB. Send bulks that
+ // exceed this size (with safety buffer) via separate write requests.
+ if (len(body) + len(serialized) + 1) > logzioMaxRequestBodySize {
+ err := l.sendBulk(body)
+ if err != nil {
+ return err
+ }
+ body = nil
+ }
+ log.Printf("D! [outputs.logzio] Adding metric to the bulk: %+v\n", m)
+ body = append(body, serialized...)
+ body = append(body, '\n')
+ }
+
+ return l.sendBulk(body)
+}
+
+func (l *Logzio) sendBulk(body []byte) error {
+ if len(body) == 0 {
+ return nil
+ }
+
+ var buf Buffer
+ g := gzip.NewWriter(&buf)
+ if _, err := g.Write(body); err != nil {
+ return err
+ }
+ if err := g.Close(); err != nil {
+ return err
+ }
+ req, err := http.NewRequest("POST", fmt.Sprintf("%s/?token=%s", l.URL, l.Token), &buf)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Content-Encoding", "gzip")
+
+ resp, err := l.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ _, err = ioutil.ReadAll(resp.Body)
+ if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 {
+ return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status)
+ }
+ log.Printf("D! [outputs.logzio] Successfully sent bulk to logz.io\n")
+
+ return nil
+}
+
+func init() {
+ outputs.Add("logzio", func() telegraf.Output {
+ return &Logzio{}
+ })
+}
diff --git a/plugins/outputs/logzio/logzio_test.go b/plugins/outputs/logzio/logzio_test.go
new file mode 100644
index 0000000000000..237446374f7fb
--- /dev/null
+++ b/plugins/outputs/logzio/logzio_test.go
@@ -0,0 +1,258 @@
+package logzio
+
+import (
+ "bufio"
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ logzioTestToken = "123456789"
+)
+
+func TestConnectWithDefaults(t *testing.T) {
+ l := Logzio{
+ Token: logzioTestToken,
+ }
+
+ err := l.Connect()
+ require.NoError(t, err)
+ require.Equal(t, l.Timeout.Duration, defaultLogzioRequestTimeout)
+ require.Equal(t, l.URL, defaultLogzioURL)
+}
+
+func TestConnectWithoutToken(t *testing.T) {
+ l := Logzio{}
+
+ err := l.Connect()
+ require.Error(t, err)
+}
+
+func TestRequestHeaders(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, "gzip", r.Header.Get("Content-Encoding"))
+ require.Equal(t, "application/json", r.Header.Get("Content-Type"))
+ w.WriteHeader(http.StatusOK)
+ }))
+
+ l := Logzio{
+ Token: logzioTestToken,
+ URL: ts.URL,
+ }
+
+ err := l.Connect()
+ require.NoError(t, err)
+
+ err = l.Write(testutil.MockMetrics())
+ require.NoError(t, err)
+
+}
+
+func TestStatusCode(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ plugin *Logzio
+ statusCode int
+ errFunc func(t *testing.T, err error)
+ }{
+ {
+ name: "success",
+ plugin: &Logzio{
+ URL: u.String(),
+ Token: logzioTestToken,
+ },
+ statusCode: http.StatusOK,
+ errFunc: func(t *testing.T, err error) {
+ require.NoError(t, err)
+ },
+ },
+ {
+ name: "1xx status is an error",
+ plugin: &Logzio{
+ URL: u.String(),
+ Token: logzioTestToken,
+ },
+ statusCode: 103,
+ errFunc: func(t *testing.T, err error) {
+ require.Error(t, err)
+ },
+ },
+ {
+ name: "3xx status is an error",
+ plugin: &Logzio{
+ URL: u.String(),
+ Token: logzioTestToken,
+ },
+ statusCode: http.StatusMultipleChoices,
+ errFunc: func(t *testing.T, err error) {
+ require.Error(t, err)
+ },
+ },
+ {
+ name: "4xx status is an error",
+ plugin: &Logzio{
+ URL: u.String(),
+ Token: logzioTestToken,
+ },
+ statusCode: http.StatusMultipleChoices,
+ errFunc: func(t *testing.T, err error) {
+ require.Error(t, err)
+ },
+ },
+ {
+ name: "5xx status is an error",
+ plugin: &Logzio{
+ URL: u.String(),
+ Token: logzioTestToken,
+ },
+ statusCode: http.StatusServiceUnavailable,
+ errFunc: func(t *testing.T, err error) {
+ require.Error(t, err)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(tt.statusCode)
+ })
+
+ err = tt.plugin.Connect()
+ require.NoError(t, err)
+
+ err = tt.plugin.Write(testutil.MockMetrics())
+ tt.errFunc(t, err)
+ })
+ }
+}
+
+func TestWrite(t *testing.T) {
+ readBody := func(r *http.Request) ([]map[string]interface{}, error) {
+ gz, err := gzip.NewReader(r.Body)
+ if err != nil {
+ return nil, err
+ }
+ scanner := bufio.NewScanner(gz)
+
+ metrics := make([]map[string]interface{}, 0)
+ for scanner.Scan() {
+ line := scanner.Text()
+ var m map[string]interface{}
+ err = json.Unmarshal([]byte(line), &m)
+ if err != nil {
+ return nil, err
+ }
+ metrics = append(metrics, m)
+ }
+
+ return metrics, nil
+ }
+
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ plugin *Logzio
+ metrics []telegraf.Metric
+ errFunc func(t *testing.T, w http.ResponseWriter, r *http.Request)
+ }{
+ {
+ name: "single metric - no value type",
+ plugin: &Logzio{
+ URL: u.String(),
+ Token: logzioTestToken,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu-value",
+ map[string]string{},
+ map[string]interface{}{
+ "min": float64(42),
+ "max": float64(42),
+ "sum": float64(42),
+ "count": int64(1),
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ errFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ metrics, err := readBody(r)
+ require.NoError(t, err)
+ require.Len(t, metrics, 1)
+ w.WriteHeader(http.StatusOK)
+ },
+ },
+ {
+ name: "multiple metric",
+ plugin: &Logzio{
+ URL: u.String(),
+ Token: logzioTestToken,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu-value",
+ map[string]string{},
+ map[string]interface{}{
+ "min": float64(42),
+ "max": float64(42),
+ "sum": float64(42),
+ "count": int64(1),
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "cpu-value",
+ map[string]string{},
+ map[string]interface{}{
+ "min": float64(42),
+ "max": float64(42),
+ "sum": float64(42),
+ "count": int64(1),
+ },
+ time.Unix(60, 0),
+ telegraf.Histogram,
+ ),
+ },
+ errFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ metrics, err := readBody(r)
+ require.NoError(t, err)
+ require.Len(t, metrics, 2)
+ w.WriteHeader(http.StatusOK)
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ tt.errFunc(t, w, r)
+ })
+
+ err := tt.plugin.Connect()
+ require.NoError(t, err)
+
+ err = tt.plugin.Write(tt.metrics)
+ require.NoError(t, err)
+ })
+ }
+}
diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md
index 38eec7c3b1693..abb770f068d4f 100644
--- a/plugins/outputs/mqtt/README.md
+++ b/plugins/outputs/mqtt/README.md
@@ -53,11 +53,12 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt
### Optional parameters:
* `username`: The username to connect MQTT server.
* `password`: The password to connect MQTT server.
-* `client_id`: The unique client id to connect MQTT server. If this paramater is not set then a random ID is generated.
+* `client_id`: The unique client id to connect MQTT server. If this parameter is not set then a random ID is generated.
* `timeout`: Timeout for write operations. default: 5s
* `tls_ca`: TLS CA
* `tls_cert`: TLS CERT
* `tls_key`: TLS key
* `insecure_skip_verify`: Use TLS but skip chain & host verification (default: false)
+* `batch`: When true, metrics will be sent in one MQTT message per flush. Otherwise, metrics are written one metric per MQTT message.
* `retain`: Set `retain` flag when publishing
* `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go
index bacdd3b0e85d1..0e07b1bca8ab7 100644
--- a/plugins/outputs/mqtt/mqtt.go
+++ b/plugins/outputs/mqtt/mqtt.go
@@ -2,6 +2,7 @@ package mqtt
import (
"fmt"
+ "log"
"strings"
"sync"
"time"
@@ -9,7 +10,7 @@ import (
paho "github.com/eclipse/paho.mqtt.golang"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
)
@@ -150,9 +151,9 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error {
metricsmap[topic] = append(metricsmap[topic], metric)
} else {
buf, err := m.serializer.Serialize(metric)
-
if err != nil {
- return err
+ log.Printf("D! [outputs.mqtt] Could not serialize metric: %v", err)
+ continue
}
err = m.publish(topic, buf)
@@ -222,7 +223,7 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
}
if len(m.Servers) == 0 {
- return opts, fmt.Errorf("could not get host infomations")
+ return opts, fmt.Errorf("could not get host informations")
}
for _, host := range m.Servers {
server := fmt.Sprintf("%s://%s", scheme, host)
diff --git a/plugins/outputs/nats/README.md b/plugins/outputs/nats/README.md
index d9462650ad4d6..c5539900b02e0 100644
--- a/plugins/outputs/nats/README.md
+++ b/plugins/outputs/nats/README.md
@@ -2,18 +2,27 @@
This plugin writes to a (list of) specified NATS instance(s).
-```
+```toml
[[outputs.nats]]
## URLs of NATS servers
servers = ["nats://localhost:4222"]
## Optional credentials
# username = ""
# password = ""
+
+ ## Optional NATS 2.0 and NATS NGS compatible user credentials
+ # credentials = "/etc/telegraf/nats.creds"
+
## NATS subject for producer messages
subject = "telegraf"
+
+ ## Use Transport Layer Security
+ # secure = false
+
## Optional TLS Config
- ## CA certificate used to self-sign NATS server(s) TLS certificate(s)
# tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
@@ -23,15 +32,3 @@ This plugin writes to a (list of) specified NATS instance(s).
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
-
-### Required parameters:
-
-* `servers`: List of strings, this is for NATS clustering support. Each URL should start with `nats://`.
-* `subject`: The NATS subject to publish to.
-
-### Optional parameters:
-
-* `username`: Username for NATS
-* `password`: Password for NATS
-* `tls_ca`: TLS CA
-* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go
index d9fdb0e885f09..bf1baae339876 100644
--- a/plugins/outputs/nats/nats.go
+++ b/plugins/outputs/nats/nats.go
@@ -2,38 +2,47 @@ package nats
import (
"fmt"
-
- nats_client "github.com/nats-io/go-nats"
+ "log"
+ "strings"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
+ "github.com/nats-io/nats.go"
)
type NATS struct {
- // Servers is the NATS server pool to connect to
- Servers []string
- // Credentials
- Username string
- Password string
- // NATS subject to publish metrics to
- Subject string
+ Servers []string `toml:"servers"`
+ Secure bool `toml:"secure"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ Credentials string `toml:"credentials"`
+ Subject string `toml:"subject"`
+
tls.ClientConfig
- conn *nats_client.Conn
+ conn *nats.Conn
serializer serializers.Serializer
}
var sampleConfig = `
## URLs of NATS servers
servers = ["nats://localhost:4222"]
+
## Optional credentials
# username = ""
# password = ""
+
+ ## Optional NATS 2.0 and NATS NGS compatible user credentials
+ # credentials = "/etc/telegraf/nats.creds"
+
## NATS subject for producer messages
subject = "telegraf"
+ ## Use Transport Layer Security
+ # secure = false
+
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
@@ -55,34 +64,26 @@ func (n *NATS) SetSerializer(serializer serializers.Serializer) {
func (n *NATS) Connect() error {
var err error
- // set default NATS connection options
- opts := nats_client.DefaultOptions
-
- // override max reconnection tries
- opts.MaxReconnect = -1
-
- // override servers, if any were specified
- opts.Servers = n.Servers
+ opts := []nats.Option{
+ nats.MaxReconnects(-1),
+ }
// override authentication, if any was specified
if n.Username != "" {
- opts.User = n.Username
- opts.Password = n.Password
+ opts = append(opts, nats.UserInfo(n.Username, n.Password))
}
- // override TLS, if it was specified
- tlsConfig, err := n.ClientConfig.TLSConfig()
- if err != nil {
- return err
- }
- if tlsConfig != nil {
- // set NATS connection TLS options
- opts.Secure = true
- opts.TLSConfig = tlsConfig
+ if n.Secure {
+ tlsConfig, err := n.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ opts = append(opts, nats.Secure(tlsConfig))
}
// try and connect
- n.conn, err = opts.Connect()
+ n.conn, err = nats.Connect(strings.Join(n.Servers, ","), opts...)
return err
}
@@ -108,7 +109,8 @@ func (n *NATS) Write(metrics []telegraf.Metric) error {
for _, metric := range metrics {
buf, err := n.serializer.Serialize(metric)
if err != nil {
- return err
+ log.Printf("D! [outputs.nats] Could not serialize metric: %v", err)
+ continue
}
err = n.conn.Publish(n.Subject, buf)
diff --git a/plugins/outputs/newrelic/README.md b/plugins/outputs/newrelic/README.md
new file mode 100644
index 0000000000000..fbafd06adb8d4
--- /dev/null
+++ b/plugins/outputs/newrelic/README.md
@@ -0,0 +1,23 @@
+# New Relic output plugin
+
+This plugins writes to New Relic Insights using the [Metrics API][].
+
+To use this plugin you must first obtain an [Insights API Key][].
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration
+```toml
+[[outputs.newrelic]]
+ ## New Relic Insights API key
+ insights_key = "insights api key"
+
+ ## Prefix to add to add to metric name for easy identification.
+ # metric_prefix = ""
+
+ ## Timeout for writes to the New Relic API.
+ # timeout = "15s"
+```
+
+[Metrics API]: https://docs.newrelic.com/docs/data-ingest-apis/get-data-new-relic/metric-api/introduction-metric-api
+[Insights API Key]: https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys#user-api-key
diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go
new file mode 100644
index 0000000000000..da000c222c823
--- /dev/null
+++ b/plugins/outputs/newrelic/newrelic.go
@@ -0,0 +1,158 @@
+package newrelic
+
+// newrelic.go
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/outputs"
+ "github.com/newrelic/newrelic-telemetry-sdk-go/cumulative"
+ "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry"
+)
+
+// NewRelic nr structure
+type NewRelic struct {
+ InsightsKey string `toml:"insights_key"`
+ MetricPrefix string `toml:"metric_prefix"`
+ Timeout internal.Duration `toml:"timeout"`
+
+ harvestor *telemetry.Harvester
+ dc *cumulative.DeltaCalculator
+ savedErrors map[int]interface{}
+ errorCount int
+ Client http.Client `toml:"-"`
+}
+
+// Description returns a one-sentence description on the Output
+func (nr *NewRelic) Description() string {
+ return "Send metrics to New Relic metrics endpoint"
+}
+
+// SampleConfig : return default configuration of the Output
+func (nr *NewRelic) SampleConfig() string {
+ return `
+ ## New Relic Insights API key
+ insights_key = "insights api key"
+
+ ## Prefix to add to add to metric name for easy identification.
+ # metric_prefix = ""
+
+ ## Timeout for writes to the New Relic API.
+ # timeout = "15s"
+`
+}
+
+// Connect to the Output
+func (nr *NewRelic) Connect() error {
+ if nr.InsightsKey == "" {
+ return fmt.Errorf("InsightKey is a required for newrelic")
+ }
+ var err error
+ nr.harvestor, err = telemetry.NewHarvester(telemetry.ConfigAPIKey(nr.InsightsKey),
+ telemetry.ConfigHarvestPeriod(0),
+ func(cfg *telemetry.Config) {
+ cfg.Product = "NewRelic-Telegraf-Plugin"
+ cfg.ProductVersion = "1.0"
+ cfg.HarvestTimeout = nr.Timeout.Duration
+ cfg.Client = &nr.Client
+ cfg.ErrorLogger = func(e map[string]interface{}) {
+ var errorString string
+ for k, v := range e {
+ errorString += fmt.Sprintf("%s = %s ", k, v)
+ }
+ nr.errorCount++
+ nr.savedErrors[nr.errorCount] = errorString
+ }
+ })
+ if err != nil {
+ return fmt.Errorf("unable to connect to newrelic %v", err)
+ }
+
+ nr.dc = cumulative.NewDeltaCalculator()
+ return nil
+}
+
+// Close any connections to the Output
+func (nr *NewRelic) Close() error {
+ nr.errorCount = 0
+ nr.Client.CloseIdleConnections()
+ return nil
+}
+
+// Write takes in group of points to be written to the Output
+func (nr *NewRelic) Write(metrics []telegraf.Metric) error {
+ nr.errorCount = 0
+ nr.savedErrors = make(map[int]interface{})
+
+ for _, metric := range metrics {
+ // create tag map
+ tags := make(map[string]interface{})
+ for _, tag := range metric.TagList() {
+ tags[tag.Key] = tag.Value
+ }
+ for _, field := range metric.FieldList() {
+ var mvalue float64
+ var mname string
+ if nr.MetricPrefix != "" {
+ mname = nr.MetricPrefix + "." + metric.Name() + "." + field.Key
+ } else {
+ mname = metric.Name() + "." + field.Key
+ }
+ switch n := field.Value.(type) {
+ case int64:
+ mvalue = float64(n)
+ case uint64:
+ mvalue = float64(n)
+ case float64:
+ mvalue = float64(n)
+ case bool:
+ mvalue = float64(0)
+ if n {
+ mvalue = float64(1)
+ }
+ case string:
+ // Do not log everytime we encounter string
+ // we just skip
+ continue
+ default:
+ return fmt.Errorf("Undefined field type: %T", field.Value)
+ }
+
+ switch metric.Type() {
+ case telegraf.Counter:
+ if counter, ok := nr.dc.CountMetric(mname, tags, mvalue, metric.Time()); ok {
+ nr.harvestor.RecordMetric(counter)
+ }
+ default:
+ nr.harvestor.RecordMetric(telemetry.Gauge{
+ Timestamp: metric.Time(),
+ Value: mvalue,
+ Name: mname,
+ Attributes: tags})
+ }
+ }
+ }
+ // By default, the Harvester sends metrics and spans to the New Relic
+ // backend every 5 seconds. You can force data to be sent at any time
+ // using HarvestNow.
+ nr.harvestor.HarvestNow(context.Background())
+
+ //Check if we encountered errors
+ if nr.errorCount != 0 {
+ return fmt.Errorf("unable to harvest metrics %s ", nr.savedErrors[nr.errorCount])
+ }
+ return nil
+}
+
+func init() {
+ outputs.Add("newrelic", func() telegraf.Output {
+ return &NewRelic{
+ Timeout: internal.Duration{Duration: time.Second * 15},
+ Client: http.Client{},
+ }
+ })
+}
diff --git a/plugins/outputs/newrelic/newrelic_test.go b/plugins/outputs/newrelic/newrelic_test.go
new file mode 100644
index 0000000000000..aa23950c72611
--- /dev/null
+++ b/plugins/outputs/newrelic/newrelic_test.go
@@ -0,0 +1,180 @@
+package newrelic
+
+import (
+ "math"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBasic(t *testing.T) {
+ nr := &NewRelic{
+ MetricPrefix: "Test",
+ InsightsKey: "12345",
+ Timeout: internal.Duration{Duration: time.Second * 5},
+ }
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ err := nr.Connect()
+ require.NoError(t, err)
+
+ err = nr.Write(testutil.MockMetrics())
+ assert.Contains(t, err.Error(), "unable to harvest metrics")
+}
+
+func TestNewRelic_Write(t *testing.T) {
+ type args struct {
+ metrics []telegraf.Metric
+ }
+ tests := []struct {
+ name string
+ metrics []telegraf.Metric
+ auditMessage string
+ wantErr bool
+ }{
+ {
+ name: "Test: Basic mock metric write",
+ metrics: testutil.MockMetrics(),
+ wantErr: false,
+ auditMessage: `"metrics":[{"name":"test1.value","type":"gauge","value":1,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`,
+ },
+ {
+ name: "Test: Test string ",
+ metrics: []telegraf.Metric{
+ testutil.TestMetric("value1", "test_String"),
+ },
+ wantErr: false,
+ auditMessage: "",
+ },
+ {
+ name: "Test: Test int64 ",
+ metrics: []telegraf.Metric{
+ testutil.TestMetric(int64(15), "test_int64"),
+ },
+ wantErr: false,
+ auditMessage: `"metrics":[{"name":"test_int64.value","type":"gauge","value":15,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`,
+ },
+ {
+ name: "Test: Test uint64 ",
+ metrics: []telegraf.Metric{
+ testutil.TestMetric(uint64(20), "test_uint64"),
+ },
+ wantErr: false,
+ auditMessage: `"metrics":[{"name":"test_uint64.value","type":"gauge","value":20,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`,
+ },
+ {
+ name: "Test: Test bool true ",
+ metrics: []telegraf.Metric{
+ testutil.TestMetric(bool(true), "test_bool_true"),
+ },
+ wantErr: false,
+ auditMessage: `"metrics":[{"name":"test_bool_true.value","type":"gauge","value":1,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`,
+ },
+ {
+ name: "Test: Test bool false ",
+ metrics: []telegraf.Metric{
+ testutil.TestMetric(bool(false), "test_bool_false"),
+ },
+ wantErr: false,
+ auditMessage: `"metrics":[{"name":"test_bool_false.value","type":"gauge","value":0,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`,
+ },
+ {
+ name: "Test: Test max float64 ",
+ metrics: []telegraf.Metric{
+ testutil.TestMetric(math.MaxFloat64, "test_maxfloat64"),
+ },
+ wantErr: false,
+ auditMessage: `"metrics":[{"name":"test_maxfloat64.value","type":"gauge","value":1.7976931348623157e+308,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`,
+ },
+ {
+ name: "Test: Test NAN ",
+ metrics: []telegraf.Metric{
+ testutil.TestMetric(math.NaN, "test_NaN"),
+ },
+ wantErr: false,
+ auditMessage: ``,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var auditLog map[string]interface{}
+ nr := &NewRelic{}
+ nr.harvestor, _ = telemetry.NewHarvester(
+ telemetry.ConfigHarvestPeriod(0),
+ func(cfg *telemetry.Config) {
+ cfg.APIKey = "dummyTestKey"
+ cfg.HarvestPeriod = 0
+ cfg.HarvestTimeout = 0
+ cfg.AuditLogger = func(e map[string]interface{}) {
+ auditLog = e
+ }
+ })
+ err := nr.Write(tt.metrics)
+ assert.NoError(t, err)
+ if auditLog["data"] != nil {
+ assert.Contains(t, auditLog["data"], tt.auditMessage)
+ } else {
+ assert.Contains(t, "", tt.auditMessage)
+ }
+
+ if (err != nil) != tt.wantErr {
+ t.Errorf("NewRelic.Write() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestNewRelic_Connect(t *testing.T) {
+ tests := []struct {
+ name string
+ newrelic *NewRelic
+ wantErr bool
+ }{
+ {
+ name: "Test: No Insights key",
+ newrelic: &NewRelic{
+ MetricPrefix: "prefix",
+ },
+ wantErr: true,
+ },
+ {
+ name: "Test: Insights key",
+ newrelic: &NewRelic{
+ InsightsKey: "12312133",
+ MetricPrefix: "prefix",
+ },
+ wantErr: false,
+ },
+ {
+ name: "Test: Only Insights key",
+ newrelic: &NewRelic{
+ InsightsKey: "12312133",
+ },
+ wantErr: false,
+ },
+ {
+ name: "Test: Insights key and Timeout",
+ newrelic: &NewRelic{
+ InsightsKey: "12312133",
+ Timeout: internal.Duration{Duration: time.Second * 5},
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ nr := tt.newrelic
+ if err := nr.Connect(); (err != nil) != tt.wantErr {
+ t.Errorf("NewRelic.Connect() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go
index c826ab6485cf2..a9e2d94ac0bc0 100644
--- a/plugins/outputs/nsq/nsq.go
+++ b/plugins/outputs/nsq/nsq.go
@@ -2,12 +2,12 @@ package nsq
import (
"fmt"
-
- "github.com/nsqio/go-nsq"
+ "log"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
+ "github.com/nsqio/go-nsq"
)
type NSQ struct {
@@ -68,7 +68,8 @@ func (n *NSQ) Write(metrics []telegraf.Metric) error {
for _, metric := range metrics {
buf, err := n.serializer.Serialize(metric)
if err != nil {
- return err
+ log.Printf("D! [outputs.nsq] Could not serialize metric: %v", err)
+ continue
}
err = n.producer.Publish(n.Topic, buf)
diff --git a/plugins/outputs/opentsdb/README.md b/plugins/outputs/opentsdb/README.md
index 2fd0bd2d86d48..f737d48ae7e94 100644
--- a/plugins/outputs/opentsdb/README.md
+++ b/plugins/outputs/opentsdb/README.md
@@ -48,7 +48,7 @@ put nine.telegraf.ping_average_response_ms 1441910366 24.006000 dc=homeoffice ho
The OpenTSDB telnet interface can be simulated with this reader:
-```
+```go
// opentsdb_telnet_mode_mock.go
package main
diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go
index 1dfd2ce382fd9..ae1e2a5362bc5 100644
--- a/plugins/outputs/opentsdb/opentsdb.go
+++ b/plugins/outputs/opentsdb/opentsdb.go
@@ -3,6 +3,7 @@ package opentsdb
import (
"fmt"
"log"
+ "math"
"net"
"net/url"
"regexp"
@@ -16,14 +17,14 @@ import (
var (
allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`)
- hypenChars = strings.NewReplacer(
+ hyphenChars = strings.NewReplacer(
"@", "-",
"*", "-",
`%`, "-",
"#", "-",
"$", "-")
defaultHttpPath = "/api/put"
- defaultSeperator = "_"
+ defaultSeparator = "_"
)
type OpenTSDB struct {
@@ -136,10 +137,14 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
tags := cleanTags(m.Tags())
for fieldName, value := range m.Fields() {
- switch value.(type) {
+ switch fv := value.(type) {
case int64:
case uint64:
case float64:
+ // JSON does not support these special values
+ if math.IsNaN(fv) || math.IsInf(fv, 0) {
+ continue
+ }
default:
log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value)
continue
@@ -181,10 +186,14 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
tags := ToLineFormat(cleanTags(m.Tags()))
for fieldName, value := range m.Fields() {
- switch value.(type) {
+ switch fv := value.(type) {
case int64:
case uint64:
case float64:
+ // JSON does not support these special values
+ if math.IsNaN(fv) || math.IsInf(fv, 0) {
+ continue
+ }
default:
log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value)
continue
@@ -261,8 +270,8 @@ func (o *OpenTSDB) Close() error {
}
func sanitize(value string) string {
- // Apply special hypenation rules to preserve backwards compatibility
- value = hypenChars.Replace(value)
+ // Apply special hyphenation rules to preserve backwards compatibility
+ value = hyphenChars.Replace(value)
// Replace any remaining illegal chars
return allowedChars.ReplaceAllLiteralString(value, "_")
}
@@ -271,7 +280,7 @@ func init() {
outputs.Add("opentsdb", func() telegraf.Output {
return &OpenTSDB{
HttpPath: defaultHttpPath,
- Separator: defaultSeperator,
+ Separator: defaultSeparator,
}
})
}
diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md
index d1b4a1b0eb399..9beaa062da1eb 100644
--- a/plugins/outputs/prometheus_client/README.md
+++ b/plugins/outputs/prometheus_client/README.md
@@ -1,15 +1,23 @@
-# Prometheus Client Service Output Plugin
+# Prometheus Output Plugin
-This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all metrics on `/metrics` (default) to be polled by a Prometheus server.
+This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes
+all metrics on `/metrics` (default) to be polled by a Prometheus server.
-## Configuration
+### Configuration
```toml
-# Publish all metrics to /metrics for Prometheus to scrape
[[outputs.prometheus_client]]
## Address to listen on.
listen = ":9273"
+ ## Metric version controls the mapping from Telegraf metrics into
+ ## Prometheus format. When using the prometheus input, use the same value in
+ ## both plugins to ensure metrics are round-tripped without modification.
+ ##
+ ## example: metric_version = 1; deprecated in 1.13
+ ## metric_version = 2; recommended version
+ # metric_version = 1
+
## Use HTTP Basic Authentication.
# basic_username = "Foo"
# basic_password = "Bar"
@@ -35,7 +43,7 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all
## If set, enable TLS with the given certificate.
# tls_cert = "/etc/ssl/telegraf.crt"
# tls_key = "/etc/ssl/telegraf.key"
-
+
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
@@ -43,3 +51,9 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all
## Export metric collection time.
# export_timestamp = false
```
+
+### Metrics
+
+Prometheus metrics are produced in the same manner as the [prometheus serializer][].
+
+[prometheus serializer]: /plugins/serializers/prometheus/README.md#Metrics
diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go
index 32dcdbb891f14..53713a02ba4e6 100644
--- a/plugins/outputs/prometheus_client/prometheus_client.go
+++ b/plugins/outputs/prometheus_client/prometheus_client.go
@@ -1,92 +1,43 @@
-package prometheus_client
+package prometheus
import (
"context"
- "crypto/subtle"
"crypto/tls"
"fmt"
- "log"
"net"
"net/http"
"net/url"
- "regexp"
- "sort"
- "strconv"
- "strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
+ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1"
+ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
- invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_:]`)
- validNameCharRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*`)
+ defaultListen = ":9273"
+ defaultPath = "/metrics"
+ defaultExpirationInterval = internal.Duration{Duration: 60 * time.Second}
)
-// SampleID uniquely identifies a Sample
-type SampleID string
-
-// Sample represents the current value of a series.
-type Sample struct {
- // Labels are the Prometheus labels.
- Labels map[string]string
- // Value is the value in the Prometheus output. Only one of these will populated.
- Value float64
- HistogramValue map[float64]uint64
- SummaryValue map[float64]float64
- // Histograms and Summaries need a count and a sum
- Count uint64
- Sum float64
- // Metric timestamp
- Timestamp time.Time
- // Expiration is the deadline that this Sample is valid until.
- Expiration time.Time
-}
-
-// MetricFamily contains the data required to build valid prometheus Metrics.
-type MetricFamily struct {
- // Samples are the Sample belonging to this MetricFamily.
- Samples map[SampleID]*Sample
- // Need the telegraf ValueType because there isn't a Prometheus ValueType
- // representing Histogram or Summary
- TelegrafValueType telegraf.ValueType
- // LabelSet is the label counts for all Samples.
- LabelSet map[string]int
-}
-
-type PrometheusClient struct {
- Listen string
- BasicUsername string `toml:"basic_username"`
- BasicPassword string `toml:"basic_password"`
- IPRange []string `toml:"ip_range"`
- ExpirationInterval internal.Duration `toml:"expiration_interval"`
- Path string `toml:"path"`
- CollectorsExclude []string `toml:"collectors_exclude"`
- StringAsLabel bool `toml:"string_as_label"`
- ExportTimestamp bool `toml:"export_timestamp"`
-
- tlsint.ServerConfig
-
- server *http.Server
- url string
-
- sync.Mutex
- // fam is the non-expired MetricFamily by Prometheus metric name.
- fam map[string]*MetricFamily
- // now returns the current time.
- now func() time.Time
-}
-
var sampleConfig = `
## Address to listen on
listen = ":9273"
+ ## Metric version controls the mapping from Telegraf metrics into
+ ## Prometheus format. When using the prometheus input, use the same value in
+ ## both plugins to ensure metrics are round-tripped without modification.
+ ##
+ ## example: metric_version = 1; deprecated in 1.13
+ ## metric_version = 2; recommended version
+ # metric_version = 1
+
## Use HTTP Basic Authentication.
# basic_username = "Foo"
# basic_password = "Bar"
@@ -121,46 +72,42 @@ var sampleConfig = `
# export_timestamp = false
`
-func (p *PrometheusClient) auth(h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if p.BasicUsername != "" && p.BasicPassword != "" {
- w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
-
- username, password, ok := r.BasicAuth()
- if !ok ||
- subtle.ConstantTimeCompare([]byte(username), []byte(p.BasicUsername)) != 1 ||
- subtle.ConstantTimeCompare([]byte(password), []byte(p.BasicPassword)) != 1 {
- http.Error(w, "Not authorized", 401)
- return
- }
- }
+type Collector interface {
+ Describe(ch chan<- *prometheus.Desc)
+ Collect(ch chan<- prometheus.Metric)
+ Add(metrics []telegraf.Metric) error
+}
- if len(p.IPRange) > 0 {
- matched := false
- remoteIPs, _, _ := net.SplitHostPort(r.RemoteAddr)
- remoteIP := net.ParseIP(remoteIPs)
- for _, iprange := range p.IPRange {
- _, ipNet, err := net.ParseCIDR(iprange)
- if err != nil {
- http.Error(w, "Config Error in ip_range setting", 500)
- return
- }
- if ipNet.Contains(remoteIP) {
- matched = true
- break
- }
- }
- if !matched {
- http.Error(w, "Not authorized", 401)
- return
- }
- }
+type PrometheusClient struct {
+ Listen string `toml:"listen"`
+ MetricVersion int `toml:"metric_version"`
+ BasicUsername string `toml:"basic_username"`
+ BasicPassword string `toml:"basic_password"`
+ IPRange []string `toml:"ip_range"`
+ ExpirationInterval internal.Duration `toml:"expiration_interval"`
+ Path string `toml:"path"`
+ CollectorsExclude []string `toml:"collectors_exclude"`
+ StringAsLabel bool `toml:"string_as_label"`
+ ExportTimestamp bool `toml:"export_timestamp"`
+ tlsint.ServerConfig
- h.ServeHTTP(w, r)
- })
+ Log telegraf.Logger `toml:"-"`
+
+ server *http.Server
+ url *url.URL
+ collector Collector
+ wg sync.WaitGroup
}
-func (p *PrometheusClient) Connect() error {
+func (p *PrometheusClient) Description() string {
+ return "Configuration for the Prometheus client to spawn"
+}
+
+func (p *PrometheusClient) SampleConfig() string {
+ return sampleConfig
+}
+
+func (p *PrometheusClient) Init() error {
defaultCollectors := map[string]bool{
"gocollector": true,
"process": true,
@@ -181,421 +128,135 @@ func (p *PrometheusClient) Connect() error {
}
}
- err := registry.Register(p)
- if err != nil {
- return err
+ switch p.MetricVersion {
+ default:
+ fallthrough
+ case 1:
+ p.Log.Warnf("Use of deprecated configuration: metric_version = 1; please update to metric_version = 2")
+ p.collector = v1.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.Log)
+ err := registry.Register(p.collector)
+ if err != nil {
+ return err
+ }
+ case 2:
+ p.collector = v2.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.ExportTimestamp)
+ err := registry.Register(p.collector)
+ if err != nil {
+ return err
+ }
}
- if p.Listen == "" {
- p.Listen = "localhost:9273"
- }
+ ipRange := make([]*net.IPNet, 0, len(p.IPRange))
+ for _, cidr := range p.IPRange {
+ _, ipNet, err := net.ParseCIDR(cidr)
+ if err != nil {
+ return fmt.Errorf("error parsing ip_range: %v", err)
+ }
- if p.Path == "" {
- p.Path = "/metrics"
+ ipRange = append(ipRange, ipNet)
}
+ authHandler := internal.AuthHandler(p.BasicUsername, p.BasicPassword, "prometheus", onAuthError)
+ rangeHandler := internal.IPRangeHandler(ipRange, onError)
+ promHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})
+
mux := http.NewServeMux()
- mux.Handle(p.Path, p.auth(promhttp.HandlerFor(
- registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})))
+ if p.Path == "" {
+ p.Path = "/"
+ }
+ mux.Handle(p.Path, authHandler(rangeHandler(promHandler)))
tlsConfig, err := p.TLSConfig()
if err != nil {
return err
}
+
p.server = &http.Server{
Addr: p.Listen,
Handler: mux,
TLSConfig: tlsConfig,
}
- var listener net.Listener
- if tlsConfig != nil {
- listener, err = tls.Listen("tcp", p.Listen, tlsConfig)
+ return nil
+}
+
+func (p *PrometheusClient) listen() (net.Listener, error) {
+ if p.server.TLSConfig != nil {
+ return tls.Listen("tcp", p.Listen, p.server.TLSConfig)
} else {
- listener, err = net.Listen("tcp", p.Listen)
+ return net.Listen("tcp", p.Listen)
}
+}
+
+func (p *PrometheusClient) Connect() error {
+ listener, err := p.listen()
if err != nil {
return err
}
- p.url = createURL(tlsConfig, listener, p.Path)
+ scheme := "http"
+ if p.server.TLSConfig != nil {
+ scheme = "https"
+ }
+
+ p.url = &url.URL{
+ Scheme: scheme,
+ Host: listener.Addr().String(),
+ Path: p.Path,
+ }
+
+ p.Log.Infof("Listening on %s", p.URL())
+ p.wg.Add(1)
go func() {
+ defer p.wg.Done()
err := p.server.Serve(listener)
if err != nil && err != http.ErrServerClosed {
- log.Printf("E! Error creating prometheus metric endpoint, err: %s\n",
- err.Error())
+ p.Log.Errorf("Server error: %v", err)
}
}()
return nil
}
-// Address returns the address the plugin is listening on. If not listening
-// an empty string is returned.
-func (p *PrometheusClient) URL() string {
- return p.url
+func onAuthError(_ http.ResponseWriter) {
}
-func createURL(tlsConfig *tls.Config, listener net.Listener, path string) string {
- u := url.URL{
- Scheme: "http",
- Host: listener.Addr().String(),
- Path: path,
- }
+func onError(rw http.ResponseWriter, code int) {
+ http.Error(rw, http.StatusText(code), code)
+}
- if tlsConfig != nil {
- u.Scheme = "https"
+// Address returns the address the plugin is listening on. If not listening
+// an empty string is returned.
+func (p *PrometheusClient) URL() string {
+ if p.url != nil {
+ return p.url.String()
}
- return u.String()
+ return ""
}
func (p *PrometheusClient) Close() error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
+
err := p.server.Shutdown(ctx)
- prometheus.Unregister(p)
- p.url = ""
+ p.wg.Wait()
+ p.url = nil
+ prometheus.Unregister(p.collector)
return err
}
-func (p *PrometheusClient) SampleConfig() string {
- return sampleConfig
-}
-
-func (p *PrometheusClient) Description() string {
- return "Configuration for the Prometheus client to spawn"
-}
-
-// Implements prometheus.Collector
-func (p *PrometheusClient) Describe(ch chan<- *prometheus.Desc) {
- prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(ch)
-}
-
-// Expire removes Samples that have expired.
-func (p *PrometheusClient) Expire() {
- now := p.now()
- for name, family := range p.fam {
- for key, sample := range family.Samples {
- if p.ExpirationInterval.Duration != 0 && now.After(sample.Expiration) {
- for k := range sample.Labels {
- family.LabelSet[k]--
- }
- delete(family.Samples, key)
-
- if len(family.Samples) == 0 {
- delete(p.fam, name)
- }
- }
- }
- }
-}
-
-// Collect implements prometheus.Collector
-func (p *PrometheusClient) Collect(ch chan<- prometheus.Metric) {
- p.Lock()
- defer p.Unlock()
-
- p.Expire()
-
- for name, family := range p.fam {
- // Get list of all labels on MetricFamily
- var labelNames []string
- for k, v := range family.LabelSet {
- if v > 0 {
- labelNames = append(labelNames, k)
- }
- }
- desc := prometheus.NewDesc(name, "Telegraf collected metric", labelNames, nil)
-
- for _, sample := range family.Samples {
- // Get labels for this sample; unset labels will be set to the
- // empty string
- var labels []string
- for _, label := range labelNames {
- v := sample.Labels[label]
- labels = append(labels, v)
- }
-
- var metric prometheus.Metric
- var err error
- switch family.TelegrafValueType {
- case telegraf.Summary:
- metric, err = prometheus.NewConstSummary(desc, sample.Count, sample.Sum, sample.SummaryValue, labels...)
- case telegraf.Histogram:
- metric, err = prometheus.NewConstHistogram(desc, sample.Count, sample.Sum, sample.HistogramValue, labels...)
- default:
- metric, err = prometheus.NewConstMetric(desc, getPromValueType(family.TelegrafValueType), sample.Value, labels...)
- }
- if err != nil {
- log.Printf("E! Error creating prometheus metric, "+
- "key: %s, labels: %v,\nerr: %s\n",
- name, labels, err.Error())
- continue
- }
-
- if p.ExportTimestamp {
- metric = prometheus.NewMetricWithTimestamp(sample.Timestamp, metric)
- }
- ch <- metric
- }
- }
-}
-
-func sanitize(value string) string {
- return invalidNameCharRE.ReplaceAllString(value, "_")
-}
-
-func isValidTagName(tag string) bool {
- return validNameCharRE.MatchString(tag)
-}
-
-func getPromValueType(tt telegraf.ValueType) prometheus.ValueType {
- switch tt {
- case telegraf.Counter:
- return prometheus.CounterValue
- case telegraf.Gauge:
- return prometheus.GaugeValue
- default:
- return prometheus.UntypedValue
- }
-}
-
-// CreateSampleID creates a SampleID based on the tags of a telegraf.Metric.
-func CreateSampleID(tags map[string]string) SampleID {
- pairs := make([]string, 0, len(tags))
- for k, v := range tags {
- pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
- }
- sort.Strings(pairs)
- return SampleID(strings.Join(pairs, ","))
-}
-
-func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) {
-
- for k := range sample.Labels {
- fam.LabelSet[k]++
- }
-
- fam.Samples[sampleID] = sample
-}
-
-func (p *PrometheusClient) addMetricFamily(point telegraf.Metric, sample *Sample, mname string, sampleID SampleID) {
- var fam *MetricFamily
- var ok bool
- if fam, ok = p.fam[mname]; !ok {
- fam = &MetricFamily{
- Samples: make(map[SampleID]*Sample),
- TelegrafValueType: point.Type(),
- LabelSet: make(map[string]int),
- }
- p.fam[mname] = fam
- }
-
- addSample(fam, sample, sampleID)
-}
-
-// Sorted returns a copy of the metrics in time ascending order. A copy is
-// made to avoid modifying the input metric slice since doing so is not
-// allowed.
-func sorted(metrics []telegraf.Metric) []telegraf.Metric {
- batch := make([]telegraf.Metric, 0, len(metrics))
- for i := len(metrics) - 1; i >= 0; i-- {
- batch = append(batch, metrics[i])
- }
- sort.Slice(batch, func(i, j int) bool {
- return batch[i].Time().Before(batch[j].Time())
- })
- return batch
-}
-
func (p *PrometheusClient) Write(metrics []telegraf.Metric) error {
- p.Lock()
- defer p.Unlock()
-
- now := p.now()
-
- for _, point := range sorted(metrics) {
- tags := point.Tags()
- sampleID := CreateSampleID(tags)
-
- labels := make(map[string]string)
- for k, v := range tags {
- tName := sanitize(k)
- if !isValidTagName(tName) {
- continue
- }
- labels[tName] = v
- }
-
- // Prometheus doesn't have a string value type, so convert string
- // fields to labels if enabled.
- if p.StringAsLabel {
- for fn, fv := range point.Fields() {
- switch fv := fv.(type) {
- case string:
- tName := sanitize(fn)
- if !isValidTagName(tName) {
- continue
- }
- labels[tName] = fv
- }
- }
- }
-
- switch point.Type() {
- case telegraf.Summary:
- var mname string
- var sum float64
- var count uint64
- summaryvalue := make(map[float64]float64)
- for fn, fv := range point.Fields() {
- var value float64
- switch fv := fv.(type) {
- case int64:
- value = float64(fv)
- case uint64:
- value = float64(fv)
- case float64:
- value = fv
- default:
- continue
- }
-
- switch fn {
- case "sum":
- sum = value
- case "count":
- count = uint64(value)
- default:
- limit, err := strconv.ParseFloat(fn, 64)
- if err == nil {
- summaryvalue[limit] = value
- }
- }
- }
- sample := &Sample{
- Labels: labels,
- SummaryValue: summaryvalue,
- Count: count,
- Sum: sum,
- Timestamp: point.Time(),
- Expiration: now.Add(p.ExpirationInterval.Duration),
- }
- mname = sanitize(point.Name())
-
- if !isValidTagName(mname) {
- continue
- }
-
- p.addMetricFamily(point, sample, mname, sampleID)
-
- case telegraf.Histogram:
- var mname string
- var sum float64
- var count uint64
- histogramvalue := make(map[float64]uint64)
- for fn, fv := range point.Fields() {
- var value float64
- switch fv := fv.(type) {
- case int64:
- value = float64(fv)
- case uint64:
- value = float64(fv)
- case float64:
- value = fv
- default:
- continue
- }
-
- switch fn {
- case "sum":
- sum = value
- case "count":
- count = uint64(value)
- default:
- limit, err := strconv.ParseFloat(fn, 64)
- if err == nil {
- histogramvalue[limit] = uint64(value)
- }
- }
- }
- sample := &Sample{
- Labels: labels,
- HistogramValue: histogramvalue,
- Count: count,
- Sum: sum,
- Timestamp: point.Time(),
- Expiration: now.Add(p.ExpirationInterval.Duration),
- }
- mname = sanitize(point.Name())
-
- if !isValidTagName(mname) {
- continue
- }
-
- p.addMetricFamily(point, sample, mname, sampleID)
-
- default:
- for fn, fv := range point.Fields() {
- // Ignore string and bool fields.
- var value float64
- switch fv := fv.(type) {
- case int64:
- value = float64(fv)
- case uint64:
- value = float64(fv)
- case float64:
- value = fv
- default:
- continue
- }
-
- sample := &Sample{
- Labels: labels,
- Value: value,
- Timestamp: point.Time(),
- Expiration: now.Add(p.ExpirationInterval.Duration),
- }
-
- // Special handling of value field; supports passthrough from
- // the prometheus input.
- var mname string
- switch point.Type() {
- case telegraf.Counter:
- if fn == "counter" {
- mname = sanitize(point.Name())
- }
- case telegraf.Gauge:
- if fn == "gauge" {
- mname = sanitize(point.Name())
- }
- }
- if mname == "" {
- if fn == "value" {
- mname = sanitize(point.Name())
- } else {
- mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn))
- }
- }
- if !isValidTagName(mname) {
- continue
- }
- p.addMetricFamily(point, sample, mname, sampleID)
-
- }
- }
- }
- return nil
+ return p.collector.Add(metrics)
}
func init() {
outputs.Add("prometheus_client", func() telegraf.Output {
return &PrometheusClient{
- ExpirationInterval: internal.Duration{Duration: time.Second * 60},
+ Listen: defaultListen,
+ Path: defaultPath,
+ ExpirationInterval: defaultExpirationInterval,
StringAsLabel: true,
- fam: make(map[string]*MetricFamily),
- now: time.Now,
}
})
}
diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go
deleted file mode 100644
index 211e24030dc56..0000000000000
--- a/plugins/outputs/prometheus_client/prometheus_client_test.go
+++ /dev/null
@@ -1,693 +0,0 @@
-package prometheus_client
-
-import (
- "testing"
- "time"
-
- "github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal"
- "github.com/influxdata/telegraf/metric"
- prometheus_input "github.com/influxdata/telegraf/plugins/inputs/prometheus"
- "github.com/influxdata/telegraf/testutil"
- "github.com/stretchr/testify/require"
-)
-
-func setUnixTime(client *PrometheusClient, sec int64) {
- client.now = func() time.Time {
- return time.Unix(sec, 0)
- }
-}
-
-// NewClient initializes a PrometheusClient.
-func NewClient() *PrometheusClient {
- return &PrometheusClient{
- ExpirationInterval: internal.Duration{Duration: time.Second * 60},
- StringAsLabel: true,
- fam: make(map[string]*MetricFamily),
- now: time.Now,
- }
-}
-
-func TestWrite_Basic(t *testing.T) {
- now := time.Now()
- pt1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 0.0},
- now)
- var metrics = []telegraf.Metric{
- pt1,
- }
-
- client := NewClient()
- err = client.Write(metrics)
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, telegraf.Untyped, fam.TelegrafValueType)
- require.Equal(t, map[string]int{}, fam.LabelSet)
-
- sample, ok := fam.Samples[CreateSampleID(pt1.Tags())]
- require.True(t, ok)
-
- require.Equal(t, 0.0, sample.Value)
- require.True(t, now.Before(sample.Expiration))
-}
-
-func TestWrite_IntField(t *testing.T) {
- client := NewClient()
-
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 42},
- time.Now())
- err = client.Write([]telegraf.Metric{p1})
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- for _, v := range fam.Samples {
- require.Equal(t, 42.0, v.Value)
- }
-
-}
-
-func TestWrite_FieldNotValue(t *testing.T) {
- client := NewClient()
-
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"howdy": 0.0},
- time.Now())
- err = client.Write([]telegraf.Metric{p1})
- require.NoError(t, err)
-
- fam, ok := client.fam["foo_howdy"]
- require.True(t, ok)
- for _, v := range fam.Samples {
- require.Equal(t, 0.0, v.Value)
- }
-}
-
-func TestWrite_SkipNonNumberField(t *testing.T) {
- client := NewClient()
-
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": "howdy"},
- time.Now())
- err = client.Write([]telegraf.Metric{p1})
- require.NoError(t, err)
-
- _, ok := client.fam["foo"]
- require.False(t, ok)
-}
-
-func TestWrite_Counters(t *testing.T) {
- type args struct {
- measurement string
- tags map[string]string
- fields map[string]interface{}
- valueType telegraf.ValueType
- }
- var tests = []struct {
- name string
- args args
- err error
- metricName string
- valueType telegraf.ValueType
- }{
- {
- name: "field named value is not added to metric name",
- args: args{
- measurement: "foo",
- fields: map[string]interface{}{"value": 42},
- valueType: telegraf.Counter,
- },
- metricName: "foo",
- valueType: telegraf.Counter,
- },
- {
- name: "field named counter is not added to metric name",
- args: args{
- measurement: "foo",
- fields: map[string]interface{}{"counter": 42},
- valueType: telegraf.Counter,
- },
- metricName: "foo",
- valueType: telegraf.Counter,
- },
- {
- name: "field with any other name is added to metric name",
- args: args{
- measurement: "foo",
- fields: map[string]interface{}{"other": 42},
- valueType: telegraf.Counter,
- },
- metricName: "foo_other",
- valueType: telegraf.Counter,
- },
- {
- name: "uint64 fields are output",
- args: args{
- measurement: "foo",
- fields: map[string]interface{}{"value": uint64(42)},
- valueType: telegraf.Counter,
- },
- metricName: "foo",
- valueType: telegraf.Counter,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- m, err := metric.New(
- tt.args.measurement,
- tt.args.tags,
- tt.args.fields,
- time.Now(),
- tt.args.valueType,
- )
- client := NewClient()
- err = client.Write([]telegraf.Metric{m})
- require.Equal(t, tt.err, err)
-
- fam, ok := client.fam[tt.metricName]
- require.True(t, ok)
- require.Equal(t, tt.valueType, fam.TelegrafValueType)
- })
- }
-}
-
-func TestWrite_Sanitize(t *testing.T) {
- client := NewClient()
-
- p1, err := metric.New(
- "foo.bar:colon",
- map[string]string{"tag-with-dash": "localhost.local"},
- map[string]interface{}{"field-with-dash-and:colon": 42},
- time.Now(),
- telegraf.Counter)
- err = client.Write([]telegraf.Metric{p1})
- require.NoError(t, err)
-
- fam, ok := client.fam["foo_bar:colon_field_with_dash_and:colon"]
- require.True(t, ok)
- require.Equal(t, map[string]int{"tag_with_dash": 1}, fam.LabelSet)
-
- sample1, ok := fam.Samples[CreateSampleID(p1.Tags())]
- require.True(t, ok)
-
- require.Equal(t, map[string]string{
- "tag_with_dash": "localhost.local"}, sample1.Labels)
-}
-
-func TestWrite_Gauge(t *testing.T) {
- type args struct {
- measurement string
- tags map[string]string
- fields map[string]interface{}
- valueType telegraf.ValueType
- }
- var tests = []struct {
- name string
- args args
- err error
- metricName string
- valueType telegraf.ValueType
- }{
- {
- name: "field named value is not added to metric name",
- args: args{
- measurement: "foo",
- fields: map[string]interface{}{"value": 42},
- valueType: telegraf.Gauge,
- },
- metricName: "foo",
- valueType: telegraf.Gauge,
- },
- {
- name: "field named gauge is not added to metric name",
- args: args{
- measurement: "foo",
- fields: map[string]interface{}{"gauge": 42},
- valueType: telegraf.Gauge,
- },
- metricName: "foo",
- valueType: telegraf.Gauge,
- },
- {
- name: "field with any other name is added to metric name",
- args: args{
- measurement: "foo",
- fields: map[string]interface{}{"other": 42},
- valueType: telegraf.Gauge,
- },
- metricName: "foo_other",
- valueType: telegraf.Gauge,
- },
- {
- name: "uint64 fields are output",
- args: args{
- measurement: "foo",
- fields: map[string]interface{}{"value": uint64(42)},
- valueType: telegraf.Counter,
- },
- metricName: "foo",
- valueType: telegraf.Counter,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- m, err := metric.New(
- tt.args.measurement,
- tt.args.tags,
- tt.args.fields,
- time.Now(),
- tt.args.valueType,
- )
- client := NewClient()
- err = client.Write([]telegraf.Metric{m})
- require.Equal(t, tt.err, err)
-
- fam, ok := client.fam[tt.metricName]
- require.True(t, ok)
- require.Equal(t, tt.valueType, fam.TelegrafValueType)
-
- })
- }
-}
-
-func TestWrite_Summary(t *testing.T) {
- client := NewClient()
-
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4},
- time.Now(),
- telegraf.Summary)
-
- err = client.Write([]telegraf.Metric{p1})
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, 1, len(fam.Samples))
-
- sample1, ok := fam.Samples[CreateSampleID(p1.Tags())]
- require.True(t, ok)
-
- require.Equal(t, 84.0, sample1.Sum)
- require.Equal(t, uint64(42), sample1.Count)
- require.Equal(t, 3, len(sample1.SummaryValue))
-}
-
-func TestWrite_Histogram(t *testing.T) {
- client := NewClient()
-
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4},
- time.Now(),
- telegraf.Histogram)
-
- err = client.Write([]telegraf.Metric{p1})
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, 1, len(fam.Samples))
-
- sample1, ok := fam.Samples[CreateSampleID(p1.Tags())]
- require.True(t, ok)
-
- require.Equal(t, 84.0, sample1.Sum)
- require.Equal(t, uint64(42), sample1.Count)
- require.Equal(t, 3, len(sample1.HistogramValue))
-}
-
-func TestWrite_MixedValueType(t *testing.T) {
- now := time.Now()
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 1.0},
- now,
- telegraf.Counter)
- p2, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 2.0},
- now,
- telegraf.Gauge)
- var metrics = []telegraf.Metric{p1, p2}
-
- client := NewClient()
- err = client.Write(metrics)
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, 1, len(fam.Samples))
-}
-
-func TestWrite_MixedValueTypeUpgrade(t *testing.T) {
- now := time.Now()
- p1, err := metric.New(
- "foo",
- map[string]string{"a": "x"},
- map[string]interface{}{"value": 1.0},
- now,
- telegraf.Untyped)
- p2, err := metric.New(
- "foo",
- map[string]string{"a": "y"},
- map[string]interface{}{"value": 2.0},
- now,
- telegraf.Gauge)
- var metrics = []telegraf.Metric{p1, p2}
-
- client := NewClient()
- err = client.Write(metrics)
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, 2, len(fam.Samples))
-}
-
-func TestWrite_MixedValueTypeDowngrade(t *testing.T) {
- now := time.Now()
- p1, err := metric.New(
- "foo",
- map[string]string{"a": "x"},
- map[string]interface{}{"value": 1.0},
- now,
- telegraf.Gauge)
- p2, err := metric.New(
- "foo",
- map[string]string{"a": "y"},
- map[string]interface{}{"value": 2.0},
- now,
- telegraf.Untyped)
- var metrics = []telegraf.Metric{p1, p2}
-
- client := NewClient()
- err = client.Write(metrics)
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, 2, len(fam.Samples))
-}
-
-func TestWrite_Tags(t *testing.T) {
- now := time.Now()
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 1.0},
- now)
- p2, err := metric.New(
- "foo",
- map[string]string{"host": "localhost"},
- map[string]interface{}{"value": 2.0},
- now)
- var metrics = []telegraf.Metric{p1, p2}
-
- client := NewClient()
- err = client.Write(metrics)
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, telegraf.Untyped, fam.TelegrafValueType)
-
- require.Equal(t, map[string]int{"host": 1}, fam.LabelSet)
-
- sample1, ok := fam.Samples[CreateSampleID(p1.Tags())]
- require.True(t, ok)
-
- require.Equal(t, 1.0, sample1.Value)
- require.True(t, now.Before(sample1.Expiration))
-
- sample2, ok := fam.Samples[CreateSampleID(p2.Tags())]
- require.True(t, ok)
-
- require.Equal(t, 2.0, sample2.Value)
- require.True(t, now.Before(sample2.Expiration))
-}
-
-func TestWrite_StringFields(t *testing.T) {
- now := time.Now()
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 1.0, "status": "good"},
- now,
- telegraf.Counter)
- p2, err := metric.New(
- "bar",
- make(map[string]string),
- map[string]interface{}{"status": "needs numeric field"},
- now,
- telegraf.Gauge)
- var metrics = []telegraf.Metric{p1, p2}
-
- client := NewClient()
- err = client.Write(metrics)
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, 1, fam.LabelSet["status"])
-
- fam, ok = client.fam["bar"]
- require.False(t, ok)
-}
-
-func TestDoNotWrite_StringFields(t *testing.T) {
- now := time.Now()
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 1.0, "status": "good"},
- now,
- telegraf.Counter)
- p2, err := metric.New(
- "bar",
- make(map[string]string),
- map[string]interface{}{"status": "needs numeric field"},
- now,
- telegraf.Gauge)
- var metrics = []telegraf.Metric{p1, p2}
-
- client := &PrometheusClient{
- ExpirationInterval: internal.Duration{Duration: time.Second * 60},
- StringAsLabel: false,
- fam: make(map[string]*MetricFamily),
- now: time.Now,
- }
-
- err = client.Write(metrics)
- require.NoError(t, err)
-
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, 0, fam.LabelSet["status"])
-
- fam, ok = client.fam["bar"]
- require.False(t, ok)
-}
-
-func TestExpire(t *testing.T) {
- client := NewClient()
-
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 1.0},
- time.Now())
- setUnixTime(client, 0)
- err = client.Write([]telegraf.Metric{p1})
- require.NoError(t, err)
-
- p2, err := metric.New(
- "bar",
- make(map[string]string),
- map[string]interface{}{"value": 2.0},
- time.Now())
- setUnixTime(client, 1)
- err = client.Write([]telegraf.Metric{p2})
-
- setUnixTime(client, 61)
- require.Equal(t, 2, len(client.fam))
- client.Expire()
- require.Equal(t, 1, len(client.fam))
-}
-
-func TestExpire_TagsNoDecrement(t *testing.T) {
- client := NewClient()
-
- p1, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 1.0},
- time.Now())
- setUnixTime(client, 0)
- err = client.Write([]telegraf.Metric{p1})
- require.NoError(t, err)
-
- p2, err := metric.New(
- "foo",
- map[string]string{"host": "localhost"},
- map[string]interface{}{"value": 2.0},
- time.Now())
- setUnixTime(client, 1)
- err = client.Write([]telegraf.Metric{p2})
-
- setUnixTime(client, 61)
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, 2, len(fam.Samples))
- client.Expire()
- require.Equal(t, 1, len(fam.Samples))
-
- require.Equal(t, map[string]int{"host": 1}, fam.LabelSet)
-}
-
-func TestExpire_TagsWithDecrement(t *testing.T) {
- client := NewClient()
-
- p1, err := metric.New(
- "foo",
- map[string]string{"host": "localhost"},
- map[string]interface{}{"value": 1.0},
- time.Now())
- setUnixTime(client, 0)
- err = client.Write([]telegraf.Metric{p1})
- require.NoError(t, err)
-
- p2, err := metric.New(
- "foo",
- make(map[string]string),
- map[string]interface{}{"value": 2.0},
- time.Now())
- setUnixTime(client, 1)
- err = client.Write([]telegraf.Metric{p2})
-
- setUnixTime(client, 61)
- fam, ok := client.fam["foo"]
- require.True(t, ok)
- require.Equal(t, 2, len(fam.Samples))
- client.Expire()
- require.Equal(t, 1, len(fam.Samples))
-
- require.Equal(t, map[string]int{"host": 0}, fam.LabelSet)
-}
-
-var pTesting *PrometheusClient
-
-func TestPrometheusWritePointEmptyTag(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping integration test in short mode")
- }
-
- pClient, p, err := setupPrometheus()
- require.NoError(t, err)
- defer pClient.Close()
-
- now := time.Now()
- tags := make(map[string]string)
- pt1, _ := metric.New(
- "test_point_1",
- tags,
- map[string]interface{}{"value": 0.0},
- now)
- pt2, _ := metric.New(
- "test_point_2",
- tags,
- map[string]interface{}{"value": 1.0},
- now)
- var metrics = []telegraf.Metric{
- pt1,
- pt2,
- }
- require.NoError(t, pClient.Write(metrics))
-
- expected := []struct {
- name string
- value float64
- tags map[string]string
- }{
- {"test_point_1", 0.0, tags},
- {"test_point_2", 1.0, tags},
- }
-
- var acc testutil.Accumulator
-
- require.NoError(t, p.Gather(&acc))
- for _, e := range expected {
- acc.AssertContainsFields(t, e.name,
- map[string]interface{}{"value": e.value})
- }
-
- tags = make(map[string]string)
- tags["testtag"] = "testvalue"
- pt3, _ := metric.New(
- "test_point_3",
- tags,
- map[string]interface{}{"value": 0.0},
- now)
- pt4, _ := metric.New(
- "test_point_4",
- tags,
- map[string]interface{}{"value": 1.0},
- now)
- metrics = []telegraf.Metric{
- pt3,
- pt4,
- }
- require.NoError(t, pClient.Write(metrics))
-
- expected2 := []struct {
- name string
- value float64
- }{
- {"test_point_3", 0.0},
- {"test_point_4", 1.0},
- }
-
- require.NoError(t, p.Gather(&acc))
- for _, e := range expected2 {
- acc.AssertContainsFields(t, e.name,
- map[string]interface{}{"value": e.value})
- }
-}
-
-func setupPrometheus() (*PrometheusClient, *prometheus_input.Prometheus, error) {
- if pTesting == nil {
- pTesting = NewClient()
- pTesting.Listen = "localhost:9127"
- pTesting.Path = "/metrics"
- err := pTesting.Connect()
- if err != nil {
- return nil, nil, err
- }
- } else {
- pTesting.fam = make(map[string]*MetricFamily)
- }
-
- time.Sleep(time.Millisecond * 200)
-
- p := &prometheus_input.Prometheus{
- URLs: []string{"http://localhost:9127/metrics"},
- }
-
- return pTesting, p, nil
-}
diff --git a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go
deleted file mode 100644
index bcf6b43810684..0000000000000
--- a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package prometheus_client_test
-
-import (
- "crypto/tls"
- "fmt"
- "net/http"
- "testing"
-
- "github.com/influxdata/telegraf/plugins/outputs/prometheus_client"
- "github.com/influxdata/telegraf/testutil"
- "github.com/influxdata/toml"
- "github.com/stretchr/testify/require"
-)
-
-var pki = testutil.NewPKI("../../../testutil/pki")
-
-var configWithTLS = fmt.Sprintf(`
- listen = "127.0.0.1:0"
- tls_allowed_cacerts = ["%s"]
- tls_cert = "%s"
- tls_key = "%s"
-`, pki.TLSServerConfig().TLSAllowedCACerts[0], pki.TLSServerConfig().TLSCert, pki.TLSServerConfig().TLSKey)
-
-var configWithoutTLS = `
- listen = "127.0.0.1:0"
-`
-
-type PrometheusClientTestContext struct {
- Output *prometheus_client.PrometheusClient
- Accumulator *testutil.Accumulator
- Client *http.Client
-}
-
-func TestWorksWithoutTLS(t *testing.T) {
- tc := buildTestContext(t, []byte(configWithoutTLS))
- err := tc.Output.Connect()
- require.NoError(t, err)
- defer tc.Output.Close()
-
- response, err := tc.Client.Get(tc.Output.URL())
- require.NoError(t, err)
-
- require.NoError(t, err)
- require.Equal(t, response.StatusCode, http.StatusOK)
-}
-
-func TestWorksWithTLS(t *testing.T) {
- tc := buildTestContext(t, []byte(configWithTLS))
- err := tc.Output.Connect()
- require.NoError(t, err)
- defer tc.Output.Close()
-
- response, err := tc.Client.Get(tc.Output.URL())
- require.NoError(t, err)
-
- require.NoError(t, err)
- require.Equal(t, response.StatusCode, http.StatusOK)
-
- tr := &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- }
-
- client := &http.Client{Transport: tr}
- response, err = client.Get(tc.Output.URL())
-
- require.Error(t, err)
-}
-
-func buildTestContext(t *testing.T, config []byte) *PrometheusClientTestContext {
- output := prometheus_client.NewClient()
- err := toml.Unmarshal(config, output)
- require.NoError(t, err)
-
- var (
- httpClient *http.Client
- )
-
- if len(output.TLSAllowedCACerts) != 0 {
- httpClient = buildClientWithTLS(t, output)
- } else {
- httpClient = buildClientWithoutTLS()
- }
-
- return &PrometheusClientTestContext{
- Output: output,
- Accumulator: &testutil.Accumulator{},
- Client: httpClient,
- }
-}
-
-func buildClientWithoutTLS() *http.Client {
- return &http.Client{}
-}
-
-func buildClientWithTLS(t *testing.T, output *prometheus_client.PrometheusClient) *http.Client {
- tlsConfig, err := pki.TLSClientConfig().TLSConfig()
- require.NoError(t, err)
-
- transport := &http.Transport{TLSClientConfig: tlsConfig}
- return &http.Client{Transport: transport}
-}
diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go
new file mode 100644
index 0000000000000..adf18c9f0f076
--- /dev/null
+++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go
@@ -0,0 +1,402 @@
+package prometheus
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ inputs "github.com/influxdata/telegraf/plugins/inputs/prometheus"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMetricVersion1(t *testing.T) {
+ Logger := testutil.Logger{Name: "outputs.prometheus_client"}
+ tests := []struct {
+ name string
+ output *PrometheusClient
+ metrics []telegraf.Metric
+ expected []byte
+ }{
+ {
+ name: "simple",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 1,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "prometheus untyped",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 1,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu_time_idle",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "prometheus counter",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 1,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu_time_idle",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "counter": 42.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Counter,
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle counter
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "replace characters when using string as label",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 1,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ StringAsLabel: true,
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu_time_idle",
+ map[string]string{},
+ map[string]interface{}{
+ "host:name": "example.org",
+ "counter": 42.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Counter,
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle counter
+cpu_time_idle{host_name="example.org"} 42
+`),
+ },
+ {
+ name: "prometheus gauge",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 1,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu_time_idle",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "gauge": 42.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Gauge,
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle gauge
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "prometheus histogram",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 1,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "http_request_duration_seconds",
+ map[string]string{},
+ map[string]interface{}{
+ "sum": 53423,
+ "0.05": 24054,
+ "0.1": 33444,
+ "0.2": 100392,
+ "0.5": 129389,
+ "1": 133988,
+ "+Inf": 144320,
+ "count": 144320,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ },
+ expected: []byte(`
+# HELP http_request_duration_seconds Telegraf collected metric
+# TYPE http_request_duration_seconds histogram
+http_request_duration_seconds_bucket{le="0.05"} 24054
+http_request_duration_seconds_bucket{le="0.1"} 33444
+http_request_duration_seconds_bucket{le="0.2"} 100392
+http_request_duration_seconds_bucket{le="0.5"} 129389
+http_request_duration_seconds_bucket{le="1"} 133988
+http_request_duration_seconds_bucket{le="+Inf"} 144320
+http_request_duration_seconds_sum 53423
+http_request_duration_seconds_count 144320
+`),
+ },
+ {
+ name: "prometheus summary",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 1,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "rpc_duration_seconds",
+ map[string]string{},
+ map[string]interface{}{
+ "0.01": 3102,
+ "0.05": 3272,
+ "0.5": 4773,
+ "0.9": 9001,
+ "0.99": 76656,
+ "count": 2693,
+ "sum": 17560473,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ },
+ expected: []byte(`
+# HELP rpc_duration_seconds Telegraf collected metric
+# TYPE rpc_duration_seconds summary
+rpc_duration_seconds{quantile="0.01"} 3102
+rpc_duration_seconds{quantile="0.05"} 3272
+rpc_duration_seconds{quantile="0.5"} 4773
+rpc_duration_seconds{quantile="0.9"} 9001
+rpc_duration_seconds{quantile="0.99"} 76656
+rpc_duration_seconds_sum 1.7560473e+07
+rpc_duration_seconds_count 2693
+`),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.output.Init()
+ require.NoError(t, err)
+
+ err = tt.output.Connect()
+ require.NoError(t, err)
+
+ defer func() {
+ err := tt.output.Close()
+ require.NoError(t, err)
+ }()
+
+ err = tt.output.Write(tt.metrics)
+ require.NoError(t, err)
+
+ resp, err := http.Get(tt.output.URL())
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ require.Equal(t,
+ strings.TrimSpace(string(tt.expected)),
+ strings.TrimSpace(string(body)))
+ })
+ }
+}
+
+func TestRoundTripMetricVersion1(t *testing.T) {
+ Logger := testutil.Logger{Name: "outputs.prometheus_client"}
+ tests := []struct {
+ name string
+ data []byte
+ }{
+ {
+ name: "untyped",
+ data: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "counter",
+ data: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle counter
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "gauge",
+ data: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle gauge
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "multi",
+ data: []byte(`
+# HELP cpu_time_guest Telegraf collected metric
+# TYPE cpu_time_guest gauge
+cpu_time_guest{host="one.example.org"} 42
+cpu_time_guest{host="two.example.org"} 42
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle gauge
+cpu_time_idle{host="one.example.org"} 42
+cpu_time_idle{host="two.example.org"} 42
+`),
+ },
+ {
+ name: "histogram",
+ data: []byte(`
+# HELP http_request_duration_seconds Telegraf collected metric
+# TYPE http_request_duration_seconds histogram
+http_request_duration_seconds_bucket{le="0.05"} 24054
+http_request_duration_seconds_bucket{le="0.1"} 33444
+http_request_duration_seconds_bucket{le="0.2"} 100392
+http_request_duration_seconds_bucket{le="0.5"} 129389
+http_request_duration_seconds_bucket{le="1"} 133988
+http_request_duration_seconds_bucket{le="+Inf"} 144320
+http_request_duration_seconds_sum 53423
+http_request_duration_seconds_count 144320
+`),
+ },
+ {
+ name: "summary",
+ data: []byte(`
+# HELP rpc_duration_seconds Telegraf collected metric
+# TYPE rpc_duration_seconds summary
+rpc_duration_seconds{quantile="0.01"} 3102
+rpc_duration_seconds{quantile="0.05"} 3272
+rpc_duration_seconds{quantile="0.5"} 4773
+rpc_duration_seconds{quantile="0.9"} 9001
+rpc_duration_seconds{quantile="0.99"} 76656
+rpc_duration_seconds_sum 1.7560473e+07
+rpc_duration_seconds_count 2693
+`),
+ },
+ }
+
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ url := fmt.Sprintf("http://%s", ts.Listener.Addr())
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.Write(tt.data)
+ })
+
+ input := &inputs.Prometheus{
+ URLs: []string{url},
+ URLTag: "",
+ MetricVersion: 1,
+ }
+ var acc testutil.Accumulator
+ err := input.Start(&acc)
+ require.NoError(t, err)
+ err = input.Gather(&acc)
+ require.NoError(t, err)
+ input.Stop()
+
+ metrics := acc.GetTelegrafMetrics()
+
+ output := &PrometheusClient{
+ Listen: "127.0.0.1:0",
+ Path: defaultPath,
+ MetricVersion: 1,
+ Log: Logger,
+ CollectorsExclude: []string{"gocollector", "process"},
+ }
+ err = output.Init()
+ require.NoError(t, err)
+ err = output.Connect()
+ require.NoError(t, err)
+ defer func() {
+ err = output.Close()
+ require.NoError(t, err)
+ }()
+ err = output.Write(metrics)
+ require.NoError(t, err)
+
+ resp, err := http.Get(output.URL())
+ require.NoError(t, err)
+
+ actual, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ require.Equal(t,
+ strings.TrimSpace(string(tt.data)),
+ strings.TrimSpace(string(actual)))
+ })
+ }
+}
diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go
new file mode 100644
index 0000000000000..27be9103b28bd
--- /dev/null
+++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go
@@ -0,0 +1,463 @@
+package prometheus
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ inputs "github.com/influxdata/telegraf/plugins/inputs/prometheus"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMetricVersion2(t *testing.T) {
+ Logger := testutil.Logger{Name: "outputs.prometheus_client"}
+ tests := []struct {
+ name string
+ output *PrometheusClient
+ metrics []telegraf.Metric
+ expected []byte
+ }{
+ {
+ name: "untyped telegraf metric",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 2,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "summary no quantiles",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 2,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "rpc_duration_seconds_sum": 1.7560473e+07,
+ "rpc_duration_seconds_count": 2693,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ },
+ expected: []byte(`
+# HELP rpc_duration_seconds Telegraf collected metric
+# TYPE rpc_duration_seconds summary
+rpc_duration_seconds_sum 1.7560473e+07
+rpc_duration_seconds_count 2693
+`),
+ },
+ {
+ name: "when export timestamp is true timestamp is present in the metric",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 2,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ ExportTimestamp: true,
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42 0
+`),
+ },
+ {
+ name: "strings as labels",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 2,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ StringAsLabel: true,
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ "host": "example.org",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "when strings as labels is false string fields are discarded",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 2,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ StringAsLabel: false,
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ "host": "example.org",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle 42
+`),
+ },
+ {
+ name: "untype prometheus metric",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 2,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "cpu_time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "telegraf histogram",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 2,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu1",
+ },
+ map[string]interface{}{
+ "usage_idle_sum": 2000.0,
+ "usage_idle_count": 20.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu1",
+ "le": "0.0",
+ },
+ map[string]interface{}{
+ "usage_idle_bucket": 0.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu1",
+ "le": "50.0",
+ },
+ map[string]interface{}{
+ "usage_idle_bucket": 7.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu1",
+ "le": "100.0",
+ },
+ map[string]interface{}{
+ "usage_idle_bucket": 20.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu1",
+ "le": "+Inf",
+ },
+ map[string]interface{}{
+ "usage_idle_bucket": 20.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_usage_idle Telegraf collected metric
+# TYPE cpu_usage_idle histogram
+cpu_usage_idle_bucket{cpu="cpu1",le="0"} 0
+cpu_usage_idle_bucket{cpu="cpu1",le="50"} 7
+cpu_usage_idle_bucket{cpu="cpu1",le="100"} 20
+cpu_usage_idle_bucket{cpu="cpu1",le="+Inf"} 20
+cpu_usage_idle_sum{cpu="cpu1"} 2000
+cpu_usage_idle_count{cpu="cpu1"} 20
+`),
+ },
+ {
+ name: "histogram no buckets",
+ output: &PrometheusClient{
+ Listen: ":0",
+ MetricVersion: 2,
+ CollectorsExclude: []string{"gocollector", "process"},
+ Path: "/metrics",
+ Log: Logger,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu1",
+ },
+ map[string]interface{}{
+ "usage_idle_sum": 2000.0,
+ "usage_idle_count": 20.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_usage_idle Telegraf collected metric
+# TYPE cpu_usage_idle histogram
+cpu_usage_idle_bucket{cpu="cpu1",le="+Inf"} 20
+cpu_usage_idle_sum{cpu="cpu1"} 2000
+cpu_usage_idle_count{cpu="cpu1"} 20
+`),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.output.Init()
+ require.NoError(t, err)
+
+ err = tt.output.Connect()
+ require.NoError(t, err)
+
+ defer func() {
+ err := tt.output.Close()
+ require.NoError(t, err)
+ }()
+
+ err = tt.output.Write(tt.metrics)
+ require.NoError(t, err)
+
+ resp, err := http.Get(tt.output.URL())
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ require.Equal(t,
+ strings.TrimSpace(string(tt.expected)),
+ strings.TrimSpace(string(body)))
+ })
+ }
+}
+
+func TestRoundTripMetricVersion2(t *testing.T) {
+ Logger := testutil.Logger{Name: "outputs.prometheus_client"}
+ tests := []struct {
+ name string
+ data []byte
+ }{
+ {
+ name: "untyped",
+ data: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "counter",
+ data: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle counter
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "gauge",
+ data: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle gauge
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "multi",
+ data: []byte(`
+# HELP cpu_time_guest Telegraf collected metric
+# TYPE cpu_time_guest gauge
+cpu_time_guest{host="one.example.org"} 42
+cpu_time_guest{host="two.example.org"} 42
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle gauge
+cpu_time_idle{host="one.example.org"} 42
+cpu_time_idle{host="two.example.org"} 42
+`),
+ },
+ {
+ name: "histogram",
+ data: []byte(`
+# HELP http_request_duration_seconds Telegraf collected metric
+# TYPE http_request_duration_seconds histogram
+http_request_duration_seconds_bucket{le="0.05"} 24054
+http_request_duration_seconds_bucket{le="0.1"} 33444
+http_request_duration_seconds_bucket{le="0.2"} 100392
+http_request_duration_seconds_bucket{le="0.5"} 129389
+http_request_duration_seconds_bucket{le="1"} 133988
+http_request_duration_seconds_bucket{le="+Inf"} 144320
+http_request_duration_seconds_sum 53423
+http_request_duration_seconds_count 144320
+`),
+ },
+ {
+ name: "summary",
+ data: []byte(`
+# HELP rpc_duration_seconds Telegraf collected metric
+# TYPE rpc_duration_seconds summary
+rpc_duration_seconds{quantile="0.01"} 3102
+rpc_duration_seconds{quantile="0.05"} 3272
+rpc_duration_seconds{quantile="0.5"} 4773
+rpc_duration_seconds{quantile="0.9"} 9001
+rpc_duration_seconds{quantile="0.99"} 76656
+rpc_duration_seconds_sum 1.7560473e+07
+rpc_duration_seconds_count 2693
+`),
+ },
+ }
+
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ url := fmt.Sprintf("http://%s", ts.Listener.Addr())
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.Write(tt.data)
+ })
+
+ input := &inputs.Prometheus{
+ URLs: []string{url},
+ URLTag: "",
+ MetricVersion: 2,
+ }
+ var acc testutil.Accumulator
+ err := input.Start(&acc)
+ require.NoError(t, err)
+ err = input.Gather(&acc)
+ require.NoError(t, err)
+ input.Stop()
+
+ metrics := acc.GetTelegrafMetrics()
+
+ output := &PrometheusClient{
+ Listen: "127.0.0.1:0",
+ Path: defaultPath,
+ MetricVersion: 2,
+ Log: Logger,
+ CollectorsExclude: []string{"gocollector", "process"},
+ }
+ err = output.Init()
+ require.NoError(t, err)
+ err = output.Connect()
+ require.NoError(t, err)
+ defer func() {
+ err = output.Close()
+ require.NoError(t, err)
+ }()
+ err = output.Write(metrics)
+ require.NoError(t, err)
+
+ resp, err := http.Get(output.URL())
+ require.NoError(t, err)
+
+ actual, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ require.Equal(t,
+ strings.TrimSpace(string(tt.data)),
+ strings.TrimSpace(string(actual)))
+ })
+ }
+}
diff --git a/plugins/outputs/prometheus_client/v1/collector.go b/plugins/outputs/prometheus_client/v1/collector.go
new file mode 100644
index 0000000000000..7932bbc59f44d
--- /dev/null
+++ b/plugins/outputs/prometheus_client/v1/collector.go
@@ -0,0 +1,392 @@
+package v1
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_:]`)
+ validNameCharRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*`)
+)
+
+// SampleID uniquely identifies a Sample
+type SampleID string
+
+// Sample represents the current value of a series.
+type Sample struct {
+ // Labels are the Prometheus labels.
+ Labels map[string]string
+ // Value is the value in the Prometheus output. Only one of these will populated.
+ Value float64
+ HistogramValue map[float64]uint64
+ SummaryValue map[float64]float64
+ // Histograms and Summaries need a count and a sum
+ Count uint64
+ Sum float64
+ // Metric timestamp
+ Timestamp time.Time
+ // Expiration is the deadline that this Sample is valid until.
+ Expiration time.Time
+}
+
+// MetricFamily contains the data required to build valid prometheus Metrics.
+type MetricFamily struct {
+ // Samples are the Sample belonging to this MetricFamily.
+ Samples map[SampleID]*Sample
+ // Need the telegraf ValueType because there isn't a Prometheus ValueType
+ // representing Histogram or Summary
+ TelegrafValueType telegraf.ValueType
+ // LabelSet is the label counts for all Samples.
+ LabelSet map[string]int
+}
+
+type Collector struct {
+ ExpirationInterval time.Duration
+ StringAsLabel bool
+ ExportTimestamp bool
+ Log telegraf.Logger
+
+ sync.Mutex
+ fam map[string]*MetricFamily
+}
+
+func NewCollector(expire time.Duration, stringsAsLabel bool, logger telegraf.Logger) *Collector {
+ return &Collector{
+ ExpirationInterval: expire,
+ StringAsLabel: stringsAsLabel,
+ Log: logger,
+ fam: make(map[string]*MetricFamily),
+ }
+}
+
+func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
+ prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(ch)
+}
+
+func (c *Collector) Collect(ch chan<- prometheus.Metric) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.Expire(time.Now(), c.ExpirationInterval)
+
+ for name, family := range c.fam {
+ // Get list of all labels on MetricFamily
+ var labelNames []string
+ for k, v := range family.LabelSet {
+ if v > 0 {
+ labelNames = append(labelNames, k)
+ }
+ }
+ desc := prometheus.NewDesc(name, "Telegraf collected metric", labelNames, nil)
+
+ for _, sample := range family.Samples {
+ // Get labels for this sample; unset labels will be set to the
+ // empty string
+ var labels []string
+ for _, label := range labelNames {
+ v := sample.Labels[label]
+ labels = append(labels, v)
+ }
+
+ var metric prometheus.Metric
+ var err error
+ switch family.TelegrafValueType {
+ case telegraf.Summary:
+ metric, err = prometheus.NewConstSummary(desc, sample.Count, sample.Sum, sample.SummaryValue, labels...)
+ case telegraf.Histogram:
+ metric, err = prometheus.NewConstHistogram(desc, sample.Count, sample.Sum, sample.HistogramValue, labels...)
+ default:
+ metric, err = prometheus.NewConstMetric(desc, getPromValueType(family.TelegrafValueType), sample.Value, labels...)
+ }
+ if err != nil {
+ c.Log.Errorf("Error creating prometheus metric: "+
+ "key: %s, labels: %v, err: %v",
+ name, labels, err)
+ continue
+ }
+
+ if c.ExportTimestamp {
+ metric = prometheus.NewMetricWithTimestamp(sample.Timestamp, metric)
+ }
+ ch <- metric
+ }
+ }
+}
+
+func sanitize(value string) string {
+ return invalidNameCharRE.ReplaceAllString(value, "_")
+}
+
+func isValidTagName(tag string) bool {
+ return validNameCharRE.MatchString(tag)
+}
+
+func getPromValueType(tt telegraf.ValueType) prometheus.ValueType {
+ switch tt {
+ case telegraf.Counter:
+ return prometheus.CounterValue
+ case telegraf.Gauge:
+ return prometheus.GaugeValue
+ default:
+ return prometheus.UntypedValue
+ }
+}
+
+// CreateSampleID creates a SampleID based on the tags of a telegraf.Metric.
+func CreateSampleID(tags map[string]string) SampleID {
+ pairs := make([]string, 0, len(tags))
+ for k, v := range tags {
+ pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
+ }
+ sort.Strings(pairs)
+ return SampleID(strings.Join(pairs, ","))
+}
+
+func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) {
+
+ for k := range sample.Labels {
+ fam.LabelSet[k]++
+ }
+
+ fam.Samples[sampleID] = sample
+}
+
+func (c *Collector) addMetricFamily(point telegraf.Metric, sample *Sample, mname string, sampleID SampleID) {
+ var fam *MetricFamily
+ var ok bool
+ if fam, ok = c.fam[mname]; !ok {
+ fam = &MetricFamily{
+ Samples: make(map[SampleID]*Sample),
+ TelegrafValueType: point.Type(),
+ LabelSet: make(map[string]int),
+ }
+ c.fam[mname] = fam
+ }
+
+ addSample(fam, sample, sampleID)
+}
+
+// Sorted returns a copy of the metrics in time ascending order. A copy is
+// made to avoid modifying the input metric slice since doing so is not
+// allowed.
+func sorted(metrics []telegraf.Metric) []telegraf.Metric {
+ batch := make([]telegraf.Metric, 0, len(metrics))
+ for i := len(metrics) - 1; i >= 0; i-- {
+ batch = append(batch, metrics[i])
+ }
+ sort.Slice(batch, func(i, j int) bool {
+ return batch[i].Time().Before(batch[j].Time())
+ })
+ return batch
+}
+
+func (c *Collector) Add(metrics []telegraf.Metric) error {
+ c.Lock()
+ defer c.Unlock()
+
+ now := time.Now()
+
+ for _, point := range sorted(metrics) {
+ tags := point.Tags()
+ sampleID := CreateSampleID(tags)
+
+ labels := make(map[string]string)
+ for k, v := range tags {
+ name, ok := serializer.SanitizeLabelName(k)
+ if !ok {
+ continue
+ }
+ labels[name] = v
+ }
+
+ // Prometheus doesn't have a string value type, so convert string
+ // fields to labels if enabled.
+ if c.StringAsLabel {
+ for fn, fv := range point.Fields() {
+ switch fv := fv.(type) {
+ case string:
+ name, ok := serializer.SanitizeLabelName(fn)
+ if !ok {
+ continue
+ }
+ labels[name] = fv
+ }
+ }
+ }
+
+ switch point.Type() {
+ case telegraf.Summary:
+ var mname string
+ var sum float64
+ var count uint64
+ summaryvalue := make(map[float64]float64)
+ for fn, fv := range point.Fields() {
+ var value float64
+ switch fv := fv.(type) {
+ case int64:
+ value = float64(fv)
+ case uint64:
+ value = float64(fv)
+ case float64:
+ value = fv
+ default:
+ continue
+ }
+
+ switch fn {
+ case "sum":
+ sum = value
+ case "count":
+ count = uint64(value)
+ default:
+ limit, err := strconv.ParseFloat(fn, 64)
+ if err == nil {
+ summaryvalue[limit] = value
+ }
+ }
+ }
+ sample := &Sample{
+ Labels: labels,
+ SummaryValue: summaryvalue,
+ Count: count,
+ Sum: sum,
+ Timestamp: point.Time(),
+ Expiration: now.Add(c.ExpirationInterval),
+ }
+ mname = sanitize(point.Name())
+
+ if !isValidTagName(mname) {
+ continue
+ }
+
+ c.addMetricFamily(point, sample, mname, sampleID)
+
+ case telegraf.Histogram:
+ var mname string
+ var sum float64
+ var count uint64
+ histogramvalue := make(map[float64]uint64)
+ for fn, fv := range point.Fields() {
+ var value float64
+ switch fv := fv.(type) {
+ case int64:
+ value = float64(fv)
+ case uint64:
+ value = float64(fv)
+ case float64:
+ value = fv
+ default:
+ continue
+ }
+
+ switch fn {
+ case "sum":
+ sum = value
+ case "count":
+ count = uint64(value)
+ default:
+ limit, err := strconv.ParseFloat(fn, 64)
+ if err == nil {
+ histogramvalue[limit] = uint64(value)
+ }
+ }
+ }
+ sample := &Sample{
+ Labels: labels,
+ HistogramValue: histogramvalue,
+ Count: count,
+ Sum: sum,
+ Timestamp: point.Time(),
+ Expiration: now.Add(c.ExpirationInterval),
+ }
+ mname = sanitize(point.Name())
+
+ if !isValidTagName(mname) {
+ continue
+ }
+
+ c.addMetricFamily(point, sample, mname, sampleID)
+
+ default:
+ for fn, fv := range point.Fields() {
+ // Ignore string and bool fields.
+ var value float64
+ switch fv := fv.(type) {
+ case int64:
+ value = float64(fv)
+ case uint64:
+ value = float64(fv)
+ case float64:
+ value = fv
+ default:
+ continue
+ }
+
+ sample := &Sample{
+ Labels: labels,
+ Value: value,
+ Timestamp: point.Time(),
+ Expiration: now.Add(c.ExpirationInterval),
+ }
+
+ // Special handling of value field; supports passthrough from
+ // the prometheus input.
+ var mname string
+ switch point.Type() {
+ case telegraf.Counter:
+ if fn == "counter" {
+ mname = sanitize(point.Name())
+ }
+ case telegraf.Gauge:
+ if fn == "gauge" {
+ mname = sanitize(point.Name())
+ }
+ }
+ if mname == "" {
+ if fn == "value" {
+ mname = sanitize(point.Name())
+ } else {
+ mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn))
+ }
+ }
+ if !isValidTagName(mname) {
+ continue
+ }
+ c.addMetricFamily(point, sample, mname, sampleID)
+
+ }
+ }
+ }
+ return nil
+}
+
+func (c *Collector) Expire(now time.Time, age time.Duration) {
+ if age == 0 {
+ return
+ }
+
+ for name, family := range c.fam {
+ for key, sample := range family.Samples {
+ if age != 0 && now.After(sample.Expiration) {
+ for k := range sample.Labels {
+ family.LabelSet[k]--
+ }
+ delete(family.Samples, key)
+
+ if len(family.Samples) == 0 {
+ delete(c.fam, name)
+ }
+ }
+ }
+ }
+}
diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go
new file mode 100644
index 0000000000000..b28a4deab1cc9
--- /dev/null
+++ b/plugins/outputs/prometheus_client/v2/collector.go
@@ -0,0 +1,101 @@
+package v2
+
+import (
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus"
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+type Metric struct {
+ family *dto.MetricFamily
+ metric *dto.Metric
+}
+
+func (m *Metric) Desc() *prometheus.Desc {
+ labelNames := make([]string, 0, len(m.metric.Label))
+ for _, label := range m.metric.Label {
+ labelNames = append(labelNames, *label.Name)
+ }
+
+ desc := prometheus.NewDesc(*m.family.Name, *m.family.Help, labelNames, nil)
+
+ return desc
+}
+
+func (m *Metric) Write(out *dto.Metric) error {
+ out.Label = m.metric.Label
+ out.Counter = m.metric.Counter
+ out.Untyped = m.metric.Untyped
+ out.Gauge = m.metric.Gauge
+ out.Histogram = m.metric.Histogram
+ out.Summary = m.metric.Summary
+ out.TimestampMs = m.metric.TimestampMs
+ return nil
+}
+
+type Collector struct {
+ sync.Mutex
+ expireDuration time.Duration
+ coll *serializer.Collection
+}
+
+func NewCollector(expire time.Duration, stringsAsLabel bool, exportTimestamp bool) *Collector {
+ config := serializer.FormatConfig{}
+ if stringsAsLabel {
+ config.StringHandling = serializer.StringAsLabel
+ }
+
+ if exportTimestamp {
+ config.TimestampExport = serializer.ExportTimestamp
+ }
+
+ return &Collector{
+ expireDuration: expire,
+ coll: serializer.NewCollection(config),
+ }
+}
+
+func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
+ // Sending no descriptor at all marks the Collector as "unchecked",
+ // i.e. no checks will be performed at registration time, and the
+ // Collector may yield any Metric it sees fit in its Collect method.
+ return
+}
+
+func (c *Collector) Collect(ch chan<- prometheus.Metric) {
+ c.Lock()
+ defer c.Unlock()
+
+ // Expire metrics, doing this on Collect ensure metrics are removed even if no
+ // new metrics are added to the output.
+ if c.expireDuration != 0 {
+ c.coll.Expire(time.Now(), c.expireDuration)
+ }
+
+ for _, family := range c.coll.GetProto() {
+ for _, metric := range family.Metric {
+ ch <- &Metric{family: family, metric: metric}
+ }
+ }
+}
+
+func (c *Collector) Add(metrics []telegraf.Metric) error {
+ c.Lock()
+ defer c.Unlock()
+
+ for _, metric := range metrics {
+ c.coll.Add(metric, time.Now())
+ }
+
+ // Expire metrics, doing this on Add ensure metrics are removed even if no
+ // one is querying the data.
+ if c.expireDuration != 0 {
+ c.coll.Expire(time.Now(), c.expireDuration)
+ }
+
+ return nil
+}
diff --git a/plugins/outputs/socket_writer/README.md b/plugins/outputs/socket_writer/README.md
index 149cda2a6c543..5dc9d02464be7 100644
--- a/plugins/outputs/socket_writer/README.md
+++ b/plugins/outputs/socket_writer/README.md
@@ -32,6 +32,11 @@ It can output data in any of the [supported output formats](https://github.com/i
## Defaults to the OS configuration.
# keep_alive_period = "5m"
+ ## Content encoding for message payloads, can be set to "gzip" or to
+ ## "identity" to apply no encoding.
+ ##
+ # content_encoding = "identity"
+
## Data format to generate.
## Each data format has its own unique set of configuration options, read
## more about them here:
diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go
index 8b0f56accbdea..dae7edc0e806f 100644
--- a/plugins/outputs/socket_writer/socket_writer.go
+++ b/plugins/outputs/socket_writer/socket_writer.go
@@ -1,27 +1,29 @@
package socket_writer
import (
+ "crypto/tls"
"fmt"
"log"
"net"
"strings"
- "crypto/tls"
-
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
- tlsint "github.com/influxdata/telegraf/internal/tls"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers"
)
type SocketWriter struct {
+ ContentEncoding string `toml:"content_encoding"`
Address string
KeepAlivePeriod *internal.Duration
tlsint.ClientConfig
serializers.Serializer
+ encoder internal.ContentEncoder
+
net.Conn
}
@@ -56,6 +58,11 @@ func (sw *SocketWriter) SampleConfig() string {
## Defaults to the OS configuration.
# keep_alive_period = "5m"
+ ## Content encoding for packet-based connections (i.e. UDP, unixgram).
+ ## Can be set to "gzip" or to "identity" to apply no encoding.
+ ##
+ # content_encoding = "identity"
+
## Data format to generate.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -92,6 +99,11 @@ func (sw *SocketWriter) Connect() error {
if err := sw.setKeepAlive(c); err != nil {
log.Printf("unable to configure keep alive (%s): %s", sw.Address, err)
}
+ //set encoder
+ sw.encoder, err = internal.NewContentEncoder(sw.ContentEncoding)
+ if err != nil {
+ return err
+ }
sw.Conn = c
return nil
@@ -128,9 +140,16 @@ func (sw *SocketWriter) Write(metrics []telegraf.Metric) error {
for _, m := range metrics {
bs, err := sw.Serialize(m)
if err != nil {
- //TODO log & keep going with remaining metrics
- return err
+ log.Printf("D! [outputs.socket_writer] Could not serialize metric: %v", err)
+ continue
+ }
+
+ bs, err = sw.encoder.Encode(bs)
+ if err != nil {
+ log.Printf("D! [outputs.socket_writer] Could not encode metric: %v", err)
+ continue
}
+
if _, err := sw.Conn.Write(bs); err != nil {
//TODO log & keep going with remaining strings
if err, ok := err.(net.Error); !ok || !err.Temporary() {
diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go
index f7eb159ea6fc2..14b25e6c570ff 100644
--- a/plugins/outputs/socket_writer/socket_writer_test.go
+++ b/plugins/outputs/socket_writer/socket_writer_test.go
@@ -2,7 +2,6 @@ package socket_writer
import (
"bufio"
- "bytes"
"io/ioutil"
"net"
"os"
@@ -88,8 +87,10 @@ func testSocketWriter_stream(t *testing.T, sw *SocketWriter, lconn net.Conn) {
metrics := []telegraf.Metric{}
metrics = append(metrics, testutil.TestMetric(1, "test"))
mbs1out, _ := sw.Serialize(metrics[0])
+ mbs1out, _ = sw.encoder.Encode(mbs1out)
metrics = append(metrics, testutil.TestMetric(2, "test"))
mbs2out, _ := sw.Serialize(metrics[1])
+ mbs2out, _ = sw.encoder.Encode(mbs2out)
err := sw.Write(metrics)
require.NoError(t, err)
@@ -108,8 +109,12 @@ func testSocketWriter_packet(t *testing.T, sw *SocketWriter, lconn net.PacketCon
metrics := []telegraf.Metric{}
metrics = append(metrics, testutil.TestMetric(1, "test"))
mbs1out, _ := sw.Serialize(metrics[0])
+ mbs1out, _ = sw.encoder.Encode(mbs1out)
+ mbs1str := string(mbs1out)
metrics = append(metrics, testutil.TestMetric(2, "test"))
mbs2out, _ := sw.Serialize(metrics[1])
+ mbs2out, _ = sw.encoder.Encode(mbs2out)
+ mbs2str := string(mbs2out)
err := sw.Write(metrics)
require.NoError(t, err)
@@ -119,17 +124,12 @@ func testSocketWriter_packet(t *testing.T, sw *SocketWriter, lconn net.PacketCon
for len(mstrins) < 2 {
n, _, err := lconn.ReadFrom(buf)
require.NoError(t, err)
- for _, bs := range bytes.Split(buf[:n], []byte{'\n'}) {
- if len(bs) == 0 {
- continue
- }
- mstrins = append(mstrins, string(bs)+"\n")
- }
+ mstrins = append(mstrins, string(buf[:n]))
}
require.Len(t, mstrins, 2)
- assert.Equal(t, string(mbs1out), mstrins[0])
- assert.Equal(t, string(mbs2out), mstrins[1])
+ assert.Equal(t, mbs1str, mstrins[0])
+ assert.Equal(t, mbs2str, mstrins[1])
}
func TestSocketWriter_Write_err(t *testing.T) {
@@ -195,3 +195,17 @@ func TestSocketWriter_Write_reconnect(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, string(mbsout), string(buf[:n]))
}
+
+func TestSocketWriter_udp_gzip(t *testing.T) {
+ listener, err := net.ListenPacket("udp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ sw := newSocketWriter()
+ sw.Address = "udp://" + listener.LocalAddr().String()
+ sw.ContentEncoding = "gzip"
+
+ err = sw.Connect()
+ require.NoError(t, err)
+
+ testSocketWriter_packet(t, sw, listener)
+}
diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md
index cdf0a1591ac58..27ef3a09f6f6c 100644
--- a/plugins/outputs/stackdriver/README.md
+++ b/plugins/outputs/stackdriver/README.md
@@ -1,7 +1,11 @@
-# Stackdriver Output Plugin
+# Stackdriver Google Cloud Monitoring Output Plugin
-This plugin writes to the [Google Cloud Stackdriver API](https://cloud.google.com/monitoring/api/v3/)
-and requires [authentication](https://cloud.google.com/docs/authentication/getting-started) with Google Cloud using either a service account or user credentials. See the [Stackdriver documentation](https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services) for details on pricing.
+This plugin writes to the [Google Cloud Monitoring API][stackdriver] (formerly
+Stackdriver) and requires [authentication][] with Google Cloud using either a
+service account or user credentials
+
+This plugin accesses APIs which are [chargeable][pricing]; you might incur
+costs.
Requires `project` to specify where Stackdriver metrics will be delivered to.
@@ -24,7 +28,7 @@ Additional resource labels can be configured by `resource_labels`. By default th
## Custom resource type
# resource_type = "generic_node"
- ## Additonal resource labels
+ ## Additional resource labels
# [outputs.stackdriver.resource_labels]
# node_id = "$HOSTNAME"
# namespace = "myapp"
@@ -47,3 +51,6 @@ aggregated before then can be written. Consider using the [basicstats][]
aggregator to do this.
[basicstats]: /plugins/aggregators/basicstats/README.md
+[stackdriver]: https://cloud.google.com/monitoring/api/v3/
+[authentication]: https://cloud.google.com/docs/authentication/getting-started
+[pricing]: https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services
diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go
index 572cdb4c7cdc4..3bd38614b985e 100644
--- a/plugins/outputs/stackdriver/stackdriver.go
+++ b/plugins/outputs/stackdriver/stackdriver.go
@@ -12,7 +12,9 @@ import (
monitoring "cloud.google.com/go/monitoring/apiv3" // Imports the Stackdriver Monitoring client package.
googlepb "github.com/golang/protobuf/ptypes/timestamp"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/outputs"
+ "google.golang.org/api/option"
metricpb "google.golang.org/genproto/googleapis/api/metric"
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
@@ -59,7 +61,7 @@ var sampleConfig = `
## Custom resource type
# resource_type = "generic_node"
- ## Additonal resource labels
+ ## Additional resource labels
# [outputs.stackdriver.resource_labels]
# node_id = "$HOSTNAME"
# namespace = "myapp"
@@ -88,7 +90,7 @@ func (s *Stackdriver) Connect() error {
if s.client == nil {
ctx := context.Background()
- client, err := monitoring.NewMetricClient(ctx)
+ client, err := monitoring.NewMetricClient(ctx, option.WithUserAgent(internal.ProductToken()))
if err != nil {
return err
}
diff --git a/plugins/outputs/sumologic/README.md b/plugins/outputs/sumologic/README.md
new file mode 100644
index 0000000000000..165315121f434
--- /dev/null
+++ b/plugins/outputs/sumologic/README.md
@@ -0,0 +1,70 @@
+# Sumo Logic Output Plugin
+
+This plugin sends metrics to [Sumo Logic HTTP Source](https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source)
+in HTTP messages, encoded using one of the output data formats.
+
+Currently metrics can be sent using one of the following data formats, supported
+by Sumologic HTTP Source:
+
+ * `graphite` - for Content-Type of `application/vnd.sumologic.graphite`
+ * `carbon2` - for Content-Type of `application/vnd.sumologic.carbon2`
+ * `prometheus` - for Content-Type of `application/vnd.sumologic.prometheus`
+
+### Configuration:
+
+```toml
+# A plugin that can send metrics to Sumo Logic HTTP metric collector.
+[[outputs.sumologic]]
+ ## Unique URL generated for your HTTP Metrics Source.
+ ## This is the address to send metrics to.
+ # url = "https://events.sumologic.net/receiver/v1/http/"
+
+ ## Data format to be used for sending metrics.
+ ## This will set the "Content-Type" header accordingly.
+ ## Currently supported formats:
+ ## * graphite - for Content-Type of application/vnd.sumologic.graphite
+ ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2
+ ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus
+ ##
+ ## More information can be found at:
+ ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics
+ ##
+ ## NOTE:
+ ## When unset, telegraf will by default use the influx serializer which is currently unsupported
+ ## in HTTP Source.
+ data_format = "carbon2"
+
+ ## Timeout used for HTTP request
+ # timeout = "5s"
+
+ ## HTTP method, one of: "POST" or "PUT". "POST" is used by default if unset.
+ # method = "POST"
+
+ ## Max HTTP request body size in bytes before compression (if applied).
+ ## By default 1MB is recommended.
+ ## NOTE:
+ ## Bear in mind that in some serializer a metric even though serialized to multiple
+ ## lines cannot be split any further so setting this very low might not work
+ ## as expected.
+ # max_request_body_size = 1_000_000
+
+ ## Additional, Sumo specific options.
+ ## Full list can be found here:
+ ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers
+
+ ## Desired source name.
+ ## Useful if you want to override the source name configured for the source.
+ # source_name = ""
+
+ ## Desired host name.
+ ## Useful if you want to override the source host configured for the source.
+ # source_host = ""
+
+ ## Desired source category.
+ ## Useful if you want to override the source category configured for the source.
+ # source_category = ""
+
+ ## Comma-separated key=value list of dimensions to apply to every metric.
+ ## Custom dimensions will allow you to query your metrics at a more granular level.
+ # dimensions = ""
+```
diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go
new file mode 100644
index 0000000000000..aca0fb56ae1d2
--- /dev/null
+++ b/plugins/outputs/sumologic/sumologic.go
@@ -0,0 +1,306 @@
+package sumologic
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/outputs"
+ "github.com/influxdata/telegraf/plugins/serializers"
+ "github.com/influxdata/telegraf/plugins/serializers/carbon2"
+ "github.com/influxdata/telegraf/plugins/serializers/graphite"
+ "github.com/influxdata/telegraf/plugins/serializers/prometheus"
+)
+
+const (
+ sampleConfig = `
+ ## Unique URL generated for your HTTP Metrics Source.
+ ## This is the address to send metrics to.
+ # url = "https://events.sumologic.net/receiver/v1/http/"
+
+ ## Data format to be used for sending metrics.
+ ## This will set the "Content-Type" header accordingly.
+ ## Currently supported formats:
+ ## * graphite - for Content-Type of application/vnd.sumologic.graphite
+ ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2
+ ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus
+ ##
+ ## More information can be found at:
+ ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics
+ ##
+ ## NOTE:
+ ## When unset, telegraf will by default use the influx serializer which is currently unsupported
+ ## in HTTP Source.
+ data_format = "carbon2"
+
+ ## Timeout used for HTTP request
+ # timeout = "5s"
+
+ ## HTTP method, one of: "POST" or "PUT". "POST" is used by default if unset.
+ # method = "POST"
+
+ ## Max HTTP request body size in bytes before compression (if applied).
+ ## By default 1MB is recommended.
+ ## NOTE:
+ ## Bear in mind that in some serializer a metric even though serialized to multiple
+ ## lines cannot be split any further so setting this very low might not work
+ ## as expected.
+ # max_request_body_size = 1_000_000
+
+ ## Additional, Sumo specific options.
+ ## Full list can be found here:
+ ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers
+
+ ## Desired source name.
+ ## Useful if you want to override the source name configured for the source.
+ # source_name = ""
+
+ ## Desired host name.
+ ## Useful if you want to override the source host configured for the source.
+ # source_host = ""
+
+ ## Desired source category.
+ ## Useful if you want to override the source category configured for the source.
+ # source_category = ""
+
+ ## Comma-separated key=value list of dimensions to apply to every metric.
+ ## Custom dimensions will allow you to query your metrics at a more granular level.
+ # dimensions = ""
+`
+
+ defaultClientTimeout = 5 * time.Second
+ defaultMethod = http.MethodPost
+ defaultMaxRequestBodySize = 1_000_000
+
+ contentTypeHeader = "Content-Type"
+ carbon2ContentType = "application/vnd.sumologic.carbon2"
+ graphiteContentType = "application/vnd.sumologic.graphite"
+ prometheusContentType = "application/vnd.sumologic.prometheus"
+)
+
+type header string
+
+const (
+ sourceNameHeader header = `X-Sumo-Name`
+ sourceHostHeader header = `X-Sumo-Host`
+ sourceCategoryHeader header = `X-Sumo-Category`
+ dimensionsHeader header = `X-Sumo-Dimensions`
+)
+
+type SumoLogic struct {
+ URL string `toml:"url"`
+ Timeout internal.Duration `toml:"timeout"`
+ Method string `toml:"method"`
+ MaxRequstBodySize config.Size `toml:"max_request_body_size"`
+
+ SourceName string `toml:"source_name"`
+ SourceHost string `toml:"source_host"`
+ SourceCategory string `toml:"source_category"`
+ Dimensions string `toml:"dimensions"`
+
+ client *http.Client
+ serializer serializers.Serializer
+
+ err error
+ headers map[string]string
+}
+
+func (s *SumoLogic) SetSerializer(serializer serializers.Serializer) {
+ if s.headers == nil {
+ s.headers = make(map[string]string)
+ }
+
+ switch serializer.(type) {
+ case *carbon2.Serializer:
+ s.headers[contentTypeHeader] = carbon2ContentType
+ case *graphite.GraphiteSerializer:
+ s.headers[contentTypeHeader] = graphiteContentType
+ case *prometheus.Serializer:
+ s.headers[contentTypeHeader] = prometheusContentType
+
+ default:
+ s.err = errors.Errorf("unsupported serializer %T", serializer)
+ }
+
+ s.serializer = serializer
+}
+
+func (s *SumoLogic) createClient(ctx context.Context) (*http.Client, error) {
+ return &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ },
+ Timeout: s.Timeout.Duration,
+ }, nil
+}
+
+func (s *SumoLogic) Connect() error {
+ if s.err != nil {
+ return errors.Wrap(s.err, "sumologic: incorrect configuration")
+ }
+
+ if s.Method == "" {
+ s.Method = defaultMethod
+ }
+ s.Method = strings.ToUpper(s.Method)
+ if s.Method != http.MethodPost && s.Method != http.MethodPut {
+ return fmt.Errorf("invalid method [%s] %s", s.URL, s.Method)
+ }
+
+ if s.Timeout.Duration == 0 {
+ s.Timeout.Duration = defaultClientTimeout
+ }
+
+ client, err := s.createClient(context.Background())
+ if err != nil {
+ return err
+ }
+
+ s.client = client
+
+ return nil
+}
+
+func (s *SumoLogic) Close() error {
+ return s.err
+}
+
+func (s *SumoLogic) Description() string {
+ return "A plugin that can transmit metrics to Sumo Logic HTTP Source"
+}
+
+func (s *SumoLogic) SampleConfig() string {
+ return sampleConfig
+}
+
+func (s *SumoLogic) Write(metrics []telegraf.Metric) error {
+ if s.err != nil {
+ return errors.Wrap(s.err, "sumologic: incorrect configuration")
+ }
+ if s.serializer == nil {
+ return errors.New("sumologic: serializer unset")
+ }
+
+ reqBody, err := s.serializer.SerializeBatch(metrics)
+ if err != nil {
+ return err
+ }
+
+ if l := len(reqBody); l > int(s.MaxRequstBodySize) {
+ var (
+ // Do the rounded up integer division
+ numChunks = (l + int(s.MaxRequstBodySize) - 1) / int(s.MaxRequstBodySize)
+ chunks = make([][]byte, 0, numChunks)
+ numMetrics = len(metrics)
+ // Do the rounded up integer division
+ stepMetrics = (numMetrics + numChunks - 1) / numChunks
+ )
+
+ for i := 0; i < numMetrics; i += stepMetrics {
+ boundary := i + stepMetrics
+ if boundary > numMetrics {
+ boundary = numMetrics - 1
+ }
+
+ chunkBody, err := s.serializer.SerializeBatch(metrics[i:boundary])
+ if err != nil {
+ return err
+ }
+ chunks = append(chunks, chunkBody)
+ }
+
+ return s.writeRequestChunks(chunks)
+ }
+
+ return s.write(reqBody)
+}
+
+func (s *SumoLogic) writeRequestChunks(chunks [][]byte) error {
+ for _, reqChunk := range chunks {
+ if err := s.write(reqChunk); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *SumoLogic) write(reqBody []byte) error {
+ var (
+ err error
+ buff bytes.Buffer
+ gz = gzip.NewWriter(&buff)
+ )
+
+ if _, err = gz.Write(reqBody); err != nil {
+ return err
+ }
+
+ if err = gz.Close(); err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest(s.Method, s.URL, &buff)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Content-Encoding", "gzip")
+ req.Header.Set("User-Agent", internal.ProductToken())
+
+ // Set headers coming from the configuration.
+ for k, v := range s.headers {
+ req.Header.Set(k, v)
+ }
+
+ setHeaderIfSetInConfig(req, sourceNameHeader, s.SourceName)
+ setHeaderIfSetInConfig(req, sourceHostHeader, s.SourceHost)
+ setHeaderIfSetInConfig(req, sourceCategoryHeader, s.SourceCategory)
+ setHeaderIfSetInConfig(req, dimensionsHeader, s.Dimensions)
+
+ resp, err := s.client.Do(req)
+ if err != nil {
+ return errors.Wrapf(err, "sumologic: failed sending request to [%s]", s.URL)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ return errors.Errorf(
+ "sumologic: when writing to [%s] received status code: %d",
+ s.URL, resp.StatusCode,
+ )
+ }
+
+ return nil
+}
+
+func setHeaderIfSetInConfig(r *http.Request, h header, value string) {
+ if value != "" {
+ r.Header.Set(string(h), value)
+ }
+}
+
+func Default() *SumoLogic {
+ return &SumoLogic{
+ Timeout: internal.Duration{
+ Duration: defaultClientTimeout,
+ },
+ Method: defaultMethod,
+ MaxRequstBodySize: defaultMaxRequestBodySize,
+ headers: make(map[string]string),
+ }
+}
+
+func init() {
+ outputs.Add("sumologic", func() telegraf.Output {
+ return Default()
+ })
+}
diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go
new file mode 100644
index 0000000000000..23db47c5b5c88
--- /dev/null
+++ b/plugins/outputs/sumologic/sumologic_test.go
@@ -0,0 +1,576 @@
+package sumologic
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/serializers"
+ "github.com/influxdata/telegraf/plugins/serializers/carbon2"
+ "github.com/influxdata/telegraf/plugins/serializers/graphite"
+ "github.com/influxdata/telegraf/plugins/serializers/prometheus"
+)
+
+func getMetric(t *testing.T) telegraf.Metric {
+ m, err := metric.New(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(0, 0),
+ )
+ require.NoError(t, err)
+ return m
+}
+
+func getMetrics(t *testing.T) []telegraf.Metric {
+ const count = 10
+ var metrics = make([]telegraf.Metric, count)
+
+ for i := 0; i < count; i++ {
+ m, err := metric.New(
+ fmt.Sprintf("cpu-%d", i),
+ map[string]string{
+ "ec2_instance": "aws-129038123",
+ "image": "aws-ami-1234567890",
+ },
+ map[string]interface{}{
+ "idle": 5876876,
+ "steal": 5876876,
+ "system": 5876876,
+ "user": 5876876,
+ "temp": 70.0,
+ },
+ time.Unix(0, 0),
+ )
+ require.NoError(t, err)
+ metrics[i] = m
+ }
+ return metrics
+}
+
+func TestInvalidMethod(t *testing.T) {
+ plugin := &SumoLogic{
+ URL: "",
+ Method: http.MethodGet,
+ }
+
+ err := plugin.Connect()
+ require.Error(t, err)
+}
+
+func TestMethod(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ plugin func() *SumoLogic
+ expectedMethod string
+ connectError bool
+ }{
+ {
+ name: "default method is POST",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ return s
+ },
+ expectedMethod: http.MethodPost,
+ },
+ {
+ name: "put is okay",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ s.Method = http.MethodPut
+ return s
+ },
+ expectedMethod: http.MethodPut,
+ },
+ {
+ name: "get is invalid",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ s.Method = http.MethodGet
+ return s
+ },
+ connectError: true,
+ },
+ {
+ name: "method is case insensitive",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ s.Method = "poST"
+ return s
+ },
+ expectedMethod: http.MethodPost,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, tt.expectedMethod, r.Method)
+ w.WriteHeader(http.StatusOK)
+ })
+
+ serializer, err := carbon2.NewSerializer(carbon2.Carbon2FormatFieldSeparate)
+ require.NoError(t, err)
+
+ plugin := tt.plugin()
+ plugin.SetSerializer(serializer)
+ err = plugin.Connect()
+ if tt.connectError {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+
+ err = plugin.Write([]telegraf.Metric{getMetric(t)})
+ require.NoError(t, err)
+ })
+ }
+}
+
+func TestStatusCode(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ pluginFn := func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ return s
+ }
+
+ tests := []struct {
+ name string
+ plugin *SumoLogic
+ statusCode int
+ errFunc func(t *testing.T, err error)
+ }{
+ {
+ name: "success",
+ plugin: pluginFn(),
+ statusCode: http.StatusOK,
+ errFunc: func(t *testing.T, err error) {
+ require.NoError(t, err)
+ },
+ },
+ {
+ name: "1xx status is an error",
+ plugin: pluginFn(),
+ statusCode: http.StatusSwitchingProtocols,
+ errFunc: func(t *testing.T, err error) {
+ require.Error(t, err)
+ },
+ },
+ {
+ name: "3xx status is an error",
+ plugin: pluginFn(),
+ statusCode: http.StatusMultipleChoices,
+ errFunc: func(t *testing.T, err error) {
+ require.Error(t, err)
+ },
+ },
+ {
+ name: "4xx status is an error",
+ plugin: pluginFn(),
+ statusCode: http.StatusBadRequest,
+ errFunc: func(t *testing.T, err error) {
+ require.Error(t, err)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(tt.statusCode)
+ })
+
+ serializer, err := carbon2.NewSerializer(carbon2.Carbon2FormatFieldSeparate)
+ require.NoError(t, err)
+
+ tt.plugin.SetSerializer(serializer)
+ err = tt.plugin.Connect()
+ require.NoError(t, err)
+
+ err = tt.plugin.Write([]telegraf.Metric{getMetric(t)})
+ tt.errFunc(t, err)
+ })
+ }
+}
+
+func TestContentType(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ })
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ carbon2Serializer, err := carbon2.NewSerializer(carbon2.Carbon2FormatFieldSeparate)
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ plugin func() *SumoLogic
+ expectedErr bool
+ serializer serializers.Serializer
+ }{
+ {
+ name: "carbon2 is supported",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ s.headers = map[string]string{
+ contentTypeHeader: carbon2ContentType,
+ }
+ return s
+ },
+ serializer: carbon2Serializer,
+ expectedErr: false,
+ },
+ {
+ name: "graphite is supported",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ s.headers = map[string]string{
+ contentTypeHeader: graphiteContentType,
+ }
+ return s
+ },
+ serializer: &graphite.GraphiteSerializer{},
+ expectedErr: false,
+ },
+ {
+ name: "prometheus is supported",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ s.headers = map[string]string{
+ contentTypeHeader: prometheusContentType,
+ }
+ return s
+ },
+ serializer: &prometheus.Serializer{},
+ expectedErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ plugin := tt.plugin()
+
+ plugin.SetSerializer(tt.serializer)
+
+ err := plugin.Connect()
+ require.NoError(t, err)
+
+ err = plugin.Write([]telegraf.Metric{getMetric(t)})
+ require.NoError(t, err)
+ })
+ }
+}
+
+func TestContentEncodingGzip(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ plugin func() *SumoLogic
+ }{
+ {
+ name: "default content_encoding=gzip works",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ return s
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, "gzip", r.Header.Get("Content-Encoding"))
+
+ body, err := gzip.NewReader(r.Body)
+ require.NoError(t, err)
+
+ payload, err := ioutil.ReadAll(body)
+ require.NoError(t, err)
+
+ assert.Equal(t, string(payload), "metric=cpu field=value 42 0\n")
+
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ serializer, err := carbon2.NewSerializer(carbon2.Carbon2FormatFieldSeparate)
+ require.NoError(t, err)
+
+ plugin := tt.plugin()
+
+ plugin.SetSerializer(serializer)
+ err = plugin.Connect()
+ require.NoError(t, err)
+
+ err = plugin.Write([]telegraf.Metric{getMetric(t)})
+ require.NoError(t, err)
+ })
+ }
+}
+
+type TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request)
+
+func TestDefaultUserAgent(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ t.Run("default-user-agent", func(t *testing.T) {
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent"))
+ w.WriteHeader(http.StatusOK)
+ })
+
+ plugin := &SumoLogic{
+ URL: u.String(),
+ Method: defaultMethod,
+ MaxRequstBodySize: Default().MaxRequstBodySize,
+ }
+
+ serializer, err := carbon2.NewSerializer(carbon2.Carbon2FormatFieldSeparate)
+ require.NoError(t, err)
+
+ plugin.SetSerializer(serializer)
+ err = plugin.Connect()
+ require.NoError(t, err)
+
+ err = plugin.Write([]telegraf.Metric{getMetric(t)})
+ require.NoError(t, err)
+ })
+}
+
+func TestTOMLConfig(t *testing.T) {
+ testcases := []struct {
+ name string
+ configBytes []byte
+ expectedError bool
+ }{
+ {
+ name: "carbon2 content type is supported",
+ configBytes: []byte(`
+[[outputs.sumologic]]
+ url = "https://localhost:3000"
+ data_format = "carbon2"
+ `),
+ expectedError: false,
+ },
+ {
+ name: "graphite content type is supported",
+ configBytes: []byte(`
+[[outputs.sumologic]]
+ url = "https://localhost:3000"
+ data_format = "graphite"
+ `),
+ expectedError: false,
+ },
+ {
+ name: "prometheus content type is supported",
+ configBytes: []byte(`
+[[outputs.sumologic]]
+ url = "https://localhost:3000"
+ data_format = "prometheus"
+ `),
+ expectedError: false,
+ },
+ {
+ name: "setting extra headers is not possible",
+ configBytes: []byte(`
+[[outputs.sumologic]]
+ url = "https://localhost:3000"
+ data_format = "carbon2"
+ [outputs.sumologic.headers]
+ X-Sumo-Name = "dummy"
+ X-Sumo-Host = "dummy"
+ X-Sumo-Category = "dummy"
+ X-Sumo-Dimensions = "dummy"
+ `),
+ expectedError: true,
+ },
+ {
+ name: "full example from sample config is correct",
+ configBytes: []byte(`
+[[outputs.sumologic]]
+ url = "https://localhost:3000"
+ data_format = "carbon2"
+ timeout = "5s"
+ method = "POST"
+ source_name = "name"
+ source_host = "hosta"
+ source_category = "category"
+ dimensions = "dimensions"
+ `),
+ expectedError: false,
+ },
+ {
+ name: "unknown key - sumo_metadata - in config fails",
+ configBytes: []byte(`
+[[outputs.sumologic]]
+ url = "https://localhost:3000"
+ data_format = "carbon2"
+ timeout = "5s"
+ method = "POST"
+ source_name = "name"
+ sumo_metadata = "metadata"
+ `),
+ expectedError: true,
+ },
+ }
+ for _, tt := range testcases {
+ t.Run(tt.name, func(t *testing.T) {
+ c := config.NewConfig()
+
+ if tt.expectedError {
+ require.Error(t, c.LoadConfigData(tt.configBytes))
+ } else {
+ require.NoError(t, c.LoadConfigData(tt.configBytes))
+ }
+ })
+ }
+}
+
+func TestMaxRequestBodySize(t *testing.T) {
+ ts := httptest.NewServer(http.NotFoundHandler())
+ defer ts.Close()
+
+ u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+ require.NoError(t, err)
+
+ testcases := []struct {
+ name string
+ plugin func() *SumoLogic
+ metrics []telegraf.Metric
+ expectedError bool
+ expectedRequestCount int
+ }{
+ {
+ name: "default max request body size is 1MB and doesn't split small enough metric slices",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ return s
+ },
+ metrics: []telegraf.Metric{getMetric(t)},
+ expectedError: false,
+ expectedRequestCount: 1,
+ },
+ {
+ name: "default max request body size is 1MB and doesn't split small even medium sized metrics",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ return s
+ },
+ metrics: getMetrics(t),
+ expectedError: false,
+ expectedRequestCount: 1,
+ },
+ {
+ name: "max request body size properly splits requests - max 2500",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ s.MaxRequstBodySize = 2500
+ return s
+ },
+ metrics: getMetrics(t),
+ expectedError: false,
+ expectedRequestCount: 2,
+ },
+ {
+ name: "max request body size properly splits requests - max 1000",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ s.MaxRequstBodySize = 1000
+ return s
+ },
+ metrics: getMetrics(t),
+ expectedError: false,
+ expectedRequestCount: 5,
+ },
+ {
+ name: "max request body size properly splits requests - max 300",
+ plugin: func() *SumoLogic {
+ s := Default()
+ s.URL = u.String()
+ s.MaxRequstBodySize = 300
+ return s
+ },
+ metrics: getMetrics(t),
+ expectedError: false,
+ expectedRequestCount: 10,
+ },
+ }
+
+ for _, tt := range testcases {
+ t.Run(tt.name, func(t *testing.T) {
+ var requestCount int
+ ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ requestCount++
+ w.WriteHeader(http.StatusOK)
+ })
+
+ serializer, err := carbon2.NewSerializer(carbon2.Carbon2FormatFieldSeparate)
+ require.NoError(t, err)
+
+ plugin := tt.plugin()
+ plugin.SetSerializer(serializer)
+
+ err = plugin.Connect()
+ require.NoError(t, err)
+
+ err = plugin.Write(tt.metrics)
+ if tt.expectedError {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tt.expectedRequestCount, requestCount)
+ }
+ })
+ }
+}
diff --git a/plugins/outputs/syslog/README.md b/plugins/outputs/syslog/README.md
new file mode 100644
index 0000000000000..cb9bc8965707f
--- /dev/null
+++ b/plugins/outputs/syslog/README.md
@@ -0,0 +1,108 @@
+# Syslog Output Plugin
+
+The syslog output plugin sends syslog messages transmitted over
+[UDP](https://tools.ietf.org/html/rfc5426) or
+[TCP](https://tools.ietf.org/html/rfc6587) or
+[TLS](https://tools.ietf.org/html/rfc5425), with or without the octet counting framing.
+
+Syslog messages are formatted according to
+[RFC 5424](https://tools.ietf.org/html/rfc5424).
+
+### Configuration
+
+```toml
+[[outputs.syslog]]
+ ## URL to connect to
+ ## ex: address = "tcp://127.0.0.1:8094"
+ ## ex: address = "tcp4://127.0.0.1:8094"
+ ## ex: address = "tcp6://127.0.0.1:8094"
+ ## ex: address = "tcp6://[2001:db8::1]:8094"
+ ## ex: address = "udp://127.0.0.1:8094"
+ ## ex: address = "udp4://127.0.0.1:8094"
+ ## ex: address = "udp6://127.0.0.1:8094"
+ address = "tcp://127.0.0.1:8094"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+
+ ## Period between keep alive probes.
+ ## Only applies to TCP sockets.
+ ## 0 disables keep alive probes.
+ ## Defaults to the OS configuration.
+ # keep_alive_period = "5m"
+
+ ## The framing technique with which it is expected that messages are
+ ## transported (default = "octet-counting"). Whether the messages come
+ ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+ ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
+ ## be one of "octet-counting", "non-transparent".
+ # framing = "octet-counting"
+
+ ## The trailer to be expected in case of non-transparent framing (default = "LF").
+ ## Must be one of "LF", or "NUL".
+ # trailer = "LF"
+
+ ## SD-PARAMs settings
+ ## Syslog messages can contain key/value pairs within zero or more
+ ## structured data sections. For each unrecognized metric tag/field a
+ ## SD-PARAMS is created.
+ ##
+ ## Example:
+ ## [[outputs.syslog]]
+ ## sdparam_separator = "_"
+ ## default_sdid = "default@32473"
+ ## sdids = ["foo@123", "bar@456"]
+ ##
+ ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
+ ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
+
+ ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
+ # sdparam_separator = "_"
+
+ ## Default sdid used for tags/fields that don't contain a prefix defined in
+ ## the explicit sdids setting below If no default is specified, no SD-PARAMs
+ ## will be used for unrecognized field.
+ # default_sdid = "default@32473"
+
+ ## List of explicit prefixes to extract from tag/field keys and use as the
+ ## SDID, if they match (see above example for more details):
+ # sdids = ["foo@123", "bar@456"]
+
+ ## Default severity value. Severity and Facility are used to calculate the
+ ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
+ ## with key "severity_code" is defined. If unset, 5 (notice) is the default
+ # default_severity_code = 5
+
+ ## Default facility value. Facility and Severity are used to calculate the
+ ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
+ ## key "facility_code" is defined. If unset, 1 (user-level) is the default
+ # default_facility_code = 1
+
+ ## Default APP-NAME value (RFC5424#section-6.2.5)
+ ## Used when no metric tag with key "appname" is defined.
+ ## If unset, "Telegraf" is the default
+ # default_appname = "Telegraf"
+```
+
+### Metric mapping
+The output plugin expects syslog metrics tags and fields to match up with the
+ones created in the [syslog input][].
+
+The following table shows the metric tags, field and defaults used to format syslog messages.
+
+| Syslog field | Metric Tag | Metric Field | Default value |
+| --- | --- | --- | --- |
+| APP-NAME | appname | - | default_appname = "Telegraf" |
+| TIMESTAMP | - | timestamp | Metric's own timestamp |
+| VERSION | - | version | 1 |
+| PRI | - | serverity_code + (8 * facility_code)| default_severity_code=5 (notice), default_facility_code=1 (user-level)|
+| HOSTNAME | hostname OR source OR host | - | os.Hostname() |
+| MSGID | - | msgid | Metric name |
+| PROCID | - | procid | - |
+| MSG | - | msg | - |
+
+[syslog input]: /plugins/inputs/syslog#metrics
diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go
new file mode 100644
index 0000000000000..1b46d02e210b0
--- /dev/null
+++ b/plugins/outputs/syslog/syslog.go
@@ -0,0 +1,249 @@
+package syslog
+
+import (
+ "crypto/tls"
+ "fmt"
+ "log"
+ "net"
+ "strconv"
+ "strings"
+
+ "github.com/influxdata/go-syslog/v2/nontransparent"
+ "github.com/influxdata/go-syslog/v2/rfc5424"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ framing "github.com/influxdata/telegraf/internal/syslog"
+ tlsint "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/outputs"
+)
+
+type Syslog struct {
+ Address string
+ KeepAlivePeriod *internal.Duration
+ DefaultSdid string
+ DefaultSeverityCode uint8
+ DefaultFacilityCode uint8
+ DefaultAppname string
+ Sdids []string
+ Separator string `toml:"sdparam_separator"`
+ Framing framing.Framing
+ Trailer nontransparent.TrailerType
+ net.Conn
+ tlsint.ClientConfig
+ mapper *SyslogMapper
+}
+
+var sampleConfig = `
+ ## URL to connect to
+ ## ex: address = "tcp://127.0.0.1:8094"
+ ## ex: address = "tcp4://127.0.0.1:8094"
+ ## ex: address = "tcp6://127.0.0.1:8094"
+ ## ex: address = "tcp6://[2001:db8::1]:8094"
+ ## ex: address = "udp://127.0.0.1:8094"
+ ## ex: address = "udp4://127.0.0.1:8094"
+ ## ex: address = "udp6://127.0.0.1:8094"
+ address = "tcp://127.0.0.1:8094"
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+
+ ## Period between keep alive probes.
+ ## Only applies to TCP sockets.
+ ## 0 disables keep alive probes.
+ ## Defaults to the OS configuration.
+ # keep_alive_period = "5m"
+
+ ## The framing technique with which it is expected that messages are
+ ## transported (default = "octet-counting"). Whether the messages come
+ ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+ ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
+ ## be one of "octet-counting", "non-transparent".
+ # framing = "octet-counting"
+
+ ## The trailer to be expected in case of non-transparent framing (default = "LF").
+ ## Must be one of "LF", or "NUL".
+ # trailer = "LF"
+
+ ## SD-PARAMs settings
+ ## Syslog messages can contain key/value pairs within zero or more
+ ## structured data sections. For each unrecognized metric tag/field a
+ ## SD-PARAMS is created.
+ ##
+ ## Example:
+ ## [[outputs.syslog]]
+ ## sdparam_separator = "_"
+ ## default_sdid = "default@32473"
+ ## sdids = ["foo@123", "bar@456"]
+ ##
+ ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
+ ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
+
+ ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
+ # sdparam_separator = "_"
+
+ ## Default sdid used for tags/fields that don't contain a prefix defined in
+ ## the explicit sdids setting below If no default is specified, no SD-PARAMs
+ ## will be used for unrecognized field.
+ # default_sdid = "default@32473"
+
+ ## List of explicit prefixes to extract from tag/field keys and use as the
+ ## SDID, if they match (see above example for more details):
+ # sdids = ["foo@123", "bar@456"]
+
+ ## Default severity value. Severity and Facility are used to calculate the
+ ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
+ ## with key "severity_code" is defined. If unset, 5 (notice) is the default
+ # default_severity_code = 5
+
+ ## Default facility value. Facility and Severity are used to calculate the
+ ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
+ ## key "facility_code" is defined. If unset, 1 (user-level) is the default
+ # default_facility_code = 1
+
+ ## Default APP-NAME value (RFC5424#section-6.2.5)
+ ## Used when no metric tag with key "appname" is defined.
+ ## If unset, "Telegraf" is the default
+ # default_appname = "Telegraf"
+`
+
+func (s *Syslog) Connect() error {
+ s.initializeSyslogMapper()
+
+ spl := strings.SplitN(s.Address, "://", 2)
+ if len(spl) != 2 {
+ return fmt.Errorf("invalid address: %s", s.Address)
+ }
+
+ tlsCfg, err := s.ClientConfig.TLSConfig()
+ if err != nil {
+ return err
+ }
+
+ var c net.Conn
+ if tlsCfg == nil {
+ c, err = net.Dial(spl[0], spl[1])
+ } else {
+ c, err = tls.Dial(spl[0], spl[1], tlsCfg)
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := s.setKeepAlive(c); err != nil {
+ log.Printf("unable to configure keep alive (%s): %s", s.Address, err)
+ }
+
+ s.Conn = c
+ return nil
+}
+
+func (s *Syslog) setKeepAlive(c net.Conn) error {
+ if s.KeepAlivePeriod == nil {
+ return nil
+ }
+ tcpc, ok := c.(*net.TCPConn)
+ if !ok {
+ return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(s.Address, "://", 2)[0])
+ }
+ if s.KeepAlivePeriod.Duration == 0 {
+ return tcpc.SetKeepAlive(false)
+ }
+ if err := tcpc.SetKeepAlive(true); err != nil {
+ return err
+ }
+ return tcpc.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration)
+}
+
+func (s *Syslog) Close() error {
+ if s.Conn == nil {
+ return nil
+ }
+ err := s.Conn.Close()
+ s.Conn = nil
+ return err
+}
+
+func (s *Syslog) SampleConfig() string {
+ return sampleConfig
+}
+
+func (s *Syslog) Description() string {
+ return "Configuration for Syslog server to send metrics to"
+}
+
+func (s *Syslog) Write(metrics []telegraf.Metric) (err error) {
+ if s.Conn == nil {
+ // previous write failed with permanent error and socket was closed.
+ if err = s.Connect(); err != nil {
+ return err
+ }
+ }
+ for _, metric := range metrics {
+ var msg *rfc5424.SyslogMessage
+ if msg, err = s.mapper.MapMetricToSyslogMessage(metric); err != nil {
+ log.Printf("E! [outputs.syslog] Failed to create syslog message: %v", err)
+ continue
+ }
+ var msgBytesWithFraming []byte
+ if msgBytesWithFraming, err = s.getSyslogMessageBytesWithFraming(msg); err != nil {
+ log.Printf("E! [outputs.syslog] Failed to convert syslog message with framing: %v", err)
+ continue
+ }
+ if _, err = s.Conn.Write(msgBytesWithFraming); err != nil {
+ if netErr, ok := err.(net.Error); !ok || !netErr.Temporary() {
+ s.Close()
+ s.Conn = nil
+ return fmt.Errorf("closing connection: %v", netErr)
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *Syslog) getSyslogMessageBytesWithFraming(msg *rfc5424.SyslogMessage) ([]byte, error) {
+ var msgString string
+ var err error
+ if msgString, err = msg.String(); err != nil {
+ return nil, err
+ }
+ msgBytes := []byte(msgString)
+
+ if s.Framing == framing.OctetCounting {
+ return append([]byte(strconv.Itoa(len(msgBytes))+" "), msgBytes...), nil
+ }
+ // Non-transparent framing
+ return append(msgBytes, byte(s.Trailer)), nil
+}
+
+func (s *Syslog) initializeSyslogMapper() {
+ if s.mapper != nil {
+ return
+ }
+ s.mapper = newSyslogMapper()
+ s.mapper.DefaultFacilityCode = s.DefaultFacilityCode
+ s.mapper.DefaultSeverityCode = s.DefaultSeverityCode
+ s.mapper.DefaultAppname = s.DefaultAppname
+ s.mapper.Separator = s.Separator
+ s.mapper.DefaultSdid = s.DefaultSdid
+ s.mapper.Sdids = s.Sdids
+}
+
+func newSyslog() *Syslog {
+ return &Syslog{
+ Framing: framing.OctetCounting,
+ Trailer: nontransparent.LF,
+ Separator: "_",
+ DefaultSeverityCode: uint8(5), // notice
+ DefaultFacilityCode: uint8(1), // user-level
+ DefaultAppname: "Telegraf",
+ }
+}
+
+func init() {
+ outputs.Add("syslog", func() telegraf.Output { return newSyslog() })
+}
diff --git a/plugins/outputs/syslog/syslog_mapper.go b/plugins/outputs/syslog/syslog_mapper.go
new file mode 100644
index 0000000000000..4e4848205ca28
--- /dev/null
+++ b/plugins/outputs/syslog/syslog_mapper.go
@@ -0,0 +1,199 @@
+package syslog
+
+import (
+ "errors"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/go-syslog/v2/rfc5424"
+ "github.com/influxdata/telegraf"
+)
+
+type SyslogMapper struct {
+ DefaultSdid string
+ DefaultSeverityCode uint8
+ DefaultFacilityCode uint8
+ DefaultAppname string
+ Sdids []string
+ Separator string
+ reservedKeys map[string]bool
+}
+
+// MapMetricToSyslogMessage maps metrics tags/fields to syslog messages
+func (sm *SyslogMapper) MapMetricToSyslogMessage(metric telegraf.Metric) (*rfc5424.SyslogMessage, error) {
+ msg := &rfc5424.SyslogMessage{}
+
+ sm.mapPriority(metric, msg)
+ sm.mapStructuredData(metric, msg)
+ sm.mapAppname(metric, msg)
+ mapHostname(metric, msg)
+ mapTimestamp(metric, msg)
+ mapMsgID(metric, msg)
+ mapVersion(metric, msg)
+ mapProcID(metric, msg)
+ mapMsg(metric, msg)
+
+ if !msg.Valid() {
+ return nil, errors.New("metric could not produce valid syslog message")
+ }
+ return msg, nil
+}
+
+func (sm *SyslogMapper) mapStructuredData(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
+ for _, tag := range metric.TagList() {
+ sm.mapStructuredDataItem(tag.Key, tag.Value, msg)
+ }
+ for _, field := range metric.FieldList() {
+ sm.mapStructuredDataItem(field.Key, formatValue(field.Value), msg)
+ }
+}
+
+func (sm *SyslogMapper) mapStructuredDataItem(key string, value string, msg *rfc5424.SyslogMessage) {
+ if sm.reservedKeys[key] {
+ return
+ }
+ isExplicitSdid := false
+ for _, sdid := range sm.Sdids {
+ k := strings.TrimLeft(key, sdid+sm.Separator)
+ if len(key) > len(k) {
+ isExplicitSdid = true
+ msg.SetParameter(sdid, k, value)
+ break
+ }
+ }
+ if !isExplicitSdid && len(sm.DefaultSdid) > 0 {
+ k := strings.TrimPrefix(key, sm.DefaultSdid+sm.Separator)
+ msg.SetParameter(sm.DefaultSdid, k, value)
+ }
+}
+
+func (sm *SyslogMapper) mapAppname(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
+ if value, ok := metric.GetTag("appname"); ok {
+ msg.SetAppname(formatValue(value))
+ } else {
+ //Use default appname
+ msg.SetAppname(sm.DefaultAppname)
+ }
+}
+
+func mapMsgID(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
+ if value, ok := metric.GetField("msgid"); ok {
+ msg.SetMsgID(formatValue(value))
+ } else {
+ // We default to metric name
+ msg.SetMsgID(metric.Name())
+ }
+}
+
+func mapVersion(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
+ if value, ok := metric.GetField("version"); ok {
+ switch v := value.(type) {
+ case uint64:
+ msg.SetVersion(uint16(v))
+ return
+ }
+ }
+ msg.SetVersion(1)
+}
+
+func mapMsg(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
+ if value, ok := metric.GetField("msg"); ok {
+ msg.SetMessage(formatValue(value))
+ }
+}
+
+func mapProcID(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
+ if value, ok := metric.GetField("procid"); ok {
+ msg.SetProcID(formatValue(value))
+ }
+}
+
+func (sm *SyslogMapper) mapPriority(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
+ severityCode := sm.DefaultSeverityCode
+ facilityCode := sm.DefaultFacilityCode
+
+ if value, ok := getFieldCode(metric, "severity_code"); ok {
+ severityCode = *value
+ }
+
+ if value, ok := getFieldCode(metric, "facility_code"); ok {
+ facilityCode = *value
+ }
+
+ priority := (8 * facilityCode) + severityCode
+ msg.SetPriority(priority)
+}
+
+func mapHostname(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
+ // Try with hostname, then with source, then with host tags, then take OS Hostname
+ if value, ok := metric.GetTag("hostname"); ok {
+ msg.SetHostname(formatValue(value))
+ } else if value, ok := metric.GetTag("source"); ok {
+ msg.SetHostname(formatValue(value))
+ } else if value, ok := metric.GetTag("host"); ok {
+ msg.SetHostname(formatValue(value))
+ } else if value, err := os.Hostname(); err == nil {
+ msg.SetHostname(value)
+ }
+}
+
+func mapTimestamp(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
+ timestamp := metric.Time()
+ if value, ok := metric.GetField("timestamp"); ok {
+ switch v := value.(type) {
+ case int64:
+ timestamp = time.Unix(0, v).UTC()
+ }
+ }
+ msg.SetTimestamp(timestamp.Format(time.RFC3339))
+}
+
+func formatValue(value interface{}) string {
+ switch v := value.(type) {
+ case string:
+ return v
+ case bool:
+ if v {
+ return "1"
+ }
+ return "0"
+ case uint64:
+ return strconv.FormatUint(v, 10)
+ case int64:
+ return strconv.FormatInt(v, 10)
+ case float64:
+ if math.IsNaN(v) {
+ return ""
+ }
+
+ if math.IsInf(v, 0) {
+ return ""
+ }
+ return strconv.FormatFloat(v, 'f', -1, 64)
+ }
+
+ return ""
+}
+
+func getFieldCode(metric telegraf.Metric, fieldKey string) (*uint8, bool) {
+ if value, ok := metric.GetField(fieldKey); ok {
+ if v, err := strconv.ParseUint(formatValue(value), 10, 8); err == nil {
+ r := uint8(v)
+ return &r, true
+ }
+ }
+ return nil, false
+}
+
+func newSyslogMapper() *SyslogMapper {
+ return &SyslogMapper{
+ reservedKeys: map[string]bool{
+ "version": true, "severity_code": true, "facility_code": true,
+ "procid": true, "msgid": true, "msg": true, "timestamp": true, "sdid": true,
+ "hostname": true, "source": true, "host": true, "severity": true,
+ "facility": true, "appname": true},
+ }
+}
diff --git a/plugins/outputs/syslog/syslog_mapper_test.go b/plugins/outputs/syslog/syslog_mapper_test.go
new file mode 100644
index 0000000000000..300d5fcabe561
--- /dev/null
+++ b/plugins/outputs/syslog/syslog_mapper_test.go
@@ -0,0 +1,200 @@
+package syslog
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/metric"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSyslogMapperWithDefaults(t *testing.T) {
+ s := newSyslog()
+ s.initializeSyslogMapper()
+
+ // Init metrics
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ hostname, err := os.Hostname()
+ assert.NoError(t, err)
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1)
+ require.NoError(t, err)
+ str, _ := syslogMessage.String()
+ assert.Equal(t, "<13>1 2010-11-10T23:00:00Z "+hostname+" Telegraf - testmetric -", str, "Wrong syslog message")
+}
+
+func TestSyslogMapperWithHostname(t *testing.T) {
+ s := newSyslog()
+ s.initializeSyslogMapper()
+
+ // Init metrics
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{
+ "hostname": "testhost",
+ "source": "sourcevalue",
+ "host": "hostvalue",
+ },
+ map[string]interface{}{},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1)
+ require.NoError(t, err)
+ str, _ := syslogMessage.String()
+ assert.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", str, "Wrong syslog message")
+}
+func TestSyslogMapperWithHostnameSourceFallback(t *testing.T) {
+ s := newSyslog()
+ s.initializeSyslogMapper()
+
+ // Init metrics
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{
+ "source": "sourcevalue",
+ "host": "hostvalue",
+ },
+ map[string]interface{}{},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1)
+ require.NoError(t, err)
+ str, _ := syslogMessage.String()
+ assert.Equal(t, "<13>1 2010-11-10T23:00:00Z sourcevalue Telegraf - testmetric -", str, "Wrong syslog message")
+}
+
+func TestSyslogMapperWithHostnameHostFallback(t *testing.T) {
+ s := newSyslog()
+ s.initializeSyslogMapper()
+
+ // Init metrics
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{
+ "host": "hostvalue",
+ },
+ map[string]interface{}{},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1)
+ require.NoError(t, err)
+ str, _ := syslogMessage.String()
+ assert.Equal(t, "<13>1 2010-11-10T23:00:00Z hostvalue Telegraf - testmetric -", str, "Wrong syslog message")
+}
+
+func TestSyslogMapperWithDefaultSdid(t *testing.T) {
+ s := newSyslog()
+ s.DefaultSdid = "default@32473"
+ s.initializeSyslogMapper()
+
+ // Init metrics
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{
+ "appname": "testapp",
+ "hostname": "testhost",
+ "tag1": "bar",
+ "default@32473_tag2": "foobar",
+ },
+ map[string]interface{}{
+ "severity_code": uint64(3),
+ "facility_code": uint64(3),
+ "msg": "Test message",
+ "procid": uint64(25),
+ "version": uint16(2),
+ "msgid": int64(555),
+ "timestamp": time.Date(2010, time.November, 10, 23, 30, 0, 0, time.UTC).UnixNano(),
+ "value1": int64(2),
+ "default@32473_value2": "foo",
+ "value3": float64(1.2),
+ },
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1)
+ require.NoError(t, err)
+ str, _ := syslogMessage.String()
+ assert.Equal(t, "<27>2 2010-11-10T23:30:00Z testhost testapp 25 555 [default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"foo\" value3=\"1.2\"] Test message", str, "Wrong syslog message")
+}
+
+func TestSyslogMapperWithDefaultSdidAndOtherSdids(t *testing.T) {
+ s := newSyslog()
+ s.DefaultSdid = "default@32473"
+ s.Sdids = []string{"bar@123", "foo@456"}
+ s.initializeSyslogMapper()
+
+ // Init metrics
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{
+ "appname": "testapp",
+ "hostname": "testhost",
+ "tag1": "bar",
+ "default@32473_tag2": "foobar",
+ "bar@123_tag3": "barfoobar",
+ },
+ map[string]interface{}{
+ "severity_code": uint64(1),
+ "facility_code": uint64(3),
+ "msg": "Test message",
+ "procid": uint64(25),
+ "version": uint16(2),
+ "msgid": int64(555),
+ "timestamp": time.Date(2010, time.November, 10, 23, 30, 0, 0, time.UTC).UnixNano(),
+ "value1": int64(2),
+ "default@32473_value2": "default",
+ "bar@123_value3": int64(2),
+ "foo@456_value4": "foo",
+ },
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1)
+ require.NoError(t, err)
+ str, _ := syslogMessage.String()
+ assert.Equal(t, "<25>2 2010-11-10T23:30:00Z testhost testapp 25 555 [bar@123 tag3=\"barfoobar\" value3=\"2\"][default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"default\"][foo@456 value4=\"foo\"] Test message", str, "Wrong syslog message")
+}
+
+func TestSyslogMapperWithNoSdids(t *testing.T) {
+ // Init mapper
+ s := newSyslog()
+ s.initializeSyslogMapper()
+
+ // Init metrics
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{
+ "appname": "testapp",
+ "hostname": "testhost",
+ "tag1": "bar",
+ "default@32473_tag2": "foobar",
+ "bar@123_tag3": "barfoobar",
+ "foo@456_tag4": "foobarfoo",
+ },
+ map[string]interface{}{
+ "severity_code": uint64(2),
+ "facility_code": uint64(3),
+ "msg": "Test message",
+ "procid": uint64(25),
+ "version": uint16(2),
+ "msgid": int64(555),
+ "timestamp": time.Date(2010, time.November, 10, 23, 30, 0, 0, time.UTC).UnixNano(),
+ "value1": int64(2),
+ "default@32473_value2": "default",
+ "bar@123_value3": int64(2),
+ "foo@456_value4": "foo",
+ },
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1)
+ require.NoError(t, err)
+ str, _ := syslogMessage.String()
+ assert.Equal(t, "<26>2 2010-11-10T23:30:00Z testhost testapp 25 555 - Test message", str, "Wrong syslog message")
+}
diff --git a/plugins/outputs/syslog/syslog_test.go b/plugins/outputs/syslog/syslog_test.go
new file mode 100644
index 0000000000000..7581a7b5380d5
--- /dev/null
+++ b/plugins/outputs/syslog/syslog_test.go
@@ -0,0 +1,205 @@
+package syslog
+
+import (
+ "net"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ framing "github.com/influxdata/telegraf/internal/syslog"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) {
+ // Init plugin
+ s := newSyslog()
+ s.initializeSyslogMapper()
+
+ // Init metrics
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{
+ "hostname": "testhost",
+ },
+ map[string]interface{}{},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1)
+ require.NoError(t, err)
+ messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
+ require.NoError(t, err)
+
+ assert.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octect counting framing")
+}
+
+func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) {
+ // Init plugin
+ s := newSyslog()
+ s.initializeSyslogMapper()
+ s.Framing = framing.NonTransparent
+
+ // Init metrics
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{
+ "hostname": "testhost",
+ },
+ map[string]interface{}{},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1)
+ require.NoError(t, err)
+ messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
+ require.NoError(t, err)
+
+ assert.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octect counting framing")
+}
+
+func TestSyslogWriteWithTcp(t *testing.T) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ s := newSyslog()
+ s.Address = "tcp://" + listener.Addr().String()
+
+ err = s.Connect()
+ require.NoError(t, err)
+
+ lconn, err := listener.Accept()
+ require.NoError(t, err)
+
+ testSyslogWriteWithStream(t, s, lconn)
+}
+
+func TestSyslogWriteWithUdp(t *testing.T) {
+ listener, err := net.ListenPacket("udp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ s := newSyslog()
+ s.Address = "udp://" + listener.LocalAddr().String()
+
+ err = s.Connect()
+ require.NoError(t, err)
+
+ testSyslogWriteWithPacket(t, s, listener)
+}
+
+func testSyslogWriteWithStream(t *testing.T, s *Syslog, lconn net.Conn) {
+ metrics := []telegraf.Metric{}
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC))
+
+ metrics = append(metrics, m1)
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0])
+ require.NoError(t, err)
+ messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
+ require.NoError(t, err)
+
+ err = s.Write(metrics)
+ require.NoError(t, err)
+
+ buf := make([]byte, 256)
+ n, err := lconn.Read(buf)
+ require.NoError(t, err)
+ assert.Equal(t, string(messageBytesWithFraming), string(buf[:n]))
+}
+
+func testSyslogWriteWithPacket(t *testing.T, s *Syslog, lconn net.PacketConn) {
+ s.Framing = framing.NonTransparent
+ metrics := []telegraf.Metric{}
+ m1, _ := metric.New(
+ "testmetric",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC))
+
+ metrics = append(metrics, m1)
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0])
+ require.NoError(t, err)
+ messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
+ require.NoError(t, err)
+
+ err = s.Write(metrics)
+ require.NoError(t, err)
+
+ buf := make([]byte, 256)
+ n, _, err := lconn.ReadFrom(buf)
+ require.NoError(t, err)
+ assert.Equal(t, string(messageBytesWithFraming), string(buf[:n]))
+}
+
+func TestSyslogWriteErr(t *testing.T) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ s := newSyslog()
+ s.Address = "tcp://" + listener.Addr().String()
+
+ err = s.Connect()
+ require.NoError(t, err)
+ s.Conn.(*net.TCPConn).SetReadBuffer(256)
+
+ lconn, err := listener.Accept()
+ require.NoError(t, err)
+ lconn.(*net.TCPConn).SetWriteBuffer(256)
+
+ metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")}
+
+ // close the socket to generate an error
+ lconn.Close()
+ s.Conn.Close()
+ err = s.Write(metrics)
+ require.Error(t, err)
+ assert.Nil(t, s.Conn)
+}
+
+func TestSyslogWriteReconnect(t *testing.T) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ s := newSyslog()
+ s.Address = "tcp://" + listener.Addr().String()
+
+ err = s.Connect()
+ require.NoError(t, err)
+ s.Conn.(*net.TCPConn).SetReadBuffer(256)
+
+ lconn, err := listener.Accept()
+ require.NoError(t, err)
+ lconn.(*net.TCPConn).SetWriteBuffer(256)
+ lconn.Close()
+ s.Conn = nil
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ var lerr error
+ go func() {
+ lconn, lerr = listener.Accept()
+ wg.Done()
+ }()
+
+ metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")}
+ err = s.Write(metrics)
+ require.NoError(t, err)
+
+ wg.Wait()
+ assert.NoError(t, lerr)
+
+ syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0])
+ require.NoError(t, err)
+ messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
+ require.NoError(t, err)
+ buf := make([]byte, 256)
+ n, err := lconn.Read(buf)
+ require.NoError(t, err)
+ assert.Equal(t, string(messageBytesWithFraming), string(buf[:n]))
+}
diff --git a/plugins/outputs/warp10/README.md b/plugins/outputs/warp10/README.md
new file mode 100644
index 0000000000000..07e6cd25b92be
--- /dev/null
+++ b/plugins/outputs/warp10/README.md
@@ -0,0 +1,50 @@
+# Warp10 Output Plugin
+
+The `warp10` output plugin writes metrics to [Warp 10][].
+
+### Configuration
+
+```toml
+[[outputs.warp10]]
+ # Prefix to add to the measurement.
+ prefix = "telegraf."
+
+ # URL of the Warp 10 server
+ warp_url = "http://localhost:8080"
+
+ # Write token to access your app on warp 10
+ token = "Token"
+
+ # Warp 10 query timeout
+ # timeout = "15s"
+
+ ## Print Warp 10 error body
+ # print_error_body = false
+
+ ## Max string error size
+ # max_string_error_size = 511
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+```
+
+### Output Format
+
+Metrics are converted and sent using the [Geo Time Series][] (GTS) input format.
+
+The class name of the reading is produced by combining the value of the
+`prefix` option, the measurement name, and the field key. A dot (`.`)
+character is used as the joining character.
+
+The GTS form provides support for the Telegraf integer, float, boolean, and
+string types directly. Unsigned integer fields will be capped to the largest
+64-bit integer (2^63-1) in case of overflow.
+
+Timestamps are sent in microsecond precision.
+
+[Warp 10]: https://www.warp10.io
+[Geo Time Series]: https://www.warp10.io/content/03_Documentation/03_Interacting_with_Warp_10/03_Ingesting_data/02_GTS_input_format
diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go
new file mode 100644
index 0000000000000..73eefbf722deb
--- /dev/null
+++ b/plugins/outputs/warp10/warp10.go
@@ -0,0 +1,291 @@
+package warp10
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/common/tls"
+ "github.com/influxdata/telegraf/plugins/outputs"
+)
+
+const (
+ defaultClientTimeout = 15 * time.Second
+)
+
+// Warp10 output plugin
+type Warp10 struct {
+ Prefix string `toml:"prefix"`
+ WarpURL string `toml:"warp_url"`
+ Token string `toml:"token"`
+ Timeout internal.Duration `toml:"timeout"`
+ PrintErrorBody bool `toml:"print_error_body"`
+ MaxStringErrorSize int `toml:"max_string_error_size"`
+ client *http.Client
+ tls.ClientConfig
+}
+
+var sampleConfig = `
+ # Prefix to add to the measurement.
+ prefix = "telegraf."
+
+ # URL of the Warp 10 server
+ warp_url = "http://localhost:8080"
+
+ # Write token to access your app on warp 10
+ token = "Token"
+
+ # Warp 10 query timeout
+ # timeout = "15s"
+
+ ## Print Warp 10 error body
+ # print_error_body = false
+
+ ## Max string error size
+ # max_string_error_size = 511
+
+ ## Optional TLS Config
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+`
+
+// MetricLine Warp 10 metrics
+type MetricLine struct {
+ Metric string
+ Timestamp int64
+ Value string
+ Tags string
+}
+
+func (w *Warp10) createClient() (*http.Client, error) {
+ tlsCfg, err := w.ClientConfig.TLSConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ if w.Timeout.Duration == 0 {
+ w.Timeout.Duration = defaultClientTimeout
+ }
+
+ client := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ Proxy: http.ProxyFromEnvironment,
+ },
+ Timeout: w.Timeout.Duration,
+ }
+
+ return client, nil
+}
+
+// Connect to warp10
+func (w *Warp10) Connect() error {
+ client, err := w.createClient()
+ if err != nil {
+ return err
+ }
+
+ w.client = client
+ return nil
+}
+
+// GenWarp10Payload compute Warp 10 metrics payload
+func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric) string {
+ collectString := make([]string, 0)
+ for _, mm := range metrics {
+
+ for _, field := range mm.FieldList() {
+
+ metric := &MetricLine{
+ Metric: fmt.Sprintf("%s%s", w.Prefix, mm.Name()+"."+field.Key),
+ Timestamp: mm.Time().UnixNano() / 1000,
+ }
+
+ metricValue, err := buildValue(field.Value)
+ if err != nil {
+ log.Printf("E! [outputs.warp10] Could not encode value: %v", err)
+ continue
+ }
+ metric.Value = metricValue
+
+ tagsSlice := buildTags(mm.TagList())
+ metric.Tags = strings.Join(tagsSlice, ",")
+
+ messageLine := fmt.Sprintf("%d// %s{%s} %s\n", metric.Timestamp, metric.Metric, metric.Tags, metric.Value)
+
+ collectString = append(collectString, messageLine)
+ }
+ }
+ return fmt.Sprint(strings.Join(collectString, ""))
+}
+
+// Write metrics to Warp10
+func (w *Warp10) Write(metrics []telegraf.Metric) error {
+ payload := w.GenWarp10Payload(metrics)
+ if payload == "" {
+ return nil
+ }
+
+ req, err := http.NewRequest("POST", w.WarpURL+"/api/v0/update", bytes.NewBufferString(payload))
+ req.Header.Set("X-Warp10-Token", w.Token)
+ req.Header.Set("Content-Type", "text/plain")
+
+ resp, err := w.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ if w.PrintErrorBody {
+ body, _ := ioutil.ReadAll(resp.Body)
+ return fmt.Errorf(w.WarpURL + ": " + w.HandleError(string(body), w.MaxStringErrorSize))
+ }
+
+ if len(resp.Status) < w.MaxStringErrorSize {
+ return fmt.Errorf(w.WarpURL + ": " + resp.Status)
+ }
+
+ return fmt.Errorf(w.WarpURL + ": " + resp.Status[0:w.MaxStringErrorSize])
+ }
+
+ return nil
+}
+
+func buildTags(tags []*telegraf.Tag) []string {
+
+ tagsString := make([]string, len(tags)+1)
+ indexSource := 0
+ for index, tag := range tags {
+ tagsString[index] = fmt.Sprintf("%s=%s", tag.Key, tag.Value)
+ indexSource = index
+ }
+ indexSource++
+ tagsString[indexSource] = fmt.Sprintf("source=telegraf")
+ sort.Strings(tagsString)
+ return tagsString
+}
+
+func buildValue(v interface{}) (string, error) {
+ var retv string
+ switch p := v.(type) {
+ case int64:
+ retv = intToString(p)
+ case string:
+ retv = fmt.Sprintf("'%s'", strings.Replace(p, "'", "\\'", -1))
+ case bool:
+ retv = boolToString(p)
+ case uint64:
+ if p <= uint64(math.MaxInt64) {
+ retv = strconv.FormatInt(int64(p), 10)
+ } else {
+ retv = strconv.FormatInt(math.MaxInt64, 10)
+ }
+ case float64:
+ retv = floatToString(float64(p))
+ default:
+ return "", fmt.Errorf("unsupported type: %T", v)
+ }
+ return retv, nil
+}
+
+func intToString(inputNum int64) string {
+ return strconv.FormatInt(inputNum, 10)
+}
+
+func boolToString(inputBool bool) string {
+ return strconv.FormatBool(inputBool)
+}
+
+func uIntToString(inputNum uint64) string {
+ return strconv.FormatUint(inputNum, 10)
+}
+
+func floatToString(inputNum float64) string {
+ return strconv.FormatFloat(inputNum, 'f', 6, 64)
+}
+
+// SampleConfig get config
+func (w *Warp10) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description get description
+func (w *Warp10) Description() string {
+ return "Write metrics to Warp 10"
+}
+
+// Close close
+func (w *Warp10) Close() error {
+ return nil
+}
+
+// Init Warp10 struct
+func (w *Warp10) Init() error {
+ if w.MaxStringErrorSize <= 0 {
+ w.MaxStringErrorSize = 511
+ }
+ return nil
+}
+
+func init() {
+ outputs.Add("warp10", func() telegraf.Output {
+ return &Warp10{}
+ })
+}
+
+// HandleError read http error body and return a corresponding error
+func (w *Warp10) HandleError(body string, maxStringSize int) string {
+ if body == "" {
+ return "Empty return"
+ }
+
+ if strings.Contains(body, "Invalid token") {
+ return "Invalid token"
+ }
+
+ if strings.Contains(body, "Write token missing") {
+ return "Write token missing"
+ }
+
+ if strings.Contains(body, "Token Expired") {
+ return "Token Expired"
+ }
+
+ if strings.Contains(body, "Token revoked") {
+ return "Token revoked"
+ }
+
+ if strings.Contains(body, "exceed your Monthly Active Data Streams limit") || strings.Contains(body, "exceed the Monthly Active Data Streams limit") {
+ return "Exceeded Monthly Active Data Streams limit"
+ }
+
+ if strings.Contains(body, "Daily Data Points limit being already exceeded") {
+ return "Exceeded Daily Data Points limit"
+ }
+
+ if strings.Contains(body, "Application suspended or closed") {
+ return "Application suspended or closed"
+ }
+
+ if strings.Contains(body, "broken pipe") {
+ return "broken pipe"
+ }
+
+ if len(body) < maxStringSize {
+ return body
+ }
+ return body[0:maxStringSize]
+}
diff --git a/plugins/outputs/warp10/warp10_test.go b/plugins/outputs/warp10/warp10_test.go
new file mode 100644
index 0000000000000..5b543b34c0d8b
--- /dev/null
+++ b/plugins/outputs/warp10/warp10_test.go
@@ -0,0 +1,105 @@
+package warp10
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+type ErrorTest struct {
+ Message string
+ Expected string
+}
+
+func TestWriteWarp10(t *testing.T) {
+ w := Warp10{
+ Prefix: "unit.test",
+ WarpURL: "http://localhost:8090",
+ Token: "WRITE",
+ }
+
+ payload := w.GenWarp10Payload(testutil.MockMetrics())
+ require.Exactly(t, "1257894000000000// unit.testtest1.value{source=telegraf,tag1=value1} 1.000000\n", payload)
+}
+
+func TestHandleWarp10Error(t *testing.T) {
+ w := Warp10{
+ Prefix: "unit.test",
+ WarpURL: "http://localhost:8090",
+ Token: "WRITE",
+ }
+ tests := [...]*ErrorTest{
+ {
+ Message: `
+
+
+
+ Error 500 io.warp10.script.WarpScriptException: Invalid token.
+
+ HTTP ERROR 500
+ Problem accessing /api/v0/update. Reason:
+
io.warp10.script.WarpScriptException: Invalid token.
+
+
+ `,
+ Expected: fmt.Sprintf("Invalid token"),
+ },
+ {
+ Message: `
+
+
+
+ Error 500 io.warp10.script.WarpScriptException: Token Expired.
+
+ HTTP ERROR 500
+ Problem accessing /api/v0/update. Reason:
+
io.warp10.script.WarpScriptException: Token Expired.
+
+
+ `,
+ Expected: fmt.Sprintf("Token Expired"),
+ },
+ {
+ Message: `
+
+
+
+ Error 500 io.warp10.script.WarpScriptException: Token revoked.
+
+ HTTP ERROR 500
+ Problem accessing /api/v0/update. Reason:
+
io.warp10.script.WarpScriptException: Token revoked.
+
+
+ `,
+ Expected: fmt.Sprintf("Token revoked"),
+ },
+ {
+ Message: `
+
+
+
+ Error 500 io.warp10.script.WarpScriptException: Write token missing.
+
+ HTTP ERROR 500
+ Problem accessing /api/v0/update. Reason:
+
io.warp10.script.WarpScriptException: Write token missing.
+
+
+ `,
+ Expected: "Write token missing",
+ },
+ {
+ Message: `Error 503: server unavailable `,
+ Expected: "Error 503: server unavailable ",
+ },
+ }
+
+ for _, handledError := range tests {
+ payload := w.HandleError(handledError.Message, 511)
+ require.Exactly(t, handledError.Expected, payload)
+ }
+
+}
diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md
index 71a760900e58a..2daca328cd577 100644
--- a/plugins/outputs/wavefront/README.md
+++ b/plugins/outputs/wavefront/README.md
@@ -33,7 +33,7 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro
#convert_paths = true
## Use Strict rules to sanitize metric and tag names from invalid characters
- ## When enabled forward slash (/) and comma (,) will be accpeted
+ ## When enabled forward slash (/) and comma (,) will be accepted
#use_strict = false
## Use Regex to sanitize metric and tag names from invalid characters
@@ -45,6 +45,10 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro
## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
#convert_bool = true
+
+ ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any
+ ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility.
+ #truncate_tags = false
```
@@ -71,7 +75,7 @@ source of the metric.
### Wavefront Data format
The expected input for Wavefront is specified in the following way:
```
- [] = [tagk1=tagv1 ...tagkN=tagvN]
+ [] = [tagk1=tagv1 ...tagkN=tagvN]
```
More information about the Wavefront data format is available [here](https://community.wavefront.com/docs/DOC-1031)
diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go
index 65666d627cacc..523549fb127e2 100644
--- a/plugins/outputs/wavefront/wavefront.go
+++ b/plugins/outputs/wavefront/wavefront.go
@@ -2,7 +2,6 @@ package wavefront
import (
"fmt"
- "log"
"regexp"
"strings"
@@ -11,6 +10,8 @@ import (
wavefront "github.com/wavefronthq/wavefront-sdk-go/senders"
)
+const maxTagLength = 254
+
type Wavefront struct {
Url string
Token string
@@ -23,10 +24,12 @@ type Wavefront struct {
ConvertBool bool
UseRegex bool
UseStrict bool
+ TruncateTags bool
SourceOverride []string
StringToNumber map[string][]map[string]float64
sender wavefront.Sender
+ Log telegraf.Logger
}
// catch many of the invalid chars that could appear in a metric or tag name
@@ -81,7 +84,7 @@ var sampleConfig = `
#convert_paths = true
## Use Strict rules to sanitize metric and tag names from invalid characters
- ## When enabled forward slash (/) and comma (,) will be accpeted
+ ## When enabled forward slash (/) and comma (,) will be accepted
#use_strict = false
## Use Regex to sanitize metric and tag names from invalid characters
@@ -94,6 +97,10 @@ var sampleConfig = `
## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
#convert_bool = true
+ ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any
+ ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility.
+ #truncate_tags = false
+
## Define a mapping, namespaced by metric prefix, from string values to numeric values
## deprecated in 1.9; use the enum processor plugin
#[[outputs.wavefront.string_to_number.elasticsearch]]
@@ -113,11 +120,11 @@ type MetricPoint struct {
func (w *Wavefront) Connect() error {
if len(w.StringToNumber) > 0 {
- log.Print("W! [outputs.wavefront] The string_to_number option is deprecated; please use the enum processor instead")
+ w.Log.Warn("The string_to_number option is deprecated; please use the enum processor instead")
}
if w.Url != "" {
- log.Printf("D! [outputs.wavefront] connecting over http/https using Url: %s", w.Url)
+ w.Log.Debug("connecting over http/https using Url: %s", w.Url)
sender, err := wavefront.NewDirectSender(&wavefront.DirectConfiguration{
Server: w.Url,
Token: w.Token,
@@ -128,14 +135,14 @@ func (w *Wavefront) Connect() error {
}
w.sender = sender
} else {
- log.Printf("D! Output [wavefront] connecting over tcp using Host: %s and Port: %d", w.Host, w.Port)
+ w.Log.Debugf("connecting over tcp using Host: %q and Port: %d", w.Host, w.Port)
sender, err := wavefront.NewProxySender(&wavefront.ProxyConfiguration{
Host: w.Host,
MetricsPort: w.Port,
FlushIntervalSeconds: 5,
})
if err != nil {
- return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Host: %s and Port: %d", w.Host, w.Port)
+ return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Host: %q and Port: %d", w.Host, w.Port)
}
w.sender = sender
}
@@ -152,18 +159,17 @@ func (w *Wavefront) Connect() error {
func (w *Wavefront) Write(metrics []telegraf.Metric) error {
for _, m := range metrics {
- for _, point := range buildMetrics(m, w) {
+ for _, point := range w.buildMetrics(m) {
err := w.sender.SendMetric(point.Metric, point.Value, point.Timestamp, point.Source, point.Tags)
if err != nil {
return fmt.Errorf("Wavefront sending error: %s", err.Error())
}
}
}
-
return nil
}
-func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint {
+func (w *Wavefront) buildMetrics(m telegraf.Metric) []*MetricPoint {
ret := []*MetricPoint{}
for fieldName, value := range m.Fields() {
@@ -193,12 +199,12 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint {
metricValue, buildError := buildValue(value, metric.Metric, w)
if buildError != nil {
- log.Printf("D! [outputs.wavefront] %s\n", buildError.Error())
+ w.Log.Debugf("Error building tags: %s\n", buildError.Error())
continue
}
metric.Value = metricValue
- source, tags := buildTags(m.Tags(), w)
+ source, tags := w.buildTags(m.Tags())
metric.Source = source
metric.Tags = tags
@@ -207,7 +213,7 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint {
return ret
}
-func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string) {
+func (w *Wavefront) buildTags(mTags map[string]string) (string, map[string]string) {
// Remove all empty tags.
for k, v := range mTags {
@@ -259,6 +265,16 @@ func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string
key = sanitizedChars.Replace(k)
}
val := tagValueReplacer.Replace(v)
+ if w.TruncateTags {
+ if len(key) > maxTagLength {
+ w.Log.Warnf("Tag key length > 254. Skipping tag: %s", key)
+ continue
+ }
+ if len(key)+len(val) > maxTagLength {
+ w.Log.Debugf("Key+value length > 254: %s", key)
+ val = val[:maxTagLength-len(key)]
+ }
+ }
tags[key] = val
}
@@ -296,7 +312,6 @@ func buildValue(v interface{}, name string, w *Wavefront) (float64, error) {
default:
return 0, fmt.Errorf("unexpected type: %T, with value: %v, for: %s", v, v, name)
}
-
return 0, fmt.Errorf("unexpected type: %T, with value: %v, for: %s", v, v, name)
}
@@ -320,6 +335,7 @@ func init() {
MetricSeparator: ".",
ConvertPaths: true,
ConvertBool: true,
+ TruncateTags: false,
}
})
}
diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go
index 776c3698f8cdd..40707e6d6c8b0 100644
--- a/plugins/outputs/wavefront/wavefront_test.go
+++ b/plugins/outputs/wavefront/wavefront_test.go
@@ -4,6 +4,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
"reflect"
"strings"
"testing"
@@ -21,6 +22,7 @@ func defaultWavefront() *Wavefront {
ConvertPaths: true,
ConvertBool: true,
UseRegex: false,
+ Log: testutil.Logger{},
}
}
@@ -64,7 +66,7 @@ func TestBuildMetrics(t *testing.T) {
}
for _, mt := range metricTests {
- ml := buildMetrics(mt.metric, w)
+ ml := w.buildMetrics(mt.metric)
for i, line := range ml {
if mt.metricPoints[i].Metric != line.Metric || mt.metricPoints[i].Value != line.Value {
t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricPoints[i].Metric, mt.metricPoints[i].Value, line.Metric, line.Value)
@@ -104,7 +106,7 @@ func TestBuildMetricsStrict(t *testing.T) {
}
for _, mt := range metricTests {
- ml := buildMetrics(mt.metric, w)
+ ml := w.buildMetrics(mt.metric)
for i, line := range ml {
if mt.metricPoints[i].Metric != line.Metric || mt.metricPoints[i].Value != line.Value {
t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricPoints[i].Metric, mt.metricPoints[i].Value, line.Metric, line.Value)
@@ -143,7 +145,7 @@ func TestBuildMetricsWithSimpleFields(t *testing.T) {
}
for _, mt := range metricTests {
- ml := buildMetrics(mt.metric, w)
+ ml := w.buildMetrics(mt.metric)
for i, line := range ml {
if mt.metricLines[i].Metric != line.Metric || mt.metricLines[i].Value != line.Value {
t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricLines[i].Metric, mt.metricLines[i].Value, line.Metric, line.Value)
@@ -195,7 +197,7 @@ func TestBuildTags(t *testing.T) {
}
for _, tt := range tagtests {
- source, tags := buildTags(tt.ptIn, w)
+ source, tags := w.buildTags(tt.ptIn)
if source != tt.outSource {
t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outSource, source)
}
@@ -247,7 +249,7 @@ func TestBuildTagsWithSource(t *testing.T) {
}
for _, tt := range tagtests {
- source, tags := buildTags(tt.ptIn, w)
+ source, tags := w.buildTags(tt.ptIn)
if source != tt.outSource {
t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outSource, source)
}
@@ -316,6 +318,42 @@ func TestBuildValueString(t *testing.T) {
}
+func TestTagLimits(t *testing.T) {
+ w := defaultWavefront()
+ w.TruncateTags = true
+
+ // Should fail (all tags skipped)
+ template := make(map[string]string)
+ template[strings.Repeat("x", 255)] = "whatever"
+ _, tags := w.buildTags(template)
+ require.Empty(t, tags, "All tags should have been skipped")
+
+ // Should truncate value
+ template = make(map[string]string)
+ longKey := strings.Repeat("x", 253)
+ template[longKey] = "whatever"
+ _, tags = w.buildTags(template)
+ require.Contains(t, tags, longKey, "Should contain truncated long key")
+ require.Equal(t, "w", tags[longKey])
+
+ // Should not truncate
+ template = make(map[string]string)
+ longKey = strings.Repeat("x", 251)
+ template[longKey] = "Hi!"
+ _, tags = w.buildTags(template)
+ require.Contains(t, tags, longKey, "Should contain non truncated long key")
+ require.Equal(t, "Hi!", tags[longKey])
+
+ // Turn off truncating and make sure it leaves the tags intact
+ w.TruncateTags = false
+ template = make(map[string]string)
+ longKey = strings.Repeat("x", 255)
+ template[longKey] = longKey
+ _, tags = w.buildTags(template)
+ require.Contains(t, tags, longKey, "Should contain non truncated long key")
+ require.Equal(t, longKey, tags[longKey])
+}
+
// Benchmarks to test performance of string replacement via Regex and Replacer
var testString = "this_is*my!test/string\\for=replacement"
diff --git a/plugins/parsers/collectd/README.md b/plugins/parsers/collectd/README.md
index 06f14d6d4bb5a..cc7daa4f6af42 100644
--- a/plugins/parsers/collectd/README.md
+++ b/plugins/parsers/collectd/README.md
@@ -20,8 +20,8 @@ You can also change the path to the typesdb or add additional typesdb using
### Configuration
```toml
-[[inputs.file]]
- files = ["example"]
+[[inputs.socket_listener]]
+ service_address = "udp://:25826"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
@@ -42,3 +42,16 @@ You can also change the path to the typesdb or add additional typesdb using
## "split" is the default behavior for backward compatability with previous versions of influxdb.
collectd_parse_multivalue = "split"
```
+
+### Example Output
+
+```
+memory,type=memory,type_instance=buffered value=2520051712 1560455990829955922
+memory,type=memory,type_instance=used value=3710791680 1560455990829955922
+memory,type=memory,type_instance=buffered value=2520047616 1560455980830417318
+memory,type=memory,type_instance=cached value=9472626688 1560455980830417318
+memory,type=memory,type_instance=slab_recl value=2088894464 1560455980830417318
+memory,type=memory,type_instance=slab_unrecl value=146984960 1560455980830417318
+memory,type=memory,type_instance=free value=2978258944 1560455980830417318
+memory,type=memory,type_instance=used value=3707047936 1560455980830417318
+```
diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go
index 42a4d4c7a567f..a218341569e23 100644
--- a/plugins/parsers/collectd/parser_test.go
+++ b/plugins/parsers/collectd/parser_test.go
@@ -110,7 +110,7 @@ var multiMetric = testCase{
func TestNewCollectdParser(t *testing.T) {
parser, err := NewCollectdParser("", "", []string{}, "join")
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, parser.popts.SecurityLevel, network.None)
require.NotNil(t, parser.popts.PasswordLookup)
require.Nil(t, parser.popts.TypesDB)
@@ -121,14 +121,14 @@ func TestParse(t *testing.T) {
for _, tc := range cases {
buf, err := writeValueList(tc.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
bytes, err := buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
parser := &CollectdParser{}
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err := parser.Parse(bytes)
- require.Nil(t, err)
+ require.NoError(t, err)
assertEqualMetrics(t, tc.expected, metrics)
}
@@ -136,30 +136,30 @@ func TestParse(t *testing.T) {
func TestParseMultiValueSplit(t *testing.T) {
buf, err := writeValueList(multiMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
bytes, err := buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
parser := &CollectdParser{ParseMultiValue: "split"}
metrics, err := parser.Parse(bytes)
- require.Nil(t, err)
+ require.NoError(t, err)
assert.Equal(t, 2, len(metrics))
}
func TestParse_DefaultTags(t *testing.T) {
buf, err := writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
bytes, err := buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
parser := &CollectdParser{}
parser.SetDefaultTags(map[string]string{
"foo": "bar",
})
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err := parser.Parse(bytes)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, "bar", metrics[0].Tags()["foo"])
}
@@ -178,45 +178,45 @@ func TestParse_SignSecurityLevel(t *testing.T) {
// Signed data
buf, err := writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
buf.Sign("user0", "bar")
bytes, err := buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err := parser.Parse(bytes)
- require.Nil(t, err)
+ require.NoError(t, err)
assertEqualMetrics(t, singleMetric.expected, metrics)
// Encrypted data
buf, err = writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
buf.Encrypt("user0", "bar")
bytes, err = buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err = parser.Parse(bytes)
- require.Nil(t, err)
+ require.NoError(t, err)
assertEqualMetrics(t, singleMetric.expected, metrics)
// Plain text data skipped
buf, err = writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
bytes, err = buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err = parser.Parse(bytes)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, []telegraf.Metric{}, metrics)
// Wrong password error
buf, err = writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
buf.Sign("x", "y")
bytes, err = buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err = parser.Parse(bytes)
- require.NotNil(t, err)
+ require.Error(t, err)
}
func TestParse_EncryptSecurityLevel(t *testing.T) {
@@ -233,57 +233,57 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
// Signed data skipped
buf, err := writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
buf.Sign("user0", "bar")
bytes, err := buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err := parser.Parse(bytes)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, []telegraf.Metric{}, metrics)
// Encrypted data
buf, err = writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
buf.Encrypt("user0", "bar")
bytes, err = buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err = parser.Parse(bytes)
- require.Nil(t, err)
+ require.NoError(t, err)
assertEqualMetrics(t, singleMetric.expected, metrics)
// Plain text data skipped
buf, err = writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
bytes, err = buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err = parser.Parse(bytes)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, []telegraf.Metric{}, metrics)
// Wrong password error
buf, err = writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
buf.Sign("x", "y")
bytes, err = buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
metrics, err = parser.Parse(bytes)
- require.NotNil(t, err)
+ require.Error(t, err)
}
func TestParseLine(t *testing.T) {
buf, err := writeValueList(singleMetric.vl)
- require.Nil(t, err)
+ require.NoError(t, err)
bytes, err := buf.Bytes()
- require.Nil(t, err)
+ require.NoError(t, err)
parser, err := NewCollectdParser("", "", []string{}, "split")
- require.Nil(t, err)
+ require.NoError(t, err)
metric, err := parser.ParseLine(string(bytes))
- require.Nil(t, err)
+ require.NoError(t, err)
assertEqualMetrics(t, singleMetric.expected, []telegraf.Metric{metric})
}
diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md
index ec1ffa1cac4e9..b44d2fc2d2576 100644
--- a/plugins/parsers/csv/README.md
+++ b/plugins/parsers/csv/README.md
@@ -29,6 +29,7 @@ values.
## For assigning explicit data types to columns.
## Supported types: "int", "float", "bool", "string".
+ ## Specify types in order by column (e.g. `["string", "int", "float"]`)
## If this is not specified, type conversion will be done on the types above.
csv_column_types = []
@@ -39,7 +40,7 @@ values.
## These columns will be skipped in the header as well.
csv_skip_columns = 0
- ## The seperator between csv fields
+ ## The separator between csv fields
## By default, the parser assumes a comma (",")
csv_delimiter = ","
@@ -55,16 +56,23 @@ values.
## will be added as fields.
csv_tag_columns = []
- ## The column to extract the name of the metric from
+ ## The column to extract the name of the metric from. Will not be
+ ## included as field in metric.
csv_measurement_column = ""
## The column to extract time information for the metric
- ## `csv_timestamp_format` must be specified if this is used
+ ## `csv_timestamp_format` must be specified if this is used.
+ ## Will not be included as field in metric.
csv_timestamp_column = ""
## The format of time data extracted from `csv_timestamp_column`
## this must be specified if `csv_timestamp_column` is specified
csv_timestamp_format = ""
+
+ ## The timezone of time data extracted from `csv_timestamp_column`
+ ## in case of there is no timezone information.
+ ## It follows the IANA Time Zone database.
+ csv_timezone = ""
```
#### csv_timestamp_column, csv_timestamp_format
@@ -86,10 +94,13 @@ on how to set the time format.
One metric is created for each row with the columns added as fields. The type
of the field is automatically determined based on the contents of the value.
+In addition to the options above, you can use [metric filtering][] to skip over
+columns and rows.
+
### Examples
Config:
-```
+```toml
[[inputs.file]]
files = ["example"]
data_format = "csv"
@@ -108,3 +119,5 @@ Output:
```
cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000
```
+
+[metric filtering]: /docs/CONFIGURATION.md#metric-filtering
diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go
index f8bf93e702536..76d8306ea6e46 100644
--- a/plugins/parsers/csv/parser.go
+++ b/plugins/parsers/csv/parser.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/csv"
"fmt"
+ "io"
"strconv"
"strings"
"time"
@@ -13,29 +14,68 @@ import (
"github.com/influxdata/telegraf/metric"
)
+type TimeFunc func() time.Time
+
+type Config struct {
+ ColumnNames []string `toml:"csv_column_names"`
+ ColumnTypes []string `toml:"csv_column_types"`
+ Comment string `toml:"csv_comment"`
+ Delimiter string `toml:"csv_delimiter"`
+ HeaderRowCount int `toml:"csv_header_row_count"`
+ MeasurementColumn string `toml:"csv_measurement_column"`
+ MetricName string `toml:"metric_name"`
+ SkipColumns int `toml:"csv_skip_columns"`
+ SkipRows int `toml:"csv_skip_rows"`
+ TagColumns []string `toml:"csv_tag_columns"`
+ TimestampColumn string `toml:"csv_timestamp_column"`
+ TimestampFormat string `toml:"csv_timestamp_format"`
+ Timezone string `toml:"csv_timezone"`
+ TrimSpace bool `toml:"csv_trim_space"`
+
+ TimeFunc func() time.Time
+ DefaultTags map[string]string
+}
+
+// Parser is a CSV parser, you should use NewParser to create a new instance.
type Parser struct {
- MetricName string
- HeaderRowCount int
- SkipRows int
- SkipColumns int
- Delimiter string
- Comment string
- TrimSpace bool
- ColumnNames []string
- ColumnTypes []string
- TagColumns []string
- MeasurementColumn string
- TimestampColumn string
- TimestampFormat string
- DefaultTags map[string]string
- TimeFunc func() time.Time
+ *Config
+}
+
+func NewParser(c *Config) (*Parser, error) {
+ if c.HeaderRowCount == 0 && len(c.ColumnNames) == 0 {
+ return nil, fmt.Errorf("`csv_header_row_count` must be defined if `csv_column_names` is not specified")
+ }
+
+ if c.Delimiter != "" {
+ runeStr := []rune(c.Delimiter)
+ if len(runeStr) > 1 {
+ return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", c.Delimiter)
+ }
+ }
+
+ if c.Comment != "" {
+ runeStr := []rune(c.Comment)
+ if len(runeStr) > 1 {
+ return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", c.Comment)
+ }
+ }
+
+ if len(c.ColumnNames) > 0 && len(c.ColumnTypes) > 0 && len(c.ColumnNames) != len(c.ColumnTypes) {
+ return nil, fmt.Errorf("csv_column_names field count doesn't match with csv_column_types")
+ }
+
+ if c.TimeFunc == nil {
+ c.TimeFunc = time.Now
+ }
+
+ return &Parser{Config: c}, nil
}
-func (p *Parser) SetTimeFunc(fn metric.TimeFunc) {
+func (p *Parser) SetTimeFunc(fn TimeFunc) {
p.TimeFunc = fn
}
-func (p *Parser) compile(r *bytes.Reader) (*csv.Reader, error) {
+func (p *Parser) compile(r io.Reader) (*csv.Reader, error) {
csvReader := csv.NewReader(r)
// ensures that the reader reads records of different lengths without an error
csvReader.FieldsPerRecord = -1
@@ -45,6 +85,7 @@ func (p *Parser) compile(r *bytes.Reader) (*csv.Reader, error) {
if p.Comment != "" {
csvReader.Comment = []rune(p.Comment)[0]
}
+ csvReader.TrimLeadingSpace = p.TrimSpace
return csvReader, nil
}
@@ -56,7 +97,10 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
}
// skip first rows
for i := 0; i < p.SkipRows; i++ {
- csvReader.Read()
+ _, err := csvReader.Read()
+ if err != nil {
+ return nil, err
+ }
}
// if there is a header and nothing in DataColumns
// set DataColumns to names extracted from the header
@@ -84,7 +128,10 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
} else {
// if columns are named, just skip header rows
for i := 0; i < p.HeaderRowCount; i++ {
- csvReader.Read()
+ _, err := csvReader.Read()
+ if err != nil {
+ return nil, err
+ }
}
}
@@ -204,15 +251,21 @@ outer:
// will default to plugin name
measurementName := p.MetricName
- if recordFields[p.MeasurementColumn] != nil && recordFields[p.MeasurementColumn] != "" {
- measurementName = fmt.Sprintf("%v", recordFields[p.MeasurementColumn])
+ if p.MeasurementColumn != "" {
+ if recordFields[p.MeasurementColumn] != nil && recordFields[p.MeasurementColumn] != "" {
+ measurementName = fmt.Sprintf("%v", recordFields[p.MeasurementColumn])
+ }
}
- metricTime, err := parseTimestamp(p.TimeFunc, recordFields, p.TimestampColumn, p.TimestampFormat)
+ metricTime, err := parseTimestamp(p.TimeFunc, recordFields, p.TimestampColumn, p.TimestampFormat, p.Timezone)
if err != nil {
return nil, err
}
+ // Exclude `TimestampColumn` and `MeasurementColumn`
+ delete(recordFields, p.TimestampColumn)
+ delete(recordFields, p.MeasurementColumn)
+
m, err := metric.New(measurementName, tags, recordFields, metricTime)
if err != nil {
return nil, err
@@ -224,30 +277,26 @@ outer:
// will be the current timestamp, else it will try to parse the time according
// to the format.
func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface{},
- timestampColumn, timestampFormat string,
-) (metricTime time.Time, err error) {
- metricTime = timeFunc()
-
+ timestampColumn, timestampFormat string, Timezone string,
+) (time.Time, error) {
if timestampColumn != "" {
if recordFields[timestampColumn] == nil {
- err = fmt.Errorf("timestamp column: %v could not be found", timestampColumn)
- return
+ return time.Time{}, fmt.Errorf("timestamp column: %v could not be found", timestampColumn)
}
- tStr := fmt.Sprintf("%v", recordFields[timestampColumn])
-
switch timestampFormat {
case "":
- err = fmt.Errorf("timestamp format must be specified")
- return
+ return time.Time{}, fmt.Errorf("timestamp format must be specified")
default:
- metricTime, err = internal.ParseTimestamp(tStr, timestampFormat)
+ metricTime, err := internal.ParseTimestamp(timestampFormat, recordFields[timestampColumn], Timezone)
if err != nil {
- return
+ return time.Time{}, err
}
+ return metricTime, err
}
}
- return
+
+ return timeFunc(), nil
}
// SetDefaultTags set the DefaultTags
diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go
index 93ae6bcdd168f..c0f489365eb75 100644
--- a/plugins/parsers/csv/parser_test.go
+++ b/plugins/parsers/csv/parser_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
@@ -15,22 +16,28 @@ var DefaultTime = func() time.Time {
}
func TestBasicCSV(t *testing.T) {
- p := Parser{
- ColumnNames: []string{"first", "second", "third"},
- TagColumns: []string{"third"},
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ ColumnNames: []string{"first", "second", "third"},
+ TagColumns: []string{"third"},
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
- _, err := p.ParseLine("1.4,true,hi")
+ _, err = p.ParseLine("1.4,true,hi")
require.NoError(t, err)
}
func TestHeaderConcatenationCSV(t *testing.T) {
- p := Parser{
- HeaderRowCount: 2,
- MeasurementColumn: "3",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 2,
+ MeasurementColumn: "3",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `first,second
1,2,3
3.4,70,test_name`
@@ -41,12 +48,15 @@ func TestHeaderConcatenationCSV(t *testing.T) {
}
func TestHeaderOverride(t *testing.T) {
- p := Parser{
- HeaderRowCount: 1,
- ColumnNames: []string{"first", "second", "third"},
- MeasurementColumn: "third",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 1,
+ ColumnNames: []string{"first", "second", "third"},
+ MeasurementColumn: "third",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `line1,line2,line3
3.4,70,test_name`
metrics, err := p.Parse([]byte(testCSV))
@@ -55,14 +65,16 @@ func TestHeaderOverride(t *testing.T) {
}
func TestTimestamp(t *testing.T) {
- p := Parser{
- HeaderRowCount: 1,
- ColumnNames: []string{"first", "second", "third"},
- MeasurementColumn: "third",
- TimestampColumn: "first",
- TimestampFormat: "02/01/06 03:04:05 PM",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 1,
+ ColumnNames: []string{"first", "second", "third"},
+ MeasurementColumn: "third",
+ TimestampColumn: "first",
+ TimestampFormat: "02/01/06 03:04:05 PM",
+ TimeFunc: DefaultTime,
+ },
+ )
testCSV := `line1,line2,line3
23/05/09 04:05:06 PM,70,test_name
07/11/09 04:05:06 PM,80,test_name2`
@@ -74,29 +86,35 @@ func TestTimestamp(t *testing.T) {
}
func TestTimestampError(t *testing.T) {
- p := Parser{
- HeaderRowCount: 1,
- ColumnNames: []string{"first", "second", "third"},
- MeasurementColumn: "third",
- TimestampColumn: "first",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 1,
+ ColumnNames: []string{"first", "second", "third"},
+ MeasurementColumn: "third",
+ TimestampColumn: "first",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `line1,line2,line3
23/05/09 04:05:06 PM,70,test_name
07/11/09 04:05:06 PM,80,test_name2`
- _, err := p.Parse([]byte(testCSV))
+ _, err = p.Parse([]byte(testCSV))
require.Equal(t, fmt.Errorf("timestamp format must be specified"), err)
}
func TestTimestampUnixFormat(t *testing.T) {
- p := Parser{
- HeaderRowCount: 1,
- ColumnNames: []string{"first", "second", "third"},
- MeasurementColumn: "third",
- TimestampColumn: "first",
- TimestampFormat: "unix",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 1,
+ ColumnNames: []string{"first", "second", "third"},
+ MeasurementColumn: "third",
+ TimestampColumn: "first",
+ TimestampFormat: "unix",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `line1,line2,line3
1243094706,70,test_name
1257609906,80,test_name2`
@@ -107,14 +125,17 @@ func TestTimestampUnixFormat(t *testing.T) {
}
func TestTimestampUnixMSFormat(t *testing.T) {
- p := Parser{
- HeaderRowCount: 1,
- ColumnNames: []string{"first", "second", "third"},
- MeasurementColumn: "third",
- TimestampColumn: "first",
- TimestampFormat: "unix_ms",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 1,
+ ColumnNames: []string{"first", "second", "third"},
+ MeasurementColumn: "third",
+ TimestampColumn: "first",
+ TimestampFormat: "unix_ms",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `line1,line2,line3
1243094706123,70,test_name
1257609906123,80,test_name2`
@@ -125,12 +146,15 @@ func TestTimestampUnixMSFormat(t *testing.T) {
}
func TestQuotedCharacter(t *testing.T) {
- p := Parser{
- HeaderRowCount: 1,
- ColumnNames: []string{"first", "second", "third"},
- MeasurementColumn: "third",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 1,
+ ColumnNames: []string{"first", "second", "third"},
+ MeasurementColumn: "third",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `line1,line2,line3
"3,4",70,test_name`
@@ -140,13 +164,16 @@ func TestQuotedCharacter(t *testing.T) {
}
func TestDelimiter(t *testing.T) {
- p := Parser{
- HeaderRowCount: 1,
- Delimiter: "%",
- ColumnNames: []string{"first", "second", "third"},
- MeasurementColumn: "third",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 1,
+ Delimiter: "%",
+ ColumnNames: []string{"first", "second", "third"},
+ MeasurementColumn: "third",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `line1%line2%line3
3,4%70%test_name`
@@ -156,13 +183,16 @@ func TestDelimiter(t *testing.T) {
}
func TestValueConversion(t *testing.T) {
- p := Parser{
- HeaderRowCount: 0,
- Delimiter: ",",
- ColumnNames: []string{"first", "second", "third", "fourth"},
- MetricName: "test_value",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 0,
+ Delimiter: ",",
+ ColumnNames: []string{"first", "second", "third", "fourth"},
+ MetricName: "test_value",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `3.3,4,true,hello`
expectedTags := make(map[string]string)
@@ -198,13 +228,16 @@ func TestValueConversion(t *testing.T) {
}
func TestSkipComment(t *testing.T) {
- p := Parser{
- HeaderRowCount: 0,
- Comment: "#",
- ColumnNames: []string{"first", "second", "third", "fourth"},
- MetricName: "test_value",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 0,
+ Comment: "#",
+ ColumnNames: []string{"first", "second", "third", "fourth"},
+ MetricName: "test_value",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `#3.3,4,true,hello
4,9.9,true,name_this`
@@ -221,13 +254,16 @@ func TestSkipComment(t *testing.T) {
}
func TestTrimSpace(t *testing.T) {
- p := Parser{
- HeaderRowCount: 0,
- TrimSpace: true,
- ColumnNames: []string{"first", "second", "third", "fourth"},
- MetricName: "test_value",
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 0,
+ TrimSpace: true,
+ ColumnNames: []string{"first", "second", "third", "fourth"},
+ MetricName: "test_value",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := ` 3.3, 4, true,hello`
expectedFields := map[string]interface{}{
@@ -242,33 +278,65 @@ func TestTrimSpace(t *testing.T) {
require.Equal(t, expectedFields, metrics[0].Fields())
}
-func TestSkipRows(t *testing.T) {
- p := Parser{
- HeaderRowCount: 1,
- SkipRows: 1,
- TagColumns: []string{"line1"},
- MeasurementColumn: "line3",
- TimeFunc: DefaultTime,
+func TestTrimSpaceDelimitedBySpace(t *testing.T) {
+ p, err := NewParser(
+ &Config{
+ Delimiter: " ",
+ HeaderRowCount: 1,
+ TrimSpace: true,
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
+ testCSV := ` first second third fourth
+abcdefgh 0 2 false
+ abcdef 3.3 4 true
+ f 0 2 false`
+
+ expectedFields := map[string]interface{}{
+ "first": "abcdef",
+ "second": 3.3,
+ "third": int64(4),
+ "fourth": true,
}
+
+ metrics, err := p.Parse([]byte(testCSV))
+ require.NoError(t, err)
+ require.Equal(t, expectedFields, metrics[1].Fields())
+}
+
+func TestSkipRows(t *testing.T) {
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 1,
+ SkipRows: 1,
+ TagColumns: []string{"line1"},
+ MeasurementColumn: "line3",
+ TimeFunc: DefaultTime,
+ },
+ )
testCSV := `garbage nonsense
line1,line2,line3
hello,80,test_name2`
expectedFields := map[string]interface{}{
"line2": int64(80),
- "line3": "test_name2",
}
metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err)
+ require.Equal(t, "test_name2", metrics[0].Name())
require.Equal(t, expectedFields, metrics[0].Fields())
}
func TestSkipColumns(t *testing.T) {
- p := Parser{
- SkipColumns: 1,
- ColumnNames: []string{"line1", "line2"},
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ SkipColumns: 1,
+ ColumnNames: []string{"line1", "line2"},
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `hello,80,test_name`
expectedFields := map[string]interface{}{
@@ -281,11 +349,14 @@ func TestSkipColumns(t *testing.T) {
}
func TestSkipColumnsWithHeader(t *testing.T) {
- p := Parser{
- SkipColumns: 1,
- HeaderRowCount: 2,
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ SkipColumns: 1,
+ HeaderRowCount: 2,
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
testCSV := `col,col,col
1,2,3
trash,80,test_name`
@@ -297,11 +368,14 @@ func TestSkipColumnsWithHeader(t *testing.T) {
}
func TestParseStream(t *testing.T) {
- p := Parser{
- MetricName: "csv",
- HeaderRowCount: 1,
- TimeFunc: DefaultTime,
- }
+ p, err := NewParser(
+ &Config{
+ MetricName: "csv",
+ HeaderRowCount: 1,
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
csvHeader := "a,b,c"
csvBody := "1,2,3"
@@ -322,3 +396,200 @@ func TestParseStream(t *testing.T) {
DefaultTime(),
), metric)
}
+
+func TestTimestampUnixFloatPrecision(t *testing.T) {
+ p, err := NewParser(
+ &Config{
+ MetricName: "csv",
+ ColumnNames: []string{"time", "value"},
+ TimestampColumn: "time",
+ TimestampFormat: "unix",
+ TimeFunc: DefaultTime,
+ },
+ )
+ require.NoError(t, err)
+ data := `1551129661.95456123352050781250,42`
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "csv",
+ map[string]string{},
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Unix(1551129661, 954561233),
+ ),
+ }
+
+ metrics, err := p.Parse([]byte(data))
+ require.NoError(t, err)
+ testutil.RequireMetricsEqual(t, expected, metrics)
+}
+
+func TestSkipMeasurementColumn(t *testing.T) {
+ p, err := NewParser(
+ &Config{
+ MetricName: "csv",
+ HeaderRowCount: 1,
+ TimestampColumn: "timestamp",
+ TimestampFormat: "unix",
+ TimeFunc: DefaultTime,
+ TrimSpace: true,
+ },
+ )
+ require.NoError(t, err)
+ data := `id,value,timestamp
+ 1,5,1551129661.954561233`
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "csv",
+ map[string]string{},
+ map[string]interface{}{
+ "id": 1,
+ "value": 5,
+ },
+ time.Unix(1551129661, 954561233),
+ ),
+ }
+
+ metrics, err := p.Parse([]byte(data))
+ require.NoError(t, err)
+ testutil.RequireMetricsEqual(t, expected, metrics)
+}
+
+func TestSkipTimestampColumn(t *testing.T) {
+ p, err := NewParser(
+ &Config{
+ MetricName: "csv",
+ HeaderRowCount: 1,
+ TimestampColumn: "timestamp",
+ TimestampFormat: "unix",
+ TimeFunc: DefaultTime,
+ TrimSpace: true,
+ },
+ )
+ require.NoError(t, err)
+ data := `id,value,timestamp
+ 1,5,1551129661.954561233`
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "csv",
+ map[string]string{},
+ map[string]interface{}{
+ "id": 1,
+ "value": 5,
+ },
+ time.Unix(1551129661, 954561233),
+ ),
+ }
+
+ metrics, err := p.Parse([]byte(data))
+ require.NoError(t, err)
+ testutil.RequireMetricsEqual(t, expected, metrics)
+}
+
+func TestTimestampTimezone(t *testing.T) {
+ p, err := NewParser(
+ &Config{
+ HeaderRowCount: 1,
+ ColumnNames: []string{"first", "second", "third"},
+ MeasurementColumn: "third",
+ TimestampColumn: "first",
+ TimestampFormat: "02/01/06 03:04:05 PM",
+ TimeFunc: DefaultTime,
+ Timezone: "Asia/Jakarta",
+ },
+ )
+ require.NoError(t, err)
+ testCSV := `line1,line2,line3
+23/05/09 11:05:06 PM,70,test_name
+07/11/09 11:05:06 PM,80,test_name2`
+ metrics, err := p.Parse([]byte(testCSV))
+
+ require.NoError(t, err)
+ require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706000000000))
+ require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000))
+}
+
+func TestEmptyMeasurementName(t *testing.T) {
+ p, err := NewParser(
+ &Config{
+ MetricName: "csv",
+ HeaderRowCount: 1,
+ ColumnNames: []string{"", "b"},
+ MeasurementColumn: "",
+ },
+ )
+ require.NoError(t, err)
+ testCSV := `,b
+1,2`
+ metrics, err := p.Parse([]byte(testCSV))
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric("csv",
+ map[string]string{},
+ map[string]interface{}{
+ "b": 2,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime())
+}
+
+func TestNumericMeasurementName(t *testing.T) {
+ p, err := NewParser(
+ &Config{
+ MetricName: "csv",
+ HeaderRowCount: 1,
+ ColumnNames: []string{"a", "b"},
+ MeasurementColumn: "a",
+ },
+ )
+ require.NoError(t, err)
+ testCSV := `a,b
+1,2`
+ metrics, err := p.Parse([]byte(testCSV))
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric("1",
+ map[string]string{},
+ map[string]interface{}{
+ "b": 2,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime())
+}
+
+func TestStaticMeasurementName(t *testing.T) {
+ p, err := NewParser(
+ &Config{
+ MetricName: "csv",
+ HeaderRowCount: 1,
+ ColumnNames: []string{"a", "b"},
+ },
+ )
+ require.NoError(t, err)
+ testCSV := `a,b
+1,2`
+ metrics, err := p.Parse([]byte(testCSV))
+ require.NoError(t, err)
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric("csv",
+ map[string]string{},
+ map[string]interface{}{
+ "a": 1,
+ "b": 2,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime())
+}
diff --git a/plugins/parsers/dropwizard/README.md b/plugins/parsers/dropwizard/README.md
index f0ff6d15c7513..436518a67e110 100644
--- a/plugins/parsers/dropwizard/README.md
+++ b/plugins/parsers/dropwizard/README.md
@@ -1,6 +1,6 @@
# Dropwizard
-The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overriden by defining a custom [template pattern][templates]. All field value types are supported, `string`, `number` and `boolean`.
+The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overridden by defining a custom [template pattern][templates]. All field value types are supported, `string`, `number` and `boolean`.
[templates]: /docs/TEMPLATE_PATTERN.md
[dropwizard]: http://metrics.dropwizard.io/3.1.0/manual/json/
diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go
index 95ce3bffd7bb6..d8dcc92040aa4 100644
--- a/plugins/parsers/dropwizard/parser.go
+++ b/plugins/parsers/dropwizard/parser.go
@@ -17,6 +17,8 @@ import (
var fieldEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
var keyEscaper = strings.NewReplacer(" ", "\\ ", ",", "\\,", "=", "\\=")
+type TimeFunc func() time.Time
+
// Parser parses json inputs containing dropwizard metrics,
// either top-level or embedded inside a json field.
// This parser is using gjson for retrieving paths within the json file.
@@ -48,7 +50,7 @@ type parser struct {
separator string
templateEngine *templating.Engine
- timeFunc metric.TimeFunc
+ timeFunc TimeFunc
// seriesParser parses line protocol measurement + tags
seriesParser *influx.Parser
@@ -267,6 +269,6 @@ func (p *parser) readDWMetrics(metricType string, dwms interface{}, metrics []te
return metrics
}
-func (p *parser) SetTimeFunc(f metric.TimeFunc) {
+func (p *parser) SetTimeFunc(f TimeFunc) {
p.timeFunc = f
}
diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go
index df33562dbbd8c..df75c7f252969 100644
--- a/plugins/parsers/dropwizard/parser_test.go
+++ b/plugins/parsers/dropwizard/parser_test.go
@@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/require"
)
-var TimeFunc = func() time.Time {
+var testTimeFunc = func() time.Time {
return time.Unix(0, 0)
}
@@ -528,7 +528,7 @@ func TestDropWizard(t *testing.T) {
map[string]interface{}{
"value": 42.0,
},
- TimeFunc(),
+ testTimeFunc(),
),
),
},
@@ -547,7 +547,7 @@ func TestDropWizard(t *testing.T) {
map[string]interface{}{
"value": 42.0,
},
- TimeFunc(),
+ testTimeFunc(),
),
),
},
@@ -573,7 +573,7 @@ func TestDropWizard(t *testing.T) {
map[string]interface{}{
"value": 42.0,
},
- TimeFunc(),
+ testTimeFunc(),
),
),
},
@@ -584,7 +584,7 @@ func TestDropWizard(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
parser := NewParser()
- parser.SetTimeFunc(TimeFunc)
+ parser.SetTimeFunc(testTimeFunc)
metrics, err := parser.Parse(tt.input)
tt.errFunc(t, err)
diff --git a/plugins/parsers/form_urlencoded/README.md b/plugins/parsers/form_urlencoded/README.md
new file mode 100644
index 0000000000000..e3700f44e2311
--- /dev/null
+++ b/plugins/parsers/form_urlencoded/README.md
@@ -0,0 +1,57 @@
+# Form Urlencoded
+
+
+The `form-urlencoded` data format parses `application/x-www-form-urlencoded`
+data, such as commonly used in the [query string][].
+
+A common use case is to pair it with [http_listener_v2][] input plugin to parse
+request body or query params.
+
+### Configuration
+
+```toml
+[[inputs.http_listener_v2]]
+ ## Address and port to host HTTP listener on
+ service_address = ":8080"
+
+ ## Part of the request to consume. Available options are "body" and
+ ## "query".
+ data_source = "body"
+
+ ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "form_urlencoded"
+
+ ## Array of key names which should be collected as tags.
+ ## By default, keys with string value are ignored if not marked as tags.
+ form_urlencoded_tag_keys = ["tag1"]
+```
+
+### Examples
+
+#### Basic parsing
+
+Config:
+```toml
+[[inputs.http_listener_v2]]
+ name_override = "mymetric"
+ service_address = ":8080"
+ data_source = "query"
+ data_format = "form_urlencoded"
+ form_urlencoded_tag_keys = ["tag1"]
+```
+
+Request:
+```bash
+curl -i -XGET 'http://localhost:8080/telegraf?tag1=foo&field1=0.42&field2=42'
+```
+
+Output:
+```
+mymetric,tag1=foo field1=0.42,field2=42
+```
+
+[query string]: https://en.wikipedia.org/wiki/Query_string
+[http_listener_v2]: /plugins/inputs/http_listener_v2
diff --git a/plugins/parsers/form_urlencoded/parser.go b/plugins/parsers/form_urlencoded/parser.go
new file mode 100644
index 0000000000000..f38d87a80eac0
--- /dev/null
+++ b/plugins/parsers/form_urlencoded/parser.go
@@ -0,0 +1,125 @@
+package form_urlencoded
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+)
+
+var (
+ // ErrNoMetric is returned when no metric is found in input line
+ ErrNoMetric = fmt.Errorf("no metric in line")
+)
+
+// Parser decodes "application/x-www-form-urlencoded" data into metrics
+type Parser struct {
+ MetricName string
+ DefaultTags map[string]string
+ TagKeys []string
+ AllowedKeys []string
+}
+
+// Parse converts a slice of bytes in "application/x-www-form-urlencoded" format into metrics
+func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+ buf = bytes.TrimSpace(buf)
+ if len(buf) == 0 {
+ return make([]telegraf.Metric, 0), nil
+ }
+
+ values, err := url.ParseQuery(string(buf))
+ if err != nil {
+ return nil, err
+ }
+
+ if len(p.AllowedKeys) > 0 {
+ values = p.filterAllowedKeys(values)
+ }
+
+ tags := p.extractTags(values)
+ fields := p.parseFields(values)
+
+ for key, value := range p.DefaultTags {
+ tags[key] = value
+ }
+
+ metric, err := metric.New(p.MetricName, tags, fields, time.Now().UTC())
+ if err != nil {
+ return nil, err
+ }
+
+ return []telegraf.Metric{metric}, nil
+}
+
+// ParseLine delegates a single line of text to the Parse function
+func (p Parser) ParseLine(line string) (telegraf.Metric, error) {
+ metrics, err := p.Parse([]byte(line))
+ if err != nil {
+ return nil, err
+ }
+
+ if len(metrics) < 1 {
+ return nil, ErrNoMetric
+ }
+
+ return metrics[0], nil
+}
+
+// SetDefaultTags sets the default tags for every metric
+func (p *Parser) SetDefaultTags(tags map[string]string) {
+ p.DefaultTags = tags
+}
+
+func (p Parser) filterAllowedKeys(original url.Values) url.Values {
+ result := make(url.Values)
+
+ for _, key := range p.AllowedKeys {
+ value, exists := original[key]
+ if !exists {
+ continue
+ }
+
+ result[key] = value
+ }
+
+ return result
+}
+
+func (p Parser) extractTags(values url.Values) map[string]string {
+ tags := make(map[string]string)
+ for _, key := range p.TagKeys {
+ value, exists := values[key]
+
+ if !exists || len(key) == 0 {
+ continue
+ }
+
+ tags[key] = value[0]
+ delete(values, key)
+ }
+
+ return tags
+}
+
+func (p Parser) parseFields(values url.Values) map[string]interface{} {
+ fields := make(map[string]interface{})
+
+ for key, value := range values {
+ if len(key) == 0 || len(value) == 0 {
+ continue
+ }
+
+ field, err := strconv.ParseFloat(value[0], 64)
+ if err != nil {
+ continue
+ }
+
+ fields[key] = field
+ }
+
+ return fields
+}
diff --git a/plugins/parsers/form_urlencoded/parser_test.go b/plugins/parsers/form_urlencoded/parser_test.go
new file mode 100644
index 0000000000000..931d5a4caeea8
--- /dev/null
+++ b/plugins/parsers/form_urlencoded/parser_test.go
@@ -0,0 +1,172 @@
+package form_urlencoded
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ validFormData = "tag1=foo&tag2=bar&tag3=baz&field1=42&field2=69"
+ encodedFormData = "tag1=%24%24%24&field1=1e%2B3"
+ notEscapedProperlyFormData = "invalid=%Y5"
+ blankKeyFormData = "=42&field2=69"
+ emptyFormData = ""
+)
+
+func TestParseValidFormData(t *testing.T) {
+ parser := Parser{
+ MetricName: "form_urlencoded_test",
+ }
+
+ metrics, err := parser.Parse([]byte(validFormData))
+ require.NoError(t, err)
+ require.Len(t, metrics, 1)
+ require.Equal(t, "form_urlencoded_test", metrics[0].Name())
+ require.Equal(t, map[string]string{}, metrics[0].Tags())
+ require.Equal(t, map[string]interface{}{
+ "field1": float64(42),
+ "field2": float64(69),
+ }, metrics[0].Fields())
+}
+
+func TestParseLineValidFormData(t *testing.T) {
+ parser := Parser{
+ MetricName: "form_urlencoded_test",
+ }
+
+ metric, err := parser.ParseLine(validFormData)
+ require.NoError(t, err)
+ require.Equal(t, "form_urlencoded_test", metric.Name())
+ require.Equal(t, map[string]string{}, metric.Tags())
+ require.Equal(t, map[string]interface{}{
+ "field1": float64(42),
+ "field2": float64(69),
+ }, metric.Fields())
+}
+
+func TestParseValidFormDataWithTags(t *testing.T) {
+ parser := Parser{
+ MetricName: "form_urlencoded_test",
+ TagKeys: []string{"tag1", "tag2"},
+ }
+
+ metrics, err := parser.Parse([]byte(validFormData))
+ require.NoError(t, err)
+ require.Len(t, metrics, 1)
+ require.Equal(t, "form_urlencoded_test", metrics[0].Name())
+ require.Equal(t, map[string]string{
+ "tag1": "foo",
+ "tag2": "bar",
+ }, metrics[0].Tags())
+ require.Equal(t, map[string]interface{}{
+ "field1": float64(42),
+ "field2": float64(69),
+ }, metrics[0].Fields())
+}
+
+func TestParseValidFormDataDefaultTags(t *testing.T) {
+ parser := Parser{
+ MetricName: "form_urlencoded_test",
+ TagKeys: []string{"tag1", "tag2"},
+ DefaultTags: map[string]string{"tag4": "default"},
+ }
+
+ metrics, err := parser.Parse([]byte(validFormData))
+ require.NoError(t, err)
+ require.Len(t, metrics, 1)
+ require.Equal(t, "form_urlencoded_test", metrics[0].Name())
+ require.Equal(t, map[string]string{
+ "tag1": "foo",
+ "tag2": "bar",
+ "tag4": "default",
+ }, metrics[0].Tags())
+ require.Equal(t, map[string]interface{}{
+ "field1": float64(42),
+ "field2": float64(69),
+ }, metrics[0].Fields())
+}
+
+func TestParseValidFormDataDefaultTagsOverride(t *testing.T) {
+ parser := Parser{
+ MetricName: "form_urlencoded_test",
+ TagKeys: []string{"tag1", "tag2"},
+ DefaultTags: map[string]string{"tag1": "default"},
+ }
+
+ metrics, err := parser.Parse([]byte(validFormData))
+ require.NoError(t, err)
+ require.Len(t, metrics, 1)
+ require.Equal(t, "form_urlencoded_test", metrics[0].Name())
+ require.Equal(t, map[string]string{
+ "tag1": "default",
+ "tag2": "bar",
+ }, metrics[0].Tags())
+ require.Equal(t, map[string]interface{}{
+ "field1": float64(42),
+ "field2": float64(69),
+ }, metrics[0].Fields())
+}
+
+func TestParseEncodedFormData(t *testing.T) {
+ parser := Parser{
+ MetricName: "form_urlencoded_test",
+ TagKeys: []string{"tag1"},
+ }
+
+ metrics, err := parser.Parse([]byte(encodedFormData))
+ require.NoError(t, err)
+ require.Len(t, metrics, 1)
+ require.Equal(t, "form_urlencoded_test", metrics[0].Name())
+ require.Equal(t, map[string]string{
+ "tag1": "$$$",
+ }, metrics[0].Tags())
+ require.Equal(t, map[string]interface{}{
+ "field1": float64(1000),
+ }, metrics[0].Fields())
+}
+
+func TestParseInvalidFormDataError(t *testing.T) {
+ parser := Parser{
+ MetricName: "form_urlencoded_test",
+ }
+
+ metrics, err := parser.Parse([]byte(notEscapedProperlyFormData))
+ require.Error(t, err)
+ require.Len(t, metrics, 0)
+}
+
+func TestParseInvalidFormDataEmptyKey(t *testing.T) {
+ parser := Parser{
+ MetricName: "form_urlencoded_test",
+ }
+
+ // Empty key for field
+ metrics, err := parser.Parse([]byte(blankKeyFormData))
+ require.NoError(t, err)
+ require.Len(t, metrics, 1)
+ require.Equal(t, map[string]string{}, metrics[0].Tags())
+ require.Equal(t, map[string]interface{}{
+ "field2": float64(69),
+ }, metrics[0].Fields())
+
+ // Empty key for tag
+ parser.TagKeys = []string{""}
+ metrics, err = parser.Parse([]byte(blankKeyFormData))
+ require.NoError(t, err)
+ require.Len(t, metrics, 1)
+ require.Equal(t, map[string]string{}, metrics[0].Tags())
+ require.Equal(t, map[string]interface{}{
+ "field2": float64(69),
+ }, metrics[0].Fields())
+}
+
+func TestParseInvalidFormDataEmptyString(t *testing.T) {
+ parser := Parser{
+ MetricName: "form_urlencoded_test",
+ }
+
+ metrics, err := parser.Parse([]byte(emptyFormData))
+ require.NoError(t, err)
+ require.Len(t, metrics, 0)
+}
diff --git a/plugins/parsers/graphite/config.go b/plugins/parsers/graphite/config.go
index 7a5c759e72f7f..915077c06b299 100644
--- a/plugins/parsers/graphite/config.go
+++ b/plugins/parsers/graphite/config.go
@@ -7,7 +7,7 @@ import (
const (
// DefaultSeparator is the default join character to use when joining multiple
- // measurment parts in a template.
+ // measurement parts in a template.
DefaultSeparator = "."
)
diff --git a/plugins/parsers/graphite/errors.go b/plugins/parsers/graphite/errors.go
deleted file mode 100644
index 2cd2f5583bc5d..0000000000000
--- a/plugins/parsers/graphite/errors.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package graphite
-
-import "fmt"
-
-// An UnsupposedValueError is returned when a parsed value is not
-// supposed.
-type UnsupposedValueError struct {
- Field string
- Value float64
-}
-
-func (err *UnsupposedValueError) Error() string {
- return fmt.Sprintf(`field "%s" value: "%v" is unsupported`, err.Field, err.Value)
-}
diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go
index fc32bd83d9249..f50217711c15c 100644
--- a/plugins/parsers/graphite/parser.go
+++ b/plugins/parsers/graphite/parser.go
@@ -1,18 +1,16 @@
package graphite
import (
- "bufio"
"bytes"
+ "errors"
"fmt"
- "io"
"math"
"strconv"
"strings"
"time"
- "github.com/influxdata/telegraf/internal/templating"
-
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal/templating"
"github.com/influxdata/telegraf/metric"
)
@@ -63,42 +61,36 @@ func NewGraphiteParser(
func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) {
// parse even if the buffer begins with a newline
- buf = bytes.TrimPrefix(buf, []byte("\n"))
- // add newline to end if not exists:
- if len(buf) > 0 && !bytes.HasSuffix(buf, []byte("\n")) {
- buf = append(buf, []byte("\n")...)
+ if len(buf) != 0 && buf[0] == '\n' {
+ buf = buf[1:]
}
- metrics := make([]telegraf.Metric, 0)
+ var metrics []telegraf.Metric
+ var errs []string
- var errStr string
- buffer := bytes.NewBuffer(buf)
- reader := bufio.NewReader(buffer)
for {
- // Read up to the next newline.
- buf, err := reader.ReadBytes('\n')
- if err == io.EOF {
- break
- }
- if err != nil && err != io.EOF {
- return metrics, err
+ n := bytes.IndexByte(buf, '\n')
+ var line []byte
+ if n >= 0 {
+ line = bytes.TrimSpace(buf[:n:n])
+ } else {
+ line = bytes.TrimSpace(buf) // last line
}
-
- // Trim the buffer, even though there should be no padding
- line := strings.TrimSpace(string(buf))
- if line == "" {
- continue
+ if len(line) != 0 {
+ metric, err := p.ParseLine(string(line))
+ if err == nil {
+ metrics = append(metrics, metric)
+ } else {
+ errs = append(errs, err.Error())
+ }
}
- metric, err := p.ParseLine(line)
- if err == nil {
- metrics = append(metrics, metric)
- } else {
- errStr += err.Error() + "\n"
+ if n < 0 {
+ break
}
+ buf = buf[n+1:]
}
-
- if errStr != "" {
- return metrics, fmt.Errorf(strings.TrimSpace(errStr))
+ if len(errs) != 0 {
+ return metrics, errors.New(strings.Join(errs, "\n"))
}
return metrics, nil
}
@@ -128,10 +120,6 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) {
return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
}
- if math.IsNaN(v) || math.IsInf(v, 0) {
- return nil, &UnsupposedValueError{Field: fields[0], Value: v}
- }
-
fieldValues := map[string]interface{}{}
if field != "" {
fieldValues[field] = v
diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go
index d84551addd0f8..9254574b604e6 100644
--- a/plugins/parsers/graphite/parser_test.go
+++ b/plugins/parsers/graphite/parser_test.go
@@ -1,14 +1,14 @@
package graphite
import (
- "reflect"
+ "math"
"strconv"
"testing"
"time"
"github.com/influxdata/telegraf/internal/templating"
"github.com/influxdata/telegraf/metric"
-
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -355,14 +355,40 @@ func TestParse(t *testing.T) {
func TestParseNaN(t *testing.T) {
p, err := NewGraphiteParser("", []string{"measurement*"}, nil)
- assert.NoError(t, err)
+ require.NoError(t, err)
- _, err = p.ParseLine("servers.localhost.cpu_load NaN 1435077219")
- assert.Error(t, err)
+ m, err := p.ParseLine("servers.localhost.cpu_load NaN 1435077219")
+ require.NoError(t, err)
- if _, ok := err.(*UnsupposedValueError); !ok {
- t.Fatalf("expected *ErrUnsupportedValue, got %v", reflect.TypeOf(err))
- }
+ expected := testutil.MustMetric(
+ "servers.localhost.cpu_load",
+ map[string]string{},
+ map[string]interface{}{
+ "value": math.NaN(),
+ },
+ time.Unix(1435077219, 0),
+ )
+
+ testutil.RequireMetricEqual(t, expected, m)
+}
+
+func TestParseInf(t *testing.T) {
+ p, err := NewGraphiteParser("", []string{"measurement*"}, nil)
+ require.NoError(t, err)
+
+ m, err := p.ParseLine("servers.localhost.cpu_load +Inf 1435077219")
+ require.NoError(t, err)
+
+ expected := testutil.MustMetric(
+ "servers.localhost.cpu_load",
+ map[string]string{},
+ map[string]interface{}{
+ "value": math.Inf(1),
+ },
+ time.Unix(1435077219, 0),
+ )
+
+ testutil.RequireMetricEqual(t, expected, m)
}
func TestFilterMatchDefault(t *testing.T) {
diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md
index 6263eecc91050..80936a41dbf62 100644
--- a/plugins/parsers/grok/README.md
+++ b/plugins/parsers/grok/README.md
@@ -50,6 +50,7 @@ You must capture at least one field per line.
- ts-httpd ("02/Jan/2006:15:04:05 -0700")
- ts-epoch (seconds since unix epoch, may contain decimal)
- ts-epochnano (nanoseconds since unix epoch)
+ - ts-epochmilli (milliseconds since unix epoch)
- ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year)
- ts-"CUSTOM"
@@ -159,6 +160,21 @@ Wed Apr 12 13:10:34 PST 2017 value=42
'''
```
+This example input and config parses a file using a custom timestamp conversion that doesn't match any specific standard:
+
+```
+21/02/2017 13:10:34 value=42
+```
+
+```toml
+[[inputs.file]]
+ grok_patterns = ['%{MY_TIMESTAMP:timestamp:ts-"02/01/2006 15:04:05"} value=%{NUMBER:value:int}']
+
+ grok_custom_patterns = '''
+ MY_TIMESTAMP (?:\d{2}.\d{2}.\d{4} \d{2}:\d{2}:\d{2})
+ '''
+```
+
For cases where the timestamp itself is without offset, the `timezone` config var is available
to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times
are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp
diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go
index cecb69f94cc43..810190b9d2f12 100644
--- a/plugins/parsers/grok/parser.go
+++ b/plugins/parsers/grok/parser.go
@@ -28,12 +28,13 @@ var timeLayouts = map[string]string{
"ts-rfc3339": "2006-01-02T15:04:05Z07:00",
"ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00",
"ts-httpd": "02/Jan/2006:15:04:05 -0700",
- // These three are not exactly "layouts", but they are special cases that
+ // These four are not exactly "layouts", but they are special cases that
// will get handled in the ParseLine function.
- "ts-epoch": "EPOCH",
- "ts-epochnano": "EPOCH_NANO",
- "ts-syslog": "SYSLOG_TIMESTAMP",
- "ts": "GENERIC_TIMESTAMP", // try parsing all known timestamp layouts.
+ "ts-epoch": "EPOCH",
+ "ts-epochnano": "EPOCH_NANO",
+ "ts-epochmilli": "EPOCH_MILLI",
+ "ts-syslog": "SYSLOG_TIMESTAMP",
+ "ts": "GENERIC_TIMESTAMP", // try parsing all known timestamp layouts.
}
const (
@@ -45,6 +46,7 @@ const (
DURATION = "duration"
DROP = "drop"
EPOCH = "EPOCH"
+ EPOCH_MILLI = "EPOCH_MILLI"
EPOCH_NANO = "EPOCH_NANO"
SYSLOG_TIMESTAMP = "SYSLOG_TIMESTAMP"
GENERIC_TIMESTAMP = "GENERIC_TIMESTAMP"
@@ -248,7 +250,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
case MEASUREMENT:
p.Measurement = v
case INT:
- iv, err := strconv.ParseInt(v, 10, 64)
+ iv, err := strconv.ParseInt(v, 0, 64)
if err != nil {
log.Printf("E! Error parsing %s to int: %s", v, err)
} else {
@@ -297,6 +299,13 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
ts = ts.Add(time.Duration(nanosec) * time.Nanosecond)
}
timestamp = ts
+ case EPOCH_MILLI:
+ ms, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ log.Printf("E! Error parsing %s to int: %s", v, err)
+ } else {
+ timestamp = time.Unix(0, ms*int64(time.Millisecond))
+ }
case EPOCH_NANO:
iv, err := strconv.ParseInt(v, 10, 64)
if err != nil {
diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go
index 2b8815264d854..1c409e8a542b6 100644
--- a/plugins/parsers/grok/parser_test.go
+++ b/plugins/parsers/grok/parser_test.go
@@ -277,6 +277,28 @@ func TestParsePatternsWithoutCustom(t *testing.T) {
assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time())
}
+func TestParseEpochMilli(t *testing.T) {
+ p := &Parser{
+ Patterns: []string{"%{MYAPP}"},
+ CustomPatterns: `
+ MYAPP %{POSINT:ts:ts-epochmilli} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
+ `,
+ }
+ assert.NoError(t, p.Compile())
+
+ metricA, err := p.ParseLine(`1568540909963 response_time=20821 mymetric=10890.645`)
+ require.NotNil(t, metricA)
+ assert.NoError(t, err)
+ assert.Equal(t,
+ map[string]interface{}{
+ "response_time": int64(20821),
+ "metric": float64(10890.645),
+ },
+ metricA.Fields())
+ assert.Equal(t, map[string]string{}, metricA.Tags())
+ assert.Equal(t, time.Unix(0, 1568540909963000000), metricA.Time())
+}
+
func TestParseEpochNano(t *testing.T) {
p := &Parser{
Patterns: []string{"%{MYAPP}"},
@@ -378,7 +400,7 @@ func TestParseEpochDecimal(t *testing.T) {
if tt.noMatch {
require.Nil(t, m)
- require.Nil(t, err)
+ require.NoError(t, err)
return
}
@@ -649,6 +671,31 @@ func TestParseErrors_WrongTimeLayout(t *testing.T) {
testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0)))
}
+func TestParseInteger_Base16(t *testing.T) {
+ p := &Parser{
+ Patterns: []string{"%{TEST_LOG_C}"},
+ CustomPatterns: `
+ DURATION %{NUMBER}[nuµm]?s
+ BASE10OR16NUM (?:%{BASE10NUM}|%{BASE16NUM})
+ TEST_LOG_C %{NUMBER:myfloat} %{BASE10OR16NUM:response_code:int} %{IPORHOST:clientip} %{DURATION:rt}
+ `,
+ }
+ assert.NoError(t, p.Compile())
+
+ metricA, err := p.ParseLine(`1.25 0xc8 192.168.1.1 5.432µs`)
+ require.NotNil(t, metricA)
+ assert.NoError(t, err)
+ assert.Equal(t,
+ map[string]interface{}{
+ "clientip": "192.168.1.1",
+ "response_code": int64(200),
+ "myfloat": "1.25",
+ "rt": "5.432µs",
+ },
+ metricA.Fields())
+ assert.Equal(t, map[string]string{}, metricA.Tags())
+}
+
func TestTsModder(t *testing.T) {
tsm := &tsModder{}
diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go
index 928671cc92791..ae08d5a7c0870 100644
--- a/plugins/parsers/influx/handler.go
+++ b/plugins/parsers/influx/handler.go
@@ -10,43 +10,53 @@ import (
"github.com/influxdata/telegraf/metric"
)
+// MetricHandler implements the Handler interface and produces telegraf.Metric.
type MetricHandler struct {
- builder *metric.Builder
- err error
- precision time.Duration
+ err error
+ timePrecision time.Duration
+ timeFunc TimeFunc
+ metric telegraf.Metric
}
func NewMetricHandler() *MetricHandler {
return &MetricHandler{
- builder: metric.NewBuilder(),
- precision: time.Nanosecond,
+ timePrecision: time.Nanosecond,
+ timeFunc: time.Now,
}
}
-func (h *MetricHandler) SetTimeFunc(f metric.TimeFunc) {
- h.builder.TimeFunc = f
+func (h *MetricHandler) SetTimePrecision(p time.Duration) {
+ h.timePrecision = p
+ // When the timestamp is omitted from the metric, the timestamp
+ // comes from the server clock, truncated to the nearest unit of
+ // measurement provided in precision.
+ //
+ // When a timestamp is provided in the metric, precision is
+ // overloaded to hold the unit of measurement of the timestamp.
}
-func (h *MetricHandler) SetTimePrecision(precision time.Duration) {
- h.builder.TimePrecision = precision
- h.precision = precision
+func (h *MetricHandler) SetTimeFunc(f TimeFunc) {
+ h.timeFunc = f
}
func (h *MetricHandler) Metric() (telegraf.Metric, error) {
- m, err := h.builder.Metric()
- h.builder.Reset()
- return m, err
+ if h.metric.Time().IsZero() {
+ h.metric.SetTime(h.timeFunc().Truncate(h.timePrecision))
+ }
+ return h.metric, nil
}
func (h *MetricHandler) SetMeasurement(name []byte) error {
- h.builder.SetName(nameUnescape(name))
- return nil
+ var err error
+ h.metric, err = metric.New(nameUnescape(name),
+ nil, nil, time.Time{})
+ return err
}
func (h *MetricHandler) AddTag(key []byte, value []byte) error {
tk := unescape(key)
tv := unescape(value)
- h.builder.AddTag(tk, tv)
+ h.metric.AddTag(tk, tv)
return nil
}
@@ -59,7 +69,7 @@ func (h *MetricHandler) AddInt(key []byte, value []byte) error {
}
return err
}
- h.builder.AddField(fk, fv)
+ h.metric.AddField(fk, fv)
return nil
}
@@ -72,7 +82,7 @@ func (h *MetricHandler) AddUint(key []byte, value []byte) error {
}
return err
}
- h.builder.AddField(fk, fv)
+ h.metric.AddField(fk, fv)
return nil
}
@@ -85,14 +95,14 @@ func (h *MetricHandler) AddFloat(key []byte, value []byte) error {
}
return err
}
- h.builder.AddField(fk, fv)
+ h.metric.AddField(fk, fv)
return nil
}
func (h *MetricHandler) AddString(key []byte, value []byte) error {
fk := unescape(key)
fv := stringFieldUnescape(value)
- h.builder.AddField(fk, fv)
+ h.metric.AddField(fk, fv)
return nil
}
@@ -102,7 +112,7 @@ func (h *MetricHandler) AddBool(key []byte, value []byte) error {
if err != nil {
return errors.New("unparseable bool")
}
- h.builder.AddField(fk, fv)
+ h.metric.AddField(fk, fv)
return nil
}
@@ -114,11 +124,9 @@ func (h *MetricHandler) SetTimestamp(tm []byte) error {
}
return err
}
- ns := v * int64(h.precision)
- h.builder.SetTime(time.Unix(0, ns))
- return nil
-}
-func (h *MetricHandler) Reset() {
- h.builder.Reset()
+ //time precision is overloaded to mean time unit here
+ ns := v * int64(h.timePrecision)
+ h.metric.SetTime(time.Unix(0, ns))
+ return nil
}
diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go
index b185eeabe6b81..332b73592486e 100644
--- a/plugins/parsers/influx/machine.go
+++ b/plugins/parsers/influx/machine.go
@@ -4,8 +4,17 @@ package influx
import (
"errors"
+ "io"
)
+type readErr struct {
+ Err error
+}
+
+func (e *readErr) Error() string {
+ return e.Err.Error()
+}
+
var (
ErrNameParse = errors.New("expected measurement name")
ErrFieldParse = errors.New("expected field")
@@ -16,22 +25,22 @@ var (
)
-//line plugins/parsers/influx/machine.go.rl:304
+//line plugins/parsers/influx/machine.go.rl:318
-//line plugins/parsers/influx/machine.go:24
-const LineProtocol_start int = 259
-const LineProtocol_first_final int = 259
+//line plugins/parsers/influx/machine.go:33
+const LineProtocol_start int = 269
+const LineProtocol_first_final int = 269
const LineProtocol_error int = 0
-const LineProtocol_en_main int = 259
-const LineProtocol_en_discard_line int = 247
-const LineProtocol_en_align int = 715
-const LineProtocol_en_series int = 250
+const LineProtocol_en_main int = 269
+const LineProtocol_en_discard_line int = 257
+const LineProtocol_en_align int = 739
+const LineProtocol_en_series int = 260
-//line plugins/parsers/influx/machine.go.rl:307
+//line plugins/parsers/influx/machine.go.rl:321
type Handler interface {
SetMeasurement(name []byte) error
@@ -45,14 +54,17 @@ type Handler interface {
}
type machine struct {
- data []byte
- cs int
- p, pe, eof int
- pb int
- lineno int
- sol int
- handler Handler
- initState int
+ data []byte
+ cs int
+ p, pe, eof int
+ pb int
+ lineno int
+ sol int
+ handler Handler
+ initState int
+ key []byte
+ beginMetric bool
+ finishMetric bool
}
func NewMachine(handler Handler) *machine {
@@ -62,24 +74,24 @@ func NewMachine(handler Handler) *machine {
}
-//line plugins/parsers/influx/machine.go.rl:337
+//line plugins/parsers/influx/machine.go.rl:354
-//line plugins/parsers/influx/machine.go.rl:338
+//line plugins/parsers/influx/machine.go.rl:355
-//line plugins/parsers/influx/machine.go.rl:339
+//line plugins/parsers/influx/machine.go.rl:356
-//line plugins/parsers/influx/machine.go.rl:340
+//line plugins/parsers/influx/machine.go.rl:357
-//line plugins/parsers/influx/machine.go.rl:341
+//line plugins/parsers/influx/machine.go.rl:358
-//line plugins/parsers/influx/machine.go.rl:342
+//line plugins/parsers/influx/machine.go.rl:359
-//line plugins/parsers/influx/machine.go:78
+//line plugins/parsers/influx/machine.go:90
{
( m.cs) = LineProtocol_start
}
-//line plugins/parsers/influx/machine.go.rl:343
+//line plugins/parsers/influx/machine.go.rl:360
return m
}
@@ -91,22 +103,22 @@ func NewSeriesMachine(handler Handler) *machine {
}
-//line plugins/parsers/influx/machine.go.rl:354
+//line plugins/parsers/influx/machine.go.rl:371
-//line plugins/parsers/influx/machine.go.rl:355
+//line plugins/parsers/influx/machine.go.rl:372
-//line plugins/parsers/influx/machine.go.rl:356
+//line plugins/parsers/influx/machine.go.rl:373
-//line plugins/parsers/influx/machine.go.rl:357
+//line plugins/parsers/influx/machine.go.rl:374
-//line plugins/parsers/influx/machine.go.rl:358
+//line plugins/parsers/influx/machine.go.rl:375
-//line plugins/parsers/influx/machine.go:105
+//line plugins/parsers/influx/machine.go:117
{
( m.cs) = LineProtocol_start
}
-//line plugins/parsers/influx/machine.go.rl:359
+//line plugins/parsers/influx/machine.go.rl:376
return m
}
@@ -119,14 +131,17 @@ func (m *machine) SetData(data []byte) {
m.sol = 0
m.pe = len(data)
m.eof = len(data)
+ m.key = nil
+ m.beginMetric = false
+ m.finishMetric = false
-//line plugins/parsers/influx/machine.go:125
+//line plugins/parsers/influx/machine.go:140
{
( m.cs) = LineProtocol_start
}
-//line plugins/parsers/influx/machine.go.rl:373
+//line plugins/parsers/influx/machine.go.rl:393
m.cs = m.initState
}
@@ -139,12 +154,17 @@ func (m *machine) Next() error {
return EOF
}
- var err error
- var key []byte
- foundMetric := false
+ m.key = nil
+ m.beginMetric = false
+ m.finishMetric = false
+
+ return m.exec()
+}
+func (m *machine) exec() error {
+ var err error
-//line plugins/parsers/influx/machine.go:148
+//line plugins/parsers/influx/machine.go:168
{
if ( m.p) == ( m.pe) {
goto _test_eof
@@ -153,8 +173,8 @@ func (m *machine) Next() error {
_again:
switch ( m.cs) {
- case 259:
- goto st259
+ case 269:
+ goto st269
case 1:
goto st1
case 2:
@@ -169,16 +189,16 @@ _again:
goto st5
case 6:
goto st6
+ case 270:
+ goto st270
+ case 271:
+ goto st271
+ case 272:
+ goto st272
case 7:
goto st7
case 8:
goto st8
- case 260:
- goto st260
- case 261:
- goto st261
- case 262:
- goto st262
case 9:
goto st9
case 10:
@@ -225,46 +245,22 @@ _again:
goto st30
case 31:
goto st31
- case 32:
- goto st32
- case 33:
- goto st33
- case 263:
- goto st263
- case 264:
- goto st264
- case 34:
- goto st34
- case 35:
- goto st35
- case 265:
- goto st265
- case 266:
- goto st266
- case 267:
- goto st267
- case 36:
- goto st36
- case 268:
- goto st268
- case 269:
- goto st269
- case 270:
- goto st270
- case 271:
- goto st271
- case 272:
- goto st272
case 273:
goto st273
case 274:
goto st274
+ case 32:
+ goto st32
+ case 33:
+ goto st33
case 275:
goto st275
case 276:
goto st276
case 277:
goto st277
+ case 34:
+ goto st34
case 278:
goto st278
case 279:
@@ -281,26 +277,12 @@ _again:
goto st284
case 285:
goto st285
- case 37:
- goto st37
- case 38:
- goto st38
case 286:
goto st286
case 287:
goto st287
case 288:
goto st288
- case 39:
- goto st39
- case 40:
- goto st40
- case 41:
- goto st41
- case 42:
- goto st42
- case 43:
- goto st43
case 289:
goto st289
case 290:
@@ -309,20 +291,32 @@ _again:
goto st291
case 292:
goto st292
- case 44:
- goto st44
case 293:
goto st293
case 294:
goto st294
case 295:
goto st295
+ case 35:
+ goto st35
+ case 36:
+ goto st36
case 296:
goto st296
case 297:
goto st297
case 298:
goto st298
+ case 37:
+ goto st37
+ case 38:
+ goto st38
+ case 39:
+ goto st39
+ case 40:
+ goto st40
+ case 41:
+ goto st41
case 299:
goto st299
case 300:
@@ -331,6 +325,8 @@ _again:
goto st301
case 302:
goto st302
+ case 42:
+ goto st42
case 303:
goto st303
case 304:
@@ -355,6 +351,30 @@ _again:
goto st313
case 314:
goto st314
+ case 315:
+ goto st315
+ case 316:
+ goto st316
+ case 317:
+ goto st317
+ case 318:
+ goto st318
+ case 319:
+ goto st319
+ case 320:
+ goto st320
+ case 321:
+ goto st321
+ case 322:
+ goto st322
+ case 323:
+ goto st323
+ case 324:
+ goto st324
+ case 43:
+ goto st43
+ case 44:
+ goto st44
case 45:
goto st45
case 46:
@@ -371,16 +391,16 @@ _again:
goto st51
case 52:
goto st52
+ case 325:
+ goto st325
+ case 326:
+ goto st326
+ case 327:
+ goto st327
case 53:
goto st53
case 54:
goto st54
- case 315:
- goto st315
- case 316:
- goto st316
- case 317:
- goto st317
case 55:
goto st55
case 56:
@@ -389,36 +409,12 @@ _again:
goto st57
case 58:
goto st58
- case 59:
- goto st59
- case 60:
- goto st60
- case 318:
- goto st318
- case 319:
- goto st319
- case 61:
- goto st61
- case 320:
- goto st320
- case 321:
- goto st321
- case 322:
- goto st322
- case 323:
- goto st323
- case 324:
- goto st324
- case 325:
- goto st325
- case 326:
- goto st326
- case 327:
- goto st327
case 328:
goto st328
case 329:
goto st329
+ case 59:
+ goto st59
case 330:
goto st330
case 331:
@@ -439,16 +435,12 @@ _again:
goto st338
case 339:
goto st339
- case 62:
- goto st62
case 340:
goto st340
case 341:
goto st341
case 342:
goto st342
- case 63:
- goto st63
case 343:
goto st343
case 344:
@@ -463,12 +455,16 @@ _again:
goto st348
case 349:
goto st349
+ case 60:
+ goto st60
case 350:
goto st350
case 351:
goto st351
case 352:
goto st352
+ case 61:
+ goto st61
case 353:
goto st353
case 354:
@@ -489,68 +485,68 @@ _again:
goto st361
case 362:
goto st362
- case 64:
- goto st64
- case 65:
- goto st65
- case 66:
- goto st66
- case 67:
- goto st67
- case 68:
- goto st68
case 363:
goto st363
- case 69:
- goto st69
- case 70:
- goto st70
- case 71:
- goto st71
- case 72:
- goto st72
- case 73:
- goto st73
case 364:
goto st364
case 365:
goto st365
case 366:
goto st366
- case 74:
- goto st74
- case 75:
- goto st75
case 367:
goto st367
case 368:
goto st368
- case 76:
- goto st76
case 369:
goto st369
- case 77:
- goto st77
case 370:
goto st370
case 371:
goto st371
case 372:
goto st372
+ case 62:
+ goto st62
+ case 63:
+ goto st63
+ case 64:
+ goto st64
+ case 65:
+ goto st65
+ case 66:
+ goto st66
case 373:
goto st373
+ case 67:
+ goto st67
+ case 68:
+ goto st68
+ case 69:
+ goto st69
+ case 70:
+ goto st70
+ case 71:
+ goto st71
case 374:
goto st374
case 375:
goto st375
case 376:
goto st376
+ case 72:
+ goto st72
+ case 73:
+ goto st73
+ case 74:
+ goto st74
case 377:
goto st377
case 378:
goto st378
case 379:
goto st379
+ case 75:
+ goto st75
case 380:
goto st380
case 381:
@@ -571,34 +567,6 @@ _again:
goto st388
case 389:
goto st389
- case 78:
- goto st78
- case 79:
- goto st79
- case 80:
- goto st80
- case 81:
- goto st81
- case 82:
- goto st82
- case 83:
- goto st83
- case 84:
- goto st84
- case 85:
- goto st85
- case 86:
- goto st86
- case 87:
- goto st87
- case 88:
- goto st88
- case 89:
- goto st89
- case 90:
- goto st90
- case 91:
- goto st91
case 390:
goto st390
case 391:
@@ -607,62 +575,90 @@ _again:
goto st392
case 393:
goto st393
- case 92:
- goto st92
- case 93:
- goto st93
- case 94:
- goto st94
- case 95:
- goto st95
case 394:
goto st394
case 395:
goto st395
- case 96:
- goto st96
- case 97:
- goto st97
case 396:
goto st396
- case 98:
- goto st98
- case 99:
- goto st99
case 397:
goto st397
case 398:
goto st398
- case 100:
- goto st100
case 399:
goto st399
+ case 76:
+ goto st76
+ case 77:
+ goto st77
+ case 78:
+ goto st78
+ case 79:
+ goto st79
+ case 80:
+ goto st80
+ case 81:
+ goto st81
+ case 82:
+ goto st82
+ case 83:
+ goto st83
+ case 84:
+ goto st84
+ case 85:
+ goto st85
+ case 86:
+ goto st86
+ case 87:
+ goto st87
+ case 88:
+ goto st88
+ case 89:
+ goto st89
case 400:
goto st400
- case 101:
- goto st101
- case 102:
- goto st102
case 401:
goto st401
case 402:
goto st402
case 403:
goto st403
+ case 90:
+ goto st90
+ case 91:
+ goto st91
+ case 92:
+ goto st92
+ case 93:
+ goto st93
case 404:
goto st404
case 405:
goto st405
+ case 94:
+ goto st94
+ case 95:
+ goto st95
case 406:
goto st406
+ case 96:
+ goto st96
+ case 97:
+ goto st97
case 407:
goto st407
case 408:
goto st408
+ case 98:
+ goto st98
case 409:
goto st409
case 410:
goto st410
+ case 99:
+ goto st99
+ case 100:
+ goto st100
case 411:
goto st411
case 412:
@@ -679,26 +675,18 @@ _again:
goto st417
case 418:
goto st418
- case 103:
- goto st103
case 419:
goto st419
case 420:
goto st420
case 421:
goto st421
- case 104:
- goto st104
- case 105:
- goto st105
case 422:
goto st422
case 423:
goto st423
case 424:
goto st424
- case 106:
- goto st106
case 425:
goto st425
case 426:
@@ -707,18 +695,26 @@ _again:
goto st427
case 428:
goto st428
+ case 101:
+ goto st101
case 429:
goto st429
case 430:
goto st430
case 431:
goto st431
+ case 102:
+ goto st102
+ case 103:
+ goto st103
case 432:
goto st432
case 433:
goto st433
case 434:
goto st434
+ case 104:
+ goto st104
case 435:
goto st435
case 436:
@@ -739,8 +735,6 @@ _again:
goto st443
case 444:
goto st444
- case 107:
- goto st107
case 445:
goto st445
case 446:
@@ -761,6 +755,8 @@ _again:
goto st453
case 454:
goto st454
+ case 105:
+ goto st105
case 455:
goto st455
case 456:
@@ -785,26 +781,12 @@ _again:
goto st465
case 466:
goto st466
- case 108:
- goto st108
- case 109:
- goto st109
- case 110:
- goto st110
- case 111:
- goto st111
- case 112:
- goto st112
case 467:
goto st467
- case 113:
- goto st113
case 468:
goto st468
case 469:
goto st469
- case 114:
- goto st114
case 470:
goto st470
case 471:
@@ -819,70 +801,84 @@ _again:
goto st475
case 476:
goto st476
+ case 106:
+ goto st106
+ case 107:
+ goto st107
+ case 108:
+ goto st108
+ case 109:
+ goto st109
+ case 110:
+ goto st110
case 477:
goto st477
+ case 111:
+ goto st111
case 478:
goto st478
- case 115:
- goto st115
- case 116:
- goto st116
- case 117:
- goto st117
case 479:
goto st479
- case 118:
- goto st118
- case 119:
- goto st119
- case 120:
- goto st120
+ case 112:
+ goto st112
case 480:
goto st480
- case 121:
- goto st121
- case 122:
- goto st122
case 481:
goto st481
case 482:
goto st482
- case 123:
- goto st123
- case 124:
- goto st124
- case 125:
- goto st125
- case 126:
- goto st126
case 483:
goto st483
case 484:
goto st484
case 485:
goto st485
- case 127:
- goto st127
case 486:
goto st486
case 487:
goto st487
case 488:
goto st488
+ case 113:
+ goto st113
+ case 114:
+ goto st114
+ case 115:
+ goto st115
case 489:
goto st489
+ case 116:
+ goto st116
+ case 117:
+ goto st117
+ case 118:
+ goto st118
case 490:
goto st490
+ case 119:
+ goto st119
+ case 120:
+ goto st120
case 491:
goto st491
case 492:
goto st492
+ case 121:
+ goto st121
+ case 122:
+ goto st122
+ case 123:
+ goto st123
+ case 124:
+ goto st124
case 493:
goto st493
case 494:
goto st494
case 495:
goto st495
+ case 125:
+ goto st125
case 496:
goto st496
case 497:
@@ -903,10 +899,6 @@ _again:
goto st504
case 505:
goto st505
- case 128:
- goto st128
- case 129:
- goto st129
case 506:
goto st506
case 507:
@@ -925,82 +917,70 @@ _again:
goto st513
case 514:
goto st514
- case 130:
- goto st130
- case 131:
- goto st131
- case 132:
- goto st132
case 515:
goto st515
- case 133:
- goto st133
- case 134:
- goto st134
- case 135:
- goto st135
+ case 126:
+ goto st126
+ case 127:
+ goto st127
case 516:
goto st516
- case 136:
- goto st136
- case 137:
- goto st137
case 517:
goto st517
case 518:
goto st518
- case 138:
- goto st138
- case 139:
- goto st139
- case 140:
- goto st140
case 519:
goto st519
case 520:
goto st520
- case 141:
- goto st141
case 521:
goto st521
- case 142:
- goto st142
case 522:
goto st522
case 523:
goto st523
case 524:
goto st524
+ case 128:
+ goto st128
+ case 129:
+ goto st129
+ case 130:
+ goto st130
case 525:
goto st525
+ case 131:
+ goto st131
+ case 132:
+ goto st132
+ case 133:
+ goto st133
case 526:
goto st526
+ case 134:
+ goto st134
+ case 135:
+ goto st135
case 527:
goto st527
case 528:
goto st528
+ case 136:
+ goto st136
+ case 137:
+ goto st137
+ case 138:
+ goto st138
case 529:
goto st529
- case 143:
- goto st143
- case 144:
- goto st144
- case 145:
- goto st145
case 530:
goto st530
- case 146:
- goto st146
- case 147:
- goto st147
- case 148:
- goto st148
+ case 139:
+ goto st139
case 531:
goto st531
- case 149:
- goto st149
- case 150:
- goto st150
+ case 140:
+ goto st140
case 532:
goto st532
case 533:
@@ -1017,10 +997,26 @@ _again:
goto st538
case 539:
goto st539
- case 540:
- goto st540
+ case 141:
+ goto st141
+ case 142:
+ goto st142
+ case 143:
+ goto st143
+ case 540:
+ goto st540
+ case 144:
+ goto st144
+ case 145:
+ goto st145
+ case 146:
+ goto st146
case 541:
goto st541
+ case 147:
+ goto st147
+ case 148:
+ goto st148
case 542:
goto st542
case 543:
@@ -1041,24 +1037,16 @@ _again:
goto st550
case 551:
goto st551
- case 151:
- goto st151
- case 152:
- goto st152
case 552:
goto st552
case 553:
goto st553
case 554:
goto st554
- case 153:
- goto st153
case 555:
goto st555
case 556:
goto st556
- case 154:
- goto st154
case 557:
goto st557
case 558:
@@ -1069,16 +1057,24 @@ _again:
goto st560
case 561:
goto st561
+ case 149:
+ goto st149
+ case 150:
+ goto st150
case 562:
goto st562
case 563:
goto st563
case 564:
goto st564
+ case 151:
+ goto st151
case 565:
goto st565
case 566:
goto st566
+ case 152:
+ goto st152
case 567:
goto st567
case 568:
@@ -1095,14 +1091,8 @@ _again:
goto st573
case 574:
goto st574
- case 155:
- goto st155
- case 156:
- goto st156
case 575:
goto st575
- case 157:
- goto st157
case 576:
goto st576
case 577:
@@ -1119,42 +1109,20 @@ _again:
goto st582
case 583:
goto st583
- case 158:
- goto st158
- case 159:
- goto st159
- case 160:
- goto st160
case 584:
goto st584
- case 161:
- goto st161
- case 162:
- goto st162
- case 163:
- goto st163
+ case 153:
+ goto st153
+ case 154:
+ goto st154
case 585:
goto st585
- case 164:
- goto st164
- case 165:
- goto st165
+ case 155:
+ goto st155
case 586:
goto st586
case 587:
goto st587
- case 166:
- goto st166
- case 167:
- goto st167
- case 168:
- goto st168
- case 169:
- goto st169
- case 170:
- goto st170
- case 171:
- goto st171
case 588:
goto st588
case 589:
@@ -1167,14 +1135,42 @@ _again:
goto st592
case 593:
goto st593
+ case 156:
+ goto st156
+ case 157:
+ goto st157
+ case 158:
+ goto st158
case 594:
goto st594
+ case 159:
+ goto st159
+ case 160:
+ goto st160
+ case 161:
+ goto st161
case 595:
goto st595
+ case 162:
+ goto st162
+ case 163:
+ goto st163
case 596:
goto st596
case 597:
goto st597
+ case 164:
+ goto st164
+ case 165:
+ goto st165
+ case 166:
+ goto st166
+ case 167:
+ goto st167
+ case 168:
+ goto st168
+ case 169:
+ goto st169
case 598:
goto st598
case 599:
@@ -1193,26 +1189,16 @@ _again:
goto st605
case 606:
goto st606
- case 172:
- goto st172
- case 173:
- goto st173
- case 174:
- goto st174
case 607:
goto st607
case 608:
goto st608
case 609:
goto st609
- case 175:
- goto st175
case 610:
goto st610
case 611:
goto st611
- case 176:
- goto st176
case 612:
goto st612
case 613:
@@ -1223,100 +1209,104 @@ _again:
goto st615
case 616:
goto st616
- case 177:
- goto st177
- case 178:
- goto st178
- case 179:
- goto st179
+ case 170:
+ goto st170
+ case 171:
+ goto st171
+ case 172:
+ goto st172
case 617:
goto st617
- case 180:
- goto st180
- case 181:
- goto st181
- case 182:
- goto st182
case 618:
goto st618
- case 183:
- goto st183
- case 184:
- goto st184
case 619:
goto st619
+ case 173:
+ goto st173
case 620:
goto st620
- case 185:
- goto st185
case 621:
goto st621
+ case 174:
+ goto st174
case 622:
goto st622
- case 186:
- goto st186
- case 187:
- goto st187
- case 188:
- goto st188
case 623:
goto st623
- case 189:
- goto st189
- case 190:
- goto st190
case 624:
goto st624
case 625:
goto st625
case 626:
goto st626
+ case 175:
+ goto st175
+ case 176:
+ goto st176
+ case 177:
+ goto st177
case 627:
goto st627
+ case 178:
+ goto st178
+ case 179:
+ goto st179
+ case 180:
+ goto st180
case 628:
goto st628
+ case 181:
+ goto st181
+ case 182:
+ goto st182
case 629:
goto st629
case 630:
goto st630
+ case 183:
+ goto st183
case 631:
goto st631
- case 191:
- goto st191
- case 192:
- goto st192
- case 193:
- goto st193
case 632:
goto st632
- case 194:
- goto st194
- case 195:
- goto st195
- case 196:
- goto st196
case 633:
goto st633
- case 197:
- goto st197
- case 198:
- goto st198
+ case 184:
+ goto st184
+ case 185:
+ goto st185
+ case 186:
+ goto st186
case 634:
goto st634
+ case 187:
+ goto st187
+ case 188:
+ goto st188
+ case 189:
+ goto st189
case 635:
goto st635
- case 199:
- goto st199
- case 200:
- goto st200
- case 201:
- goto st201
+ case 190:
+ goto st190
+ case 191:
+ goto st191
case 636:
goto st636
case 637:
goto st637
+ case 192:
+ goto st192
+ case 193:
+ goto st193
+ case 194:
+ goto st194
case 638:
goto st638
+ case 195:
+ goto st195
+ case 196:
+ goto st196
case 639:
goto st639
case 640:
@@ -1333,14 +1323,36 @@ _again:
goto st645
case 646:
goto st646
+ case 197:
+ goto st197
+ case 198:
+ goto st198
+ case 199:
+ goto st199
case 647:
goto st647
+ case 200:
+ goto st200
+ case 201:
+ goto st201
+ case 202:
+ goto st202
case 648:
goto st648
+ case 203:
+ goto st203
+ case 204:
+ goto st204
case 649:
goto st649
case 650:
goto st650
+ case 205:
+ goto st205
+ case 206:
+ goto st206
+ case 207:
+ goto st207
case 651:
goto st651
case 652:
@@ -1349,22 +1361,8 @@ _again:
goto st653
case 654:
goto st654
- case 202:
- goto st202
- case 203:
- goto st203
- case 204:
- goto st204
- case 205:
- goto st205
- case 206:
- goto st206
case 655:
goto st655
- case 207:
- goto st207
- case 208:
- goto st208
case 656:
goto st656
case 657:
@@ -1383,46 +1381,32 @@ _again:
goto st663
case 664:
goto st664
- case 209:
- goto st209
- case 210:
- goto st210
- case 211:
- goto st211
case 665:
goto st665
- case 212:
- goto st212
- case 213:
- goto st213
- case 214:
- goto st214
case 666:
goto st666
- case 215:
- goto st215
- case 216:
- goto st216
case 667:
goto st667
case 668:
goto st668
- case 217:
- goto st217
- case 218:
- goto st218
- case 219:
- goto st219
- case 220:
- goto st220
case 669:
goto st669
- case 221:
- goto st221
- case 222:
- goto st222
+ case 208:
+ goto st208
+ case 209:
+ goto st209
+ case 210:
+ goto st210
+ case 211:
+ goto st211
+ case 212:
+ goto st212
case 670:
goto st670
+ case 213:
+ goto st213
+ case 214:
+ goto st214
case 671:
goto st671
case 672:
@@ -1437,42 +1421,46 @@ _again:
goto st676
case 677:
goto st677
- case 223:
- goto st223
- case 224:
- goto st224
- case 225:
- goto st225
case 678:
goto st678
- case 226:
- goto st226
- case 227:
- goto st227
- case 228:
- goto st228
case 679:
goto st679
- case 229:
- goto st229
- case 230:
- goto st230
+ case 215:
+ goto st215
+ case 216:
+ goto st216
+ case 217:
+ goto st217
case 680:
goto st680
+ case 218:
+ goto st218
+ case 219:
+ goto st219
+ case 220:
+ goto st220
case 681:
goto st681
- case 231:
- goto st231
- case 232:
- goto st232
- case 233:
- goto st233
+ case 221:
+ goto st221
+ case 222:
+ goto st222
case 682:
goto st682
case 683:
goto st683
+ case 223:
+ goto st223
+ case 224:
+ goto st224
+ case 225:
+ goto st225
case 684:
goto st684
+ case 226:
+ goto st226
+ case 227:
+ goto st227
case 685:
goto st685
case 686:
@@ -1489,8 +1477,18 @@ _again:
goto st691
case 692:
goto st692
+ case 228:
+ goto st228
+ case 229:
+ goto st229
+ case 230:
+ goto st230
case 693:
goto st693
+ case 231:
+ goto st231
+ case 232:
+ goto st232
case 694:
goto st694
case 695:
@@ -1505,24 +1503,38 @@ _again:
goto st699
case 700:
goto st700
+ case 701:
+ goto st701
+ case 233:
+ goto st233
case 234:
goto st234
case 235:
goto st235
- case 701:
- goto st701
+ case 702:
+ goto st702
case 236:
goto st236
case 237:
goto st237
- case 702:
- goto st702
+ case 238:
+ goto st238
case 703:
goto st703
+ case 239:
+ goto st239
+ case 240:
+ goto st240
case 704:
goto st704
case 705:
goto st705
+ case 241:
+ goto st241
+ case 242:
+ goto st242
+ case 243:
+ goto st243
case 706:
goto st706
case 707:
@@ -1531,70 +1543,124 @@ _again:
goto st708
case 709:
goto st709
- case 238:
- goto st238
- case 239:
- goto st239
- case 240:
- goto st240
case 710:
goto st710
- case 241:
- goto st241
- case 242:
- goto st242
- case 243:
- goto st243
case 711:
goto st711
- case 244:
- goto st244
- case 245:
- goto st245
case 712:
goto st712
case 713:
goto st713
- case 246:
- goto st246
- case 247:
- goto st247
case 714:
goto st714
- case 250:
- goto st250
+ case 715:
+ goto st715
+ case 716:
+ goto st716
case 717:
goto st717
case 718:
goto st718
+ case 719:
+ goto st719
+ case 720:
+ goto st720
+ case 721:
+ goto st721
+ case 722:
+ goto st722
+ case 723:
+ goto st723
+ case 724:
+ goto st724
+ case 244:
+ goto st244
+ case 245:
+ goto st245
+ case 725:
+ goto st725
+ case 246:
+ goto st246
+ case 247:
+ goto st247
+ case 726:
+ goto st726
+ case 727:
+ goto st727
+ case 728:
+ goto st728
+ case 729:
+ goto st729
+ case 730:
+ goto st730
+ case 731:
+ goto st731
+ case 732:
+ goto st732
+ case 733:
+ goto st733
+ case 248:
+ goto st248
+ case 249:
+ goto st249
+ case 250:
+ goto st250
+ case 734:
+ goto st734
case 251:
goto st251
case 252:
goto st252
case 253:
goto st253
+ case 735:
+ goto st735
case 254:
goto st254
- case 719:
- goto st719
case 255:
goto st255
- case 720:
- goto st720
+ case 736:
+ goto st736
+ case 737:
+ goto st737
case 256:
goto st256
case 257:
goto st257
+ case 738:
+ goto st738
+ case 260:
+ goto st260
+ case 740:
+ goto st740
+ case 741:
+ goto st741
+ case 261:
+ goto st261
+ case 262:
+ goto st262
+ case 263:
+ goto st263
+ case 264:
+ goto st264
+ case 742:
+ goto st742
+ case 265:
+ goto st265
+ case 743:
+ goto st743
+ case 266:
+ goto st266
+ case 267:
+ goto st267
+ case 268:
+ goto st268
+ case 739:
+ goto st739
case 258:
goto st258
- case 715:
- goto st715
- case 716:
- goto st716
- case 248:
- goto st248
- case 249:
- goto st249
+ case 259:
+ goto st259
}
if ( m.p)++; ( m.p) == ( m.pe) {
@@ -1602,8 +1668,8 @@ _again:
}
_resume:
switch ( m.cs) {
- case 259:
- goto st_case_259
+ case 269:
+ goto st_case_269
case 1:
goto st_case_1
case 2:
@@ -1618,16 +1684,16 @@ _resume:
goto st_case_5
case 6:
goto st_case_6
+ case 270:
+ goto st_case_270
+ case 271:
+ goto st_case_271
+ case 272:
+ goto st_case_272
case 7:
goto st_case_7
case 8:
goto st_case_8
- case 260:
- goto st_case_260
- case 261:
- goto st_case_261
- case 262:
- goto st_case_262
case 9:
goto st_case_9
case 10:
@@ -1674,46 +1740,22 @@ _resume:
goto st_case_30
case 31:
goto st_case_31
- case 32:
- goto st_case_32
- case 33:
- goto st_case_33
- case 263:
- goto st_case_263
- case 264:
- goto st_case_264
- case 34:
- goto st_case_34
- case 35:
- goto st_case_35
- case 265:
- goto st_case_265
- case 266:
- goto st_case_266
- case 267:
- goto st_case_267
- case 36:
- goto st_case_36
- case 268:
- goto st_case_268
- case 269:
- goto st_case_269
- case 270:
- goto st_case_270
- case 271:
- goto st_case_271
- case 272:
- goto st_case_272
case 273:
goto st_case_273
case 274:
goto st_case_274
+ case 32:
+ goto st_case_32
+ case 33:
+ goto st_case_33
case 275:
goto st_case_275
case 276:
goto st_case_276
case 277:
goto st_case_277
+ case 34:
+ goto st_case_34
case 278:
goto st_case_278
case 279:
@@ -1730,26 +1772,12 @@ _resume:
goto st_case_284
case 285:
goto st_case_285
- case 37:
- goto st_case_37
- case 38:
- goto st_case_38
case 286:
goto st_case_286
case 287:
goto st_case_287
case 288:
goto st_case_288
- case 39:
- goto st_case_39
- case 40:
- goto st_case_40
- case 41:
- goto st_case_41
- case 42:
- goto st_case_42
- case 43:
- goto st_case_43
case 289:
goto st_case_289
case 290:
@@ -1758,20 +1786,32 @@ _resume:
goto st_case_291
case 292:
goto st_case_292
- case 44:
- goto st_case_44
case 293:
goto st_case_293
case 294:
goto st_case_294
case 295:
goto st_case_295
+ case 35:
+ goto st_case_35
+ case 36:
+ goto st_case_36
case 296:
goto st_case_296
case 297:
goto st_case_297
case 298:
goto st_case_298
+ case 37:
+ goto st_case_37
+ case 38:
+ goto st_case_38
+ case 39:
+ goto st_case_39
+ case 40:
+ goto st_case_40
+ case 41:
+ goto st_case_41
case 299:
goto st_case_299
case 300:
@@ -1780,6 +1820,8 @@ _resume:
goto st_case_301
case 302:
goto st_case_302
+ case 42:
+ goto st_case_42
case 303:
goto st_case_303
case 304:
@@ -1804,6 +1846,30 @@ _resume:
goto st_case_313
case 314:
goto st_case_314
+ case 315:
+ goto st_case_315
+ case 316:
+ goto st_case_316
+ case 317:
+ goto st_case_317
+ case 318:
+ goto st_case_318
+ case 319:
+ goto st_case_319
+ case 320:
+ goto st_case_320
+ case 321:
+ goto st_case_321
+ case 322:
+ goto st_case_322
+ case 323:
+ goto st_case_323
+ case 324:
+ goto st_case_324
+ case 43:
+ goto st_case_43
+ case 44:
+ goto st_case_44
case 45:
goto st_case_45
case 46:
@@ -1820,16 +1886,16 @@ _resume:
goto st_case_51
case 52:
goto st_case_52
+ case 325:
+ goto st_case_325
+ case 326:
+ goto st_case_326
+ case 327:
+ goto st_case_327
case 53:
goto st_case_53
case 54:
goto st_case_54
- case 315:
- goto st_case_315
- case 316:
- goto st_case_316
- case 317:
- goto st_case_317
case 55:
goto st_case_55
case 56:
@@ -1838,36 +1904,12 @@ _resume:
goto st_case_57
case 58:
goto st_case_58
- case 59:
- goto st_case_59
- case 60:
- goto st_case_60
- case 318:
- goto st_case_318
- case 319:
- goto st_case_319
- case 61:
- goto st_case_61
- case 320:
- goto st_case_320
- case 321:
- goto st_case_321
- case 322:
- goto st_case_322
- case 323:
- goto st_case_323
- case 324:
- goto st_case_324
- case 325:
- goto st_case_325
- case 326:
- goto st_case_326
- case 327:
- goto st_case_327
case 328:
goto st_case_328
case 329:
goto st_case_329
+ case 59:
+ goto st_case_59
case 330:
goto st_case_330
case 331:
@@ -1888,16 +1930,12 @@ _resume:
goto st_case_338
case 339:
goto st_case_339
- case 62:
- goto st_case_62
case 340:
goto st_case_340
case 341:
goto st_case_341
case 342:
goto st_case_342
- case 63:
- goto st_case_63
case 343:
goto st_case_343
case 344:
@@ -1912,12 +1950,16 @@ _resume:
goto st_case_348
case 349:
goto st_case_349
+ case 60:
+ goto st_case_60
case 350:
goto st_case_350
case 351:
goto st_case_351
case 352:
goto st_case_352
+ case 61:
+ goto st_case_61
case 353:
goto st_case_353
case 354:
@@ -1938,68 +1980,68 @@ _resume:
goto st_case_361
case 362:
goto st_case_362
- case 64:
- goto st_case_64
- case 65:
- goto st_case_65
- case 66:
- goto st_case_66
- case 67:
- goto st_case_67
- case 68:
- goto st_case_68
case 363:
goto st_case_363
- case 69:
- goto st_case_69
- case 70:
- goto st_case_70
- case 71:
- goto st_case_71
- case 72:
- goto st_case_72
- case 73:
- goto st_case_73
case 364:
goto st_case_364
case 365:
goto st_case_365
case 366:
goto st_case_366
- case 74:
- goto st_case_74
- case 75:
- goto st_case_75
case 367:
goto st_case_367
case 368:
goto st_case_368
- case 76:
- goto st_case_76
case 369:
goto st_case_369
- case 77:
- goto st_case_77
case 370:
goto st_case_370
case 371:
goto st_case_371
case 372:
goto st_case_372
+ case 62:
+ goto st_case_62
+ case 63:
+ goto st_case_63
+ case 64:
+ goto st_case_64
+ case 65:
+ goto st_case_65
+ case 66:
+ goto st_case_66
case 373:
goto st_case_373
+ case 67:
+ goto st_case_67
+ case 68:
+ goto st_case_68
+ case 69:
+ goto st_case_69
+ case 70:
+ goto st_case_70
+ case 71:
+ goto st_case_71
case 374:
goto st_case_374
case 375:
goto st_case_375
case 376:
goto st_case_376
+ case 72:
+ goto st_case_72
+ case 73:
+ goto st_case_73
+ case 74:
+ goto st_case_74
case 377:
goto st_case_377
case 378:
goto st_case_378
case 379:
goto st_case_379
+ case 75:
+ goto st_case_75
case 380:
goto st_case_380
case 381:
@@ -2020,6 +2062,30 @@ _resume:
goto st_case_388
case 389:
goto st_case_389
+ case 390:
+ goto st_case_390
+ case 391:
+ goto st_case_391
+ case 392:
+ goto st_case_392
+ case 393:
+ goto st_case_393
+ case 394:
+ goto st_case_394
+ case 395:
+ goto st_case_395
+ case 396:
+ goto st_case_396
+ case 397:
+ goto st_case_397
+ case 398:
+ goto st_case_398
+ case 399:
+ goto st_case_399
+ case 76:
+ goto st_case_76
+ case 77:
+ goto st_case_77
case 78:
goto st_case_78
case 79:
@@ -2044,74 +2110,50 @@ _resume:
goto st_case_88
case 89:
goto st_case_89
+ case 400:
+ goto st_case_400
+ case 401:
+ goto st_case_401
+ case 402:
+ goto st_case_402
+ case 403:
+ goto st_case_403
case 90:
goto st_case_90
case 91:
goto st_case_91
- case 390:
- goto st_case_390
- case 391:
- goto st_case_391
- case 392:
- goto st_case_392
- case 393:
- goto st_case_393
case 92:
goto st_case_92
case 93:
goto st_case_93
+ case 404:
+ goto st_case_404
+ case 405:
+ goto st_case_405
case 94:
goto st_case_94
case 95:
goto st_case_95
- case 394:
- goto st_case_394
- case 395:
- goto st_case_395
+ case 406:
+ goto st_case_406
case 96:
goto st_case_96
case 97:
goto st_case_97
- case 396:
- goto st_case_396
- case 98:
- goto st_case_98
- case 99:
- goto st_case_99
- case 397:
- goto st_case_397
- case 398:
- goto st_case_398
- case 100:
- goto st_case_100
- case 399:
- goto st_case_399
- case 400:
- goto st_case_400
- case 101:
- goto st_case_101
- case 102:
- goto st_case_102
- case 401:
- goto st_case_401
- case 402:
- goto st_case_402
- case 403:
- goto st_case_403
- case 404:
- goto st_case_404
- case 405:
- goto st_case_405
- case 406:
- goto st_case_406
case 407:
goto st_case_407
case 408:
goto st_case_408
+ case 98:
+ goto st_case_98
case 409:
goto st_case_409
case 410:
goto st_case_410
+ case 99:
+ goto st_case_99
+ case 100:
+ goto st_case_100
case 411:
goto st_case_411
case 412:
@@ -2128,26 +2170,18 @@ _resume:
goto st_case_417
case 418:
goto st_case_418
- case 103:
- goto st_case_103
case 419:
goto st_case_419
case 420:
goto st_case_420
case 421:
goto st_case_421
- case 104:
- goto st_case_104
- case 105:
- goto st_case_105
case 422:
goto st_case_422
case 423:
goto st_case_423
case 424:
goto st_case_424
- case 106:
- goto st_case_106
case 425:
goto st_case_425
case 426:
@@ -2156,18 +2190,26 @@ _resume:
goto st_case_427
case 428:
goto st_case_428
+ case 101:
+ goto st_case_101
case 429:
goto st_case_429
case 430:
goto st_case_430
case 431:
goto st_case_431
+ case 102:
+ goto st_case_102
+ case 103:
+ goto st_case_103
case 432:
goto st_case_432
case 433:
goto st_case_433
case 434:
goto st_case_434
+ case 104:
+ goto st_case_104
case 435:
goto st_case_435
case 436:
@@ -2188,8 +2230,6 @@ _resume:
goto st_case_443
case 444:
goto st_case_444
- case 107:
- goto st_case_107
case 445:
goto st_case_445
case 446:
@@ -2210,6 +2250,8 @@ _resume:
goto st_case_453
case 454:
goto st_case_454
+ case 105:
+ goto st_case_105
case 455:
goto st_case_455
case 456:
@@ -2234,26 +2276,12 @@ _resume:
goto st_case_465
case 466:
goto st_case_466
- case 108:
- goto st_case_108
- case 109:
- goto st_case_109
- case 110:
- goto st_case_110
- case 111:
- goto st_case_111
- case 112:
- goto st_case_112
case 467:
goto st_case_467
- case 113:
- goto st_case_113
case 468:
goto st_case_468
case 469:
goto st_case_469
- case 114:
- goto st_case_114
case 470:
goto st_case_470
case 471:
@@ -2268,70 +2296,84 @@ _resume:
goto st_case_475
case 476:
goto st_case_476
+ case 106:
+ goto st_case_106
+ case 107:
+ goto st_case_107
+ case 108:
+ goto st_case_108
+ case 109:
+ goto st_case_109
+ case 110:
+ goto st_case_110
case 477:
goto st_case_477
+ case 111:
+ goto st_case_111
case 478:
goto st_case_478
- case 115:
- goto st_case_115
- case 116:
- goto st_case_116
- case 117:
- goto st_case_117
case 479:
goto st_case_479
- case 118:
- goto st_case_118
- case 119:
- goto st_case_119
- case 120:
- goto st_case_120
+ case 112:
+ goto st_case_112
case 480:
goto st_case_480
- case 121:
- goto st_case_121
- case 122:
- goto st_case_122
case 481:
goto st_case_481
case 482:
goto st_case_482
- case 123:
- goto st_case_123
- case 124:
- goto st_case_124
- case 125:
- goto st_case_125
- case 126:
- goto st_case_126
case 483:
goto st_case_483
case 484:
goto st_case_484
case 485:
goto st_case_485
- case 127:
- goto st_case_127
case 486:
goto st_case_486
case 487:
goto st_case_487
case 488:
goto st_case_488
+ case 113:
+ goto st_case_113
+ case 114:
+ goto st_case_114
+ case 115:
+ goto st_case_115
case 489:
goto st_case_489
+ case 116:
+ goto st_case_116
+ case 117:
+ goto st_case_117
+ case 118:
+ goto st_case_118
case 490:
goto st_case_490
+ case 119:
+ goto st_case_119
+ case 120:
+ goto st_case_120
case 491:
goto st_case_491
case 492:
goto st_case_492
+ case 121:
+ goto st_case_121
+ case 122:
+ goto st_case_122
+ case 123:
+ goto st_case_123
+ case 124:
+ goto st_case_124
case 493:
goto st_case_493
case 494:
goto st_case_494
case 495:
goto st_case_495
+ case 125:
+ goto st_case_125
case 496:
goto st_case_496
case 497:
@@ -2352,10 +2394,6 @@ _resume:
goto st_case_504
case 505:
goto st_case_505
- case 128:
- goto st_case_128
- case 129:
- goto st_case_129
case 506:
goto st_case_506
case 507:
@@ -2374,82 +2412,70 @@ _resume:
goto st_case_513
case 514:
goto st_case_514
- case 130:
- goto st_case_130
- case 131:
- goto st_case_131
- case 132:
- goto st_case_132
case 515:
goto st_case_515
- case 133:
- goto st_case_133
- case 134:
- goto st_case_134
- case 135:
- goto st_case_135
+ case 126:
+ goto st_case_126
+ case 127:
+ goto st_case_127
case 516:
goto st_case_516
- case 136:
- goto st_case_136
- case 137:
- goto st_case_137
case 517:
goto st_case_517
case 518:
goto st_case_518
- case 138:
- goto st_case_138
- case 139:
- goto st_case_139
- case 140:
- goto st_case_140
case 519:
goto st_case_519
case 520:
goto st_case_520
- case 141:
- goto st_case_141
case 521:
goto st_case_521
- case 142:
- goto st_case_142
case 522:
goto st_case_522
case 523:
goto st_case_523
case 524:
goto st_case_524
+ case 128:
+ goto st_case_128
+ case 129:
+ goto st_case_129
+ case 130:
+ goto st_case_130
case 525:
goto st_case_525
+ case 131:
+ goto st_case_131
+ case 132:
+ goto st_case_132
+ case 133:
+ goto st_case_133
case 526:
goto st_case_526
+ case 134:
+ goto st_case_134
+ case 135:
+ goto st_case_135
case 527:
goto st_case_527
case 528:
goto st_case_528
+ case 136:
+ goto st_case_136
+ case 137:
+ goto st_case_137
+ case 138:
+ goto st_case_138
case 529:
goto st_case_529
- case 143:
- goto st_case_143
- case 144:
- goto st_case_144
- case 145:
- goto st_case_145
case 530:
goto st_case_530
- case 146:
- goto st_case_146
- case 147:
- goto st_case_147
- case 148:
- goto st_case_148
+ case 139:
+ goto st_case_139
case 531:
goto st_case_531
- case 149:
- goto st_case_149
- case 150:
- goto st_case_150
+ case 140:
+ goto st_case_140
case 532:
goto st_case_532
case 533:
@@ -2466,10 +2492,26 @@ _resume:
goto st_case_538
case 539:
goto st_case_539
+ case 141:
+ goto st_case_141
+ case 142:
+ goto st_case_142
+ case 143:
+ goto st_case_143
case 540:
goto st_case_540
+ case 144:
+ goto st_case_144
+ case 145:
+ goto st_case_145
+ case 146:
+ goto st_case_146
case 541:
goto st_case_541
+ case 147:
+ goto st_case_147
+ case 148:
+ goto st_case_148
case 542:
goto st_case_542
case 543:
@@ -2490,24 +2532,16 @@ _resume:
goto st_case_550
case 551:
goto st_case_551
- case 151:
- goto st_case_151
- case 152:
- goto st_case_152
case 552:
goto st_case_552
case 553:
goto st_case_553
case 554:
goto st_case_554
- case 153:
- goto st_case_153
case 555:
goto st_case_555
case 556:
goto st_case_556
- case 154:
- goto st_case_154
case 557:
goto st_case_557
case 558:
@@ -2518,16 +2552,24 @@ _resume:
goto st_case_560
case 561:
goto st_case_561
+ case 149:
+ goto st_case_149
+ case 150:
+ goto st_case_150
case 562:
goto st_case_562
case 563:
goto st_case_563
case 564:
goto st_case_564
+ case 151:
+ goto st_case_151
case 565:
goto st_case_565
case 566:
goto st_case_566
+ case 152:
+ goto st_case_152
case 567:
goto st_case_567
case 568:
@@ -2544,14 +2586,8 @@ _resume:
goto st_case_573
case 574:
goto st_case_574
- case 155:
- goto st_case_155
- case 156:
- goto st_case_156
case 575:
goto st_case_575
- case 157:
- goto st_case_157
case 576:
goto st_case_576
case 577:
@@ -2568,42 +2604,20 @@ _resume:
goto st_case_582
case 583:
goto st_case_583
- case 158:
- goto st_case_158
- case 159:
- goto st_case_159
- case 160:
- goto st_case_160
case 584:
goto st_case_584
- case 161:
- goto st_case_161
- case 162:
- goto st_case_162
- case 163:
- goto st_case_163
+ case 153:
+ goto st_case_153
+ case 154:
+ goto st_case_154
case 585:
goto st_case_585
- case 164:
- goto st_case_164
- case 165:
- goto st_case_165
+ case 155:
+ goto st_case_155
case 586:
goto st_case_586
case 587:
goto st_case_587
- case 166:
- goto st_case_166
- case 167:
- goto st_case_167
- case 168:
- goto st_case_168
- case 169:
- goto st_case_169
- case 170:
- goto st_case_170
- case 171:
- goto st_case_171
case 588:
goto st_case_588
case 589:
@@ -2616,24 +2630,52 @@ _resume:
goto st_case_592
case 593:
goto st_case_593
+ case 156:
+ goto st_case_156
+ case 157:
+ goto st_case_157
+ case 158:
+ goto st_case_158
case 594:
goto st_case_594
+ case 159:
+ goto st_case_159
+ case 160:
+ goto st_case_160
+ case 161:
+ goto st_case_161
case 595:
goto st_case_595
+ case 162:
+ goto st_case_162
+ case 163:
+ goto st_case_163
case 596:
goto st_case_596
case 597:
goto st_case_597
- case 598:
- goto st_case_598
- case 599:
- goto st_case_599
- case 600:
- goto st_case_600
- case 601:
- goto st_case_601
- case 602:
- goto st_case_602
+ case 164:
+ goto st_case_164
+ case 165:
+ goto st_case_165
+ case 166:
+ goto st_case_166
+ case 167:
+ goto st_case_167
+ case 168:
+ goto st_case_168
+ case 169:
+ goto st_case_169
+ case 598:
+ goto st_case_598
+ case 599:
+ goto st_case_599
+ case 600:
+ goto st_case_600
+ case 601:
+ goto st_case_601
+ case 602:
+ goto st_case_602
case 603:
goto st_case_603
case 604:
@@ -2642,26 +2684,16 @@ _resume:
goto st_case_605
case 606:
goto st_case_606
- case 172:
- goto st_case_172
- case 173:
- goto st_case_173
- case 174:
- goto st_case_174
case 607:
goto st_case_607
case 608:
goto st_case_608
case 609:
goto st_case_609
- case 175:
- goto st_case_175
case 610:
goto st_case_610
case 611:
goto st_case_611
- case 176:
- goto st_case_176
case 612:
goto st_case_612
case 613:
@@ -2672,100 +2704,104 @@ _resume:
goto st_case_615
case 616:
goto st_case_616
- case 177:
- goto st_case_177
- case 178:
- goto st_case_178
- case 179:
- goto st_case_179
+ case 170:
+ goto st_case_170
+ case 171:
+ goto st_case_171
+ case 172:
+ goto st_case_172
case 617:
goto st_case_617
- case 180:
- goto st_case_180
- case 181:
- goto st_case_181
- case 182:
- goto st_case_182
case 618:
goto st_case_618
- case 183:
- goto st_case_183
- case 184:
- goto st_case_184
case 619:
goto st_case_619
+ case 173:
+ goto st_case_173
case 620:
goto st_case_620
- case 185:
- goto st_case_185
case 621:
goto st_case_621
+ case 174:
+ goto st_case_174
case 622:
goto st_case_622
- case 186:
- goto st_case_186
- case 187:
- goto st_case_187
- case 188:
- goto st_case_188
case 623:
goto st_case_623
- case 189:
- goto st_case_189
- case 190:
- goto st_case_190
case 624:
goto st_case_624
case 625:
goto st_case_625
case 626:
goto st_case_626
+ case 175:
+ goto st_case_175
+ case 176:
+ goto st_case_176
+ case 177:
+ goto st_case_177
case 627:
goto st_case_627
+ case 178:
+ goto st_case_178
+ case 179:
+ goto st_case_179
+ case 180:
+ goto st_case_180
case 628:
goto st_case_628
+ case 181:
+ goto st_case_181
+ case 182:
+ goto st_case_182
case 629:
goto st_case_629
case 630:
goto st_case_630
+ case 183:
+ goto st_case_183
case 631:
goto st_case_631
- case 191:
- goto st_case_191
- case 192:
- goto st_case_192
- case 193:
- goto st_case_193
case 632:
goto st_case_632
- case 194:
- goto st_case_194
- case 195:
- goto st_case_195
- case 196:
- goto st_case_196
case 633:
goto st_case_633
- case 197:
- goto st_case_197
- case 198:
- goto st_case_198
+ case 184:
+ goto st_case_184
+ case 185:
+ goto st_case_185
+ case 186:
+ goto st_case_186
case 634:
goto st_case_634
+ case 187:
+ goto st_case_187
+ case 188:
+ goto st_case_188
+ case 189:
+ goto st_case_189
case 635:
goto st_case_635
- case 199:
- goto st_case_199
- case 200:
- goto st_case_200
- case 201:
- goto st_case_201
+ case 190:
+ goto st_case_190
+ case 191:
+ goto st_case_191
case 636:
goto st_case_636
case 637:
goto st_case_637
+ case 192:
+ goto st_case_192
+ case 193:
+ goto st_case_193
+ case 194:
+ goto st_case_194
case 638:
goto st_case_638
+ case 195:
+ goto st_case_195
+ case 196:
+ goto st_case_196
case 639:
goto st_case_639
case 640:
@@ -2782,14 +2818,36 @@ _resume:
goto st_case_645
case 646:
goto st_case_646
+ case 197:
+ goto st_case_197
+ case 198:
+ goto st_case_198
+ case 199:
+ goto st_case_199
case 647:
goto st_case_647
+ case 200:
+ goto st_case_200
+ case 201:
+ goto st_case_201
+ case 202:
+ goto st_case_202
case 648:
goto st_case_648
+ case 203:
+ goto st_case_203
+ case 204:
+ goto st_case_204
case 649:
goto st_case_649
case 650:
goto st_case_650
+ case 205:
+ goto st_case_205
+ case 206:
+ goto st_case_206
+ case 207:
+ goto st_case_207
case 651:
goto st_case_651
case 652:
@@ -2798,22 +2856,8 @@ _resume:
goto st_case_653
case 654:
goto st_case_654
- case 202:
- goto st_case_202
- case 203:
- goto st_case_203
- case 204:
- goto st_case_204
- case 205:
- goto st_case_205
- case 206:
- goto st_case_206
case 655:
goto st_case_655
- case 207:
- goto st_case_207
- case 208:
- goto st_case_208
case 656:
goto st_case_656
case 657:
@@ -2832,46 +2876,32 @@ _resume:
goto st_case_663
case 664:
goto st_case_664
- case 209:
- goto st_case_209
- case 210:
- goto st_case_210
- case 211:
- goto st_case_211
case 665:
goto st_case_665
- case 212:
- goto st_case_212
- case 213:
- goto st_case_213
- case 214:
- goto st_case_214
case 666:
goto st_case_666
- case 215:
- goto st_case_215
- case 216:
- goto st_case_216
case 667:
goto st_case_667
case 668:
goto st_case_668
- case 217:
- goto st_case_217
- case 218:
- goto st_case_218
- case 219:
- goto st_case_219
- case 220:
- goto st_case_220
case 669:
goto st_case_669
- case 221:
- goto st_case_221
- case 222:
- goto st_case_222
+ case 208:
+ goto st_case_208
+ case 209:
+ goto st_case_209
+ case 210:
+ goto st_case_210
+ case 211:
+ goto st_case_211
+ case 212:
+ goto st_case_212
case 670:
goto st_case_670
+ case 213:
+ goto st_case_213
+ case 214:
+ goto st_case_214
case 671:
goto st_case_671
case 672:
@@ -2886,44 +2916,48 @@ _resume:
goto st_case_676
case 677:
goto st_case_677
- case 223:
- goto st_case_223
- case 224:
- goto st_case_224
- case 225:
- goto st_case_225
case 678:
goto st_case_678
- case 226:
- goto st_case_226
- case 227:
- goto st_case_227
- case 228:
- goto st_case_228
case 679:
goto st_case_679
- case 229:
- goto st_case_229
- case 230:
- goto st_case_230
+ case 215:
+ goto st_case_215
+ case 216:
+ goto st_case_216
+ case 217:
+ goto st_case_217
case 680:
goto st_case_680
+ case 218:
+ goto st_case_218
+ case 219:
+ goto st_case_219
+ case 220:
+ goto st_case_220
case 681:
goto st_case_681
- case 231:
- goto st_case_231
- case 232:
- goto st_case_232
- case 233:
- goto st_case_233
+ case 221:
+ goto st_case_221
+ case 222:
+ goto st_case_222
case 682:
goto st_case_682
case 683:
goto st_case_683
- case 684:
- goto st_case_684
- case 685:
- goto st_case_685
+ case 223:
+ goto st_case_223
+ case 224:
+ goto st_case_224
+ case 225:
+ goto st_case_225
+ case 684:
+ goto st_case_684
+ case 226:
+ goto st_case_226
+ case 227:
+ goto st_case_227
+ case 685:
+ goto st_case_685
case 686:
goto st_case_686
case 687:
@@ -2938,8 +2972,18 @@ _resume:
goto st_case_691
case 692:
goto st_case_692
+ case 228:
+ goto st_case_228
+ case 229:
+ goto st_case_229
+ case 230:
+ goto st_case_230
case 693:
goto st_case_693
+ case 231:
+ goto st_case_231
+ case 232:
+ goto st_case_232
case 694:
goto st_case_694
case 695:
@@ -2954,24 +2998,38 @@ _resume:
goto st_case_699
case 700:
goto st_case_700
+ case 701:
+ goto st_case_701
+ case 233:
+ goto st_case_233
case 234:
goto st_case_234
case 235:
goto st_case_235
- case 701:
- goto st_case_701
+ case 702:
+ goto st_case_702
case 236:
goto st_case_236
case 237:
goto st_case_237
- case 702:
- goto st_case_702
+ case 238:
+ goto st_case_238
case 703:
goto st_case_703
+ case 239:
+ goto st_case_239
+ case 240:
+ goto st_case_240
case 704:
goto st_case_704
case 705:
goto st_case_705
+ case 241:
+ goto st_case_241
+ case 242:
+ goto st_case_242
+ case 243:
+ goto st_case_243
case 706:
goto st_case_706
case 707:
@@ -2980,109 +3038,163 @@ _resume:
goto st_case_708
case 709:
goto st_case_709
- case 238:
- goto st_case_238
- case 239:
- goto st_case_239
- case 240:
- goto st_case_240
case 710:
goto st_case_710
- case 241:
- goto st_case_241
- case 242:
- goto st_case_242
- case 243:
- goto st_case_243
case 711:
goto st_case_711
- case 244:
- goto st_case_244
- case 245:
- goto st_case_245
case 712:
goto st_case_712
case 713:
goto st_case_713
- case 246:
- goto st_case_246
- case 247:
- goto st_case_247
case 714:
goto st_case_714
- case 250:
- goto st_case_250
+ case 715:
+ goto st_case_715
+ case 716:
+ goto st_case_716
case 717:
goto st_case_717
case 718:
goto st_case_718
+ case 719:
+ goto st_case_719
+ case 720:
+ goto st_case_720
+ case 721:
+ goto st_case_721
+ case 722:
+ goto st_case_722
+ case 723:
+ goto st_case_723
+ case 724:
+ goto st_case_724
+ case 244:
+ goto st_case_244
+ case 245:
+ goto st_case_245
+ case 725:
+ goto st_case_725
+ case 246:
+ goto st_case_246
+ case 247:
+ goto st_case_247
+ case 726:
+ goto st_case_726
+ case 727:
+ goto st_case_727
+ case 728:
+ goto st_case_728
+ case 729:
+ goto st_case_729
+ case 730:
+ goto st_case_730
+ case 731:
+ goto st_case_731
+ case 732:
+ goto st_case_732
+ case 733:
+ goto st_case_733
+ case 248:
+ goto st_case_248
+ case 249:
+ goto st_case_249
+ case 250:
+ goto st_case_250
+ case 734:
+ goto st_case_734
case 251:
goto st_case_251
case 252:
goto st_case_252
case 253:
goto st_case_253
+ case 735:
+ goto st_case_735
case 254:
goto st_case_254
- case 719:
- goto st_case_719
case 255:
goto st_case_255
- case 720:
- goto st_case_720
+ case 736:
+ goto st_case_736
+ case 737:
+ goto st_case_737
case 256:
goto st_case_256
case 257:
goto st_case_257
+ case 738:
+ goto st_case_738
+ case 260:
+ goto st_case_260
+ case 740:
+ goto st_case_740
+ case 741:
+ goto st_case_741
+ case 261:
+ goto st_case_261
+ case 262:
+ goto st_case_262
+ case 263:
+ goto st_case_263
+ case 264:
+ goto st_case_264
+ case 742:
+ goto st_case_742
+ case 265:
+ goto st_case_265
+ case 743:
+ goto st_case_743
+ case 266:
+ goto st_case_266
+ case 267:
+ goto st_case_267
+ case 268:
+ goto st_case_268
+ case 739:
+ goto st_case_739
case 258:
goto st_case_258
- case 715:
- goto st_case_715
- case 716:
- goto st_case_716
- case 248:
- goto st_case_248
- case 249:
- goto st_case_249
+ case 259:
+ goto st_case_259
}
goto st_out
- st259:
+ st269:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof259
+ goto _test_eof269
}
- st_case_259:
+ st_case_269:
switch ( m.data)[( m.p)] {
case 10:
- goto tr35
+ goto tr33
case 11:
- goto tr440
+ goto tr457
case 13:
- goto tr35
+ goto tr33
case 32:
- goto tr439
+ goto tr456
case 35:
- goto tr35
+ goto tr33
case 44:
- goto tr35
+ goto tr33
case 92:
- goto tr441
+ goto tr458
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr439
+ goto tr456
}
- goto tr438
-tr33:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr455
+tr31:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto st1
-tr438:
-//line plugins/parsers/influx/machine.go.rl:73
+tr455:
+//line plugins/parsers/influx/machine.go.rl:82
- foundMetric = true
+ m.beginMetric = true
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
@@ -3092,7 +3204,7 @@ tr438:
goto _test_eof1
}
st_case_1:
-//line plugins/parsers/influx/machine.go:3096
+//line plugins/parsers/influx/machine.go:3208
switch ( m.data)[( m.p)] {
case 10:
goto tr2
@@ -3105,7 +3217,7 @@ tr438:
case 44:
goto tr4
case 92:
- goto st96
+ goto st94
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
@@ -3113,26 +3225,26 @@ tr438:
goto st1
tr1:
( m.cs) = 2
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr60:
+tr58:
( m.cs) = 2
-//line plugins/parsers/influx/machine.go.rl:90
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
@@ -3142,7 +3254,7 @@ tr60:
goto _test_eof2
}
st_case_2:
-//line plugins/parsers/influx/machine.go:3146
+//line plugins/parsers/influx/machine.go:3258
switch ( m.data)[( m.p)] {
case 10:
goto tr8
@@ -3164,7 +3276,7 @@ tr60:
}
goto tr6
tr6:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
@@ -3174,7 +3286,7 @@ tr6:
goto _test_eof3
}
st_case_3:
-//line plugins/parsers/influx/machine.go:3178
+//line plugins/parsers/influx/machine.go:3290
switch ( m.data)[( m.p)] {
case 32:
goto tr8
@@ -3183,7 +3295,7 @@ tr6:
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -3196,214 +3308,214 @@ tr6:
goto st3
tr2:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
tr8:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr35:
+tr33:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:23
+//line plugins/parsers/influx/machine.go.rl:32
err = ErrNameParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr39:
+tr37:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:23
+//line plugins/parsers/influx/machine.go.rl:32
err = ErrNameParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr43:
+tr41:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:23
+//line plugins/parsers/influx/machine.go.rl:32
err = ErrNameParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr47:
+tr45:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr105:
+tr103:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
-//line plugins/parsers/influx/machine.go.rl:44
+//line plugins/parsers/influx/machine.go.rl:53
err = ErrTimestampParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr132:
+tr130:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
-//line plugins/parsers/influx/machine.go.rl:44
+//line plugins/parsers/influx/machine.go.rl:53
err = ErrTimestampParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr198:
+tr196:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
-//line plugins/parsers/influx/machine.go.rl:44
+//line plugins/parsers/influx/machine.go.rl:53
err = ErrTimestampParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr404:
+tr421:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:23
+//line plugins/parsers/influx/machine.go.rl:32
err = ErrNameParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr407:
+tr424:
( m.cs) = 0
-//line plugins/parsers/influx/machine.go.rl:44
+//line plugins/parsers/influx/machine.go.rl:53
err = ErrTimestampParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
goto _again
-tr1023:
-//line plugins/parsers/influx/machine.go.rl:64
+tr1053:
+//line plugins/parsers/influx/machine.go.rl:73
( m.p)--
- {goto st259 }
+ {goto st269 }
goto st0
-//line plugins/parsers/influx/machine.go:3399
+//line plugins/parsers/influx/machine.go:3511
st_case_0:
st0:
( m.cs) = 0
goto _out
tr12:
-//line plugins/parsers/influx/machine.go.rl:99
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
goto st4
st4:
@@ -3411,7 +3523,7 @@ tr12:
goto _test_eof4
}
st_case_4:
-//line plugins/parsers/influx/machine.go:3415
+//line plugins/parsers/influx/machine.go:3527
switch ( m.data)[( m.p)] {
case 34:
goto st5
@@ -3442,552 +3554,550 @@ tr12:
switch ( m.data)[( m.p)] {
case 10:
goto tr24
- case 12:
- goto tr8
- case 13:
- goto tr25
case 34:
- goto tr26
+ goto tr25
case 92:
- goto tr27
+ goto tr26
}
goto tr23
tr23:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto st6
- st6:
- if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof6
- }
- st_case_6:
-//line plugins/parsers/influx/machine.go:3467
- switch ( m.data)[( m.p)] {
- case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
- case 34:
- goto tr31
- case 92:
- goto st76
- }
- goto st6
tr24:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st7
- st7:
-//line plugins/parsers/influx/machine.go.rl:157
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+ goto st6
+tr28:
+//line plugins/parsers/influx/machine.go.rl:166
m.lineno++
m.sol = m.p
m.sol++ // next char will be the first column in the line
+ goto st6
+ st6:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof7
+ goto _test_eof6
}
- st_case_7:
-//line plugins/parsers/influx/machine.go:3498
+ st_case_6:
+//line plugins/parsers/influx/machine.go:3595
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
goto st6
tr25:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st8
- st8:
- if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof8
- }
- st_case_8:
-//line plugins/parsers/influx/machine.go:3523
- if ( m.data)[( m.p)] == 10 {
- goto st7
- }
- goto tr8
-tr26:
- ( m.cs) = 260
-//line plugins/parsers/influx/machine.go.rl:19
+ ( m.cs) = 270
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:139
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr31:
- ( m.cs) = 260
-//line plugins/parsers/influx/machine.go.rl:139
+tr29:
+ ( m.cs) = 270
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st260:
+ st270:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof260
+ goto _test_eof270
}
- st_case_260:
-//line plugins/parsers/influx/machine.go:3563
+ st_case_270:
+//line plugins/parsers/influx/machine.go:3640
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 13:
- goto st34
+ goto st32
case 32:
- goto st261
+ goto st271
case 44:
- goto st37
+ goto st35
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st261
+ goto st271
}
- goto tr105
-tr516:
- ( m.cs) = 261
-//line plugins/parsers/influx/machine.go.rl:121
+ goto tr103
+tr921:
+ ( m.cs) = 271
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr909:
- ( m.cs) = 261
-//line plugins/parsers/influx/machine.go.rl:103
+tr1041:
+ ( m.cs) = 271
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr912:
- ( m.cs) = 261
-//line plugins/parsers/influx/machine.go.rl:112
+tr1044:
+ ( m.cs) = 271
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr916:
- ( m.cs) = 261
-//line plugins/parsers/influx/machine.go.rl:130
+tr1047:
+ ( m.cs) = 271
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st261:
+ st271:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof261
+ goto _test_eof271
}
- st_case_261:
-//line plugins/parsers/influx/machine.go:3635
+ st_case_271:
+//line plugins/parsers/influx/machine.go:3712
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 13:
- goto st34
+ goto st32
case 32:
- goto st261
+ goto st271
case 45:
- goto tr445
+ goto tr462
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr446
+ goto tr463
}
case ( m.data)[( m.p)] >= 9:
- goto st261
+ goto st271
}
- goto tr407
-tr451:
- ( m.cs) = 262
-//line plugins/parsers/influx/machine.go.rl:148
+ goto tr424
+tr101:
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+ goto st272
+tr468:
+ ( m.cs) = 272
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr715:
- ( m.cs) = 262
-//line plugins/parsers/influx/machine.go.rl:121
+tr730:
+ ( m.cs) = 272
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr925:
- ( m.cs) = 262
-//line plugins/parsers/influx/machine.go.rl:103
+tr942:
+ ( m.cs) = 272
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr930:
- ( m.cs) = 262
-//line plugins/parsers/influx/machine.go.rl:112
+tr948:
+ ( m.cs) = 272
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr935:
- ( m.cs) = 262
-//line plugins/parsers/influx/machine.go.rl:130
+tr954:
+ ( m.cs) = 272
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
- goto _again
- st262:
-//line plugins/parsers/influx/machine.go.rl:157
+//line plugins/parsers/influx/machine.go.rl:166
m.lineno++
m.sol = m.p
m.sol++ // next char will be the first column in the line
-//line plugins/parsers/influx/machine.go.rl:163
+ goto _again
+ st272:
+//line plugins/parsers/influx/machine.go.rl:172
- ( m.cs) = 715;
+ m.finishMetric = true
+ ( m.cs) = 739;
{( m.p)++; goto _out }
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof262
+ goto _test_eof272
}
- st_case_262:
-//line plugins/parsers/influx/machine.go:3736
+ st_case_272:
+//line plugins/parsers/influx/machine.go:3846
switch ( m.data)[( m.p)] {
case 10:
- goto tr35
+ goto tr33
case 11:
- goto tr36
+ goto tr34
case 13:
- goto tr35
+ goto tr33
case 32:
- goto st9
+ goto st7
case 35:
- goto tr35
+ goto tr33
case 44:
- goto tr35
+ goto tr33
case 92:
- goto tr37
+ goto tr35
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st9
+ goto st7
}
- goto tr33
-tr439:
-//line plugins/parsers/influx/machine.go.rl:73
+ goto tr31
+tr456:
+//line plugins/parsers/influx/machine.go.rl:82
- foundMetric = true
+ m.beginMetric = true
- goto st9
- st9:
+ goto st7
+ st7:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof9
+ goto _test_eof7
}
- st_case_9:
-//line plugins/parsers/influx/machine.go:3768
+ st_case_7:
+//line plugins/parsers/influx/machine.go:3878
switch ( m.data)[( m.p)] {
case 10:
- goto tr35
+ goto tr33
case 11:
- goto tr36
+ goto tr34
case 13:
- goto tr35
+ goto tr33
case 32:
- goto st9
+ goto st7
case 35:
- goto tr35
+ goto tr33
case 44:
- goto tr35
+ goto tr33
case 92:
- goto tr37
+ goto tr35
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st9
+ goto st7
}
- goto tr33
-tr36:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr31
+tr34:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st10
-tr440:
-//line plugins/parsers/influx/machine.go.rl:73
+ goto st8
+tr457:
+//line plugins/parsers/influx/machine.go.rl:82
- foundMetric = true
+ m.beginMetric = true
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st10
- st10:
+ goto st8
+ st8:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof10
+ goto _test_eof8
}
- st_case_10:
-//line plugins/parsers/influx/machine.go:3810
+ st_case_8:
+//line plugins/parsers/influx/machine.go:3920
switch ( m.data)[( m.p)] {
case 10:
- goto tr39
+ goto tr37
case 11:
- goto tr40
+ goto tr38
case 13:
- goto tr39
+ goto tr37
case 32:
- goto tr38
+ goto tr36
case 35:
goto st1
case 44:
goto tr4
case 92:
- goto tr37
+ goto tr35
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr38
+ goto tr36
}
- goto tr33
-tr38:
- ( m.cs) = 11
-//line plugins/parsers/influx/machine.go.rl:77
+ goto tr31
+tr36:
+ ( m.cs) = 9
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st11:
+ st9:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof11
+ goto _test_eof9
}
- st_case_11:
-//line plugins/parsers/influx/machine.go:3849
+ st_case_9:
+//line plugins/parsers/influx/machine.go:3959
switch ( m.data)[( m.p)] {
case 10:
- goto tr43
+ goto tr41
case 11:
- goto tr44
+ goto tr42
case 13:
- goto tr43
+ goto tr41
case 32:
- goto st11
+ goto st9
case 35:
goto tr6
case 44:
- goto tr43
+ goto tr41
case 61:
- goto tr33
+ goto tr31
case 92:
- goto tr45
+ goto tr43
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st11
+ goto st9
}
- goto tr41
-tr41:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr39
+tr39:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st12
- st12:
+ goto st10
+ st10:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof12
+ goto _test_eof10
}
- st_case_12:
-//line plugins/parsers/influx/machine.go:3883
+ st_case_10:
+//line plugins/parsers/influx/machine.go:3993
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
- goto tr48
+ goto tr46
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
- goto st12
-tr48:
- ( m.cs) = 13
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st10
+tr46:
+ ( m.cs) = 11
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr51:
- ( m.cs) = 13
-//line plugins/parsers/influx/machine.go.rl:77
+tr49:
+ ( m.cs) = 11
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st13:
+ st11:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof13
+ goto _test_eof11
}
- st_case_13:
-//line plugins/parsers/influx/machine.go:3939
+ st_case_11:
+//line plugins/parsers/influx/machine.go:4049
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
- goto tr51
+ goto tr49
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto tr45
+ goto tr43
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
- goto tr41
+ goto tr39
tr4:
- ( m.cs) = 14
-//line plugins/parsers/influx/machine.go.rl:77
+ ( m.cs) = 12
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr62:
- ( m.cs) = 14
-//line plugins/parsers/influx/machine.go.rl:90
+tr60:
+ ( m.cs) = 12
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st14:
+ st12:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof14
+ goto _test_eof12
}
- st_case_14:
-//line plugins/parsers/influx/machine.go:3991
+ st_case_12:
+//line plugins/parsers/influx/machine.go:4101
switch ( m.data)[( m.p)] {
case 32:
goto tr2
@@ -3996,7 +4106,7 @@ tr62:
case 61:
goto tr2
case 92:
- goto tr53
+ goto tr51
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -4006,28 +4116,28 @@ tr62:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto tr52
-tr52:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr50
+tr50:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st15
- st15:
+ goto st13
+ st13:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof15
+ goto _test_eof13
}
- st_case_15:
-//line plugins/parsers/influx/machine.go:4022
+ st_case_13:
+//line plugins/parsers/influx/machine.go:4132
switch ( m.data)[( m.p)] {
case 32:
goto tr2
case 44:
goto tr2
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -4037,19 +4147,19 @@ tr52:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto st15
-tr55:
-//line plugins/parsers/influx/machine.go.rl:86
+ goto st13
+tr53:
+//line plugins/parsers/influx/machine.go.rl:95
- key = m.text()
+ m.key = m.text()
- goto st16
- st16:
+ goto st14
+ st14:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof16
+ goto _test_eof14
}
- st_case_16:
-//line plugins/parsers/influx/machine.go:4053
+ st_case_14:
+//line plugins/parsers/influx/machine.go:4163
switch ( m.data)[( m.p)] {
case 32:
goto tr2
@@ -4058,7 +4168,7 @@ tr55:
case 61:
goto tr2
case 92:
- goto tr58
+ goto tr56
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -4068,233 +4178,233 @@ tr55:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto tr57
-tr57:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr55
+tr55:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st17
- st17:
+ goto st15
+ st15:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof17
+ goto _test_eof15
}
- st_case_17:
-//line plugins/parsers/influx/machine.go:4084
+ st_case_15:
+//line plugins/parsers/influx/machine.go:4194
switch ( m.data)[( m.p)] {
case 10:
goto tr2
case 11:
- goto tr61
+ goto tr59
case 13:
goto tr2
case 32:
- goto tr60
+ goto tr58
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr2
case 92:
- goto st23
+ goto st21
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ goto tr58
}
- goto st17
-tr61:
- ( m.cs) = 18
-//line plugins/parsers/influx/machine.go.rl:90
+ goto st15
+tr59:
+ ( m.cs) = 16
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st18:
+ st16:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof18
+ goto _test_eof16
}
- st_case_18:
-//line plugins/parsers/influx/machine.go:4123
+ st_case_16:
+//line plugins/parsers/influx/machine.go:4233
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
- goto tr65
+ goto tr63
case 13:
- goto tr47
+ goto tr45
case 32:
- goto tr60
+ goto tr58
case 44:
- goto tr62
+ goto tr60
case 61:
- goto tr47
+ goto tr45
case 92:
- goto tr66
+ goto tr64
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ goto tr58
}
- goto tr64
-tr64:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr62
+tr62:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st19
- st19:
+ goto st17
+ st17:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof19
+ goto _test_eof17
}
- st_case_19:
-//line plugins/parsers/influx/machine.go:4155
+ st_case_17:
+//line plugins/parsers/influx/machine.go:4265
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
- goto tr68
+ goto tr66
case 13:
- goto tr47
+ goto tr45
case 32:
- goto tr60
+ goto tr58
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ goto tr58
}
- goto st19
-tr68:
- ( m.cs) = 20
-//line plugins/parsers/influx/machine.go.rl:90
+ goto st17
+tr66:
+ ( m.cs) = 18
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr65:
- ( m.cs) = 20
-//line plugins/parsers/influx/machine.go.rl:90
+tr63:
+ ( m.cs) = 18
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st20:
+ st18:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof20
+ goto _test_eof18
}
- st_case_20:
-//line plugins/parsers/influx/machine.go:4211
+ st_case_18:
+//line plugins/parsers/influx/machine.go:4321
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
- goto tr65
+ goto tr63
case 13:
- goto tr47
+ goto tr45
case 32:
- goto tr60
+ goto tr58
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto tr66
+ goto tr64
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ goto tr58
}
- goto tr64
-tr66:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr62
+tr64:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st21
- st21:
+ goto st19
+ st19:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof21
+ goto _test_eof19
}
- st_case_21:
-//line plugins/parsers/influx/machine.go:4243
+ st_case_19:
+//line plugins/parsers/influx/machine.go:4353
if ( m.data)[( m.p)] == 92 {
- goto st22
+ goto st20
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st19
- st22:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st17
+ st20:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof22
+ goto _test_eof20
}
- st_case_22:
-//line plugins/parsers/influx/machine.go:4264
+ st_case_20:
+//line plugins/parsers/influx/machine.go:4374
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
- goto tr68
+ goto tr66
case 13:
- goto tr47
+ goto tr45
case 32:
- goto tr60
+ goto tr58
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ goto tr58
}
- goto st19
-tr58:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st17
+tr56:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st23
- st23:
+ goto st21
+ st21:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof23
+ goto _test_eof21
}
- st_case_23:
-//line plugins/parsers/influx/machine.go:4296
+ st_case_21:
+//line plugins/parsers/influx/machine.go:4406
if ( m.data)[( m.p)] == 92 {
- goto st24
+ goto st22
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -4304,50 +4414,50 @@ tr58:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto st17
- st24:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st15
+ st22:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof24
+ goto _test_eof22
}
- st_case_24:
-//line plugins/parsers/influx/machine.go:4317
+ st_case_22:
+//line plugins/parsers/influx/machine.go:4427
switch ( m.data)[( m.p)] {
case 10:
goto tr2
case 11:
- goto tr61
+ goto tr59
case 13:
goto tr2
case 32:
- goto tr60
+ goto tr58
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr2
case 92:
- goto st23
+ goto st21
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ goto tr58
}
- goto st17
-tr53:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st15
+tr51:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st25
- st25:
+ goto st23
+ st23:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof25
+ goto _test_eof23
}
- st_case_25:
-//line plugins/parsers/influx/machine.go:4349
+ st_case_23:
+//line plugins/parsers/influx/machine.go:4459
if ( m.data)[( m.p)] == 92 {
- goto st26
+ goto st24
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -4357,25 +4467,25 @@ tr53:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto st15
- st26:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st13
+ st24:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof26
+ goto _test_eof24
}
- st_case_26:
-//line plugins/parsers/influx/machine.go:4370
+ st_case_24:
+//line plugins/parsers/influx/machine.go:4480
switch ( m.data)[( m.p)] {
case 32:
goto tr2
case 44:
goto tr2
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -4385,94 +4495,94 @@ tr53:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto st15
-tr49:
-//line plugins/parsers/influx/machine.go.rl:99
+ goto st13
+tr47:
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st27
-tr406:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st25
+tr423:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:99
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st27
- st27:
+ goto st25
+ st25:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof27
+ goto _test_eof25
}
- st_case_27:
-//line plugins/parsers/influx/machine.go:4411
+ st_case_25:
+//line plugins/parsers/influx/machine.go:4521
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 34:
- goto st30
+ goto st28
case 44:
goto tr4
case 45:
- goto tr74
+ goto tr72
case 46:
- goto tr75
+ goto tr73
case 48:
- goto tr76
+ goto tr74
case 70:
- goto tr78
+ goto tr76
case 84:
- goto tr79
+ goto tr77
case 92:
- goto st96
+ goto st94
case 102:
- goto tr80
+ goto tr78
case 116:
- goto tr81
+ goto tr79
}
switch {
case ( m.data)[( m.p)] > 12:
if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr77
+ goto tr75
}
case ( m.data)[( m.p)] >= 9:
goto tr1
}
goto st1
tr3:
- ( m.cs) = 28
-//line plugins/parsers/influx/machine.go.rl:77
+ ( m.cs) = 26
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st28:
+ st26:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof28
+ goto _test_eof26
}
- st_case_28:
-//line plugins/parsers/influx/machine.go:4469
+ st_case_26:
+//line plugins/parsers/influx/machine.go:4579
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
- goto tr51
+ goto tr49
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
@@ -4480,24 +4590,24 @@ tr3:
case 61:
goto st1
case 92:
- goto tr45
+ goto tr43
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
- goto tr41
-tr45:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr39
+tr43:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st29
- st29:
+ goto st27
+ st27:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof29
+ goto _test_eof27
}
- st_case_29:
-//line plugins/parsers/influx/machine.go:4501
+ st_case_27:
+//line plugins/parsers/influx/machine.go:4611
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
@@ -4506,505 +4616,501 @@ tr45:
case ( m.data)[( m.p)] >= 9:
goto tr8
}
- goto st12
- st30:
+ goto st10
+ st28:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof30
+ goto _test_eof28
}
- st_case_30:
+ st_case_28:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr83
case 10:
goto tr24
case 11:
- goto tr84
- case 12:
- goto tr1
+ goto tr82
case 13:
- goto tr25
+ goto tr23
case 32:
- goto tr83
+ goto tr81
case 34:
- goto tr85
+ goto tr83
case 44:
- goto tr86
+ goto tr84
case 92:
- goto tr87
+ goto tr85
}
- goto tr82
-tr82:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr81
+ }
+ goto tr80
+tr80:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st31
- st31:
+ goto st29
+ st29:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof31
+ goto _test_eof29
}
- st_case_31:
-//line plugins/parsers/influx/machine.go:4548
+ st_case_29:
+//line plugins/parsers/influx/machine.go:4657
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 92:
- goto st142
+ goto st140
}
- goto st31
-tr89:
- ( m.cs) = 32
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st29
+tr87:
+ ( m.cs) = 30
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr83:
- ( m.cs) = 32
-//line plugins/parsers/influx/machine.go.rl:77
+tr81:
+ ( m.cs) = 30
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr231:
- ( m.cs) = 32
-//line plugins/parsers/influx/machine.go.rl:90
+tr229:
+ ( m.cs) = 30
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st32:
+ st30:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof32
+ goto _test_eof30
}
- st_case_32:
-//line plugins/parsers/influx/machine.go:4618
+ st_case_30:
+//line plugins/parsers/influx/machine.go:4726
switch ( m.data)[( m.p)] {
- case 9:
- goto st32
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr96
- case 12:
- goto st2
+ goto tr94
case 13:
- goto st8
+ goto st6
case 32:
- goto st32
+ goto st30
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr98
+ goto tr96
}
- goto tr94
-tr94:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st30
+ }
+ goto tr92
+tr92:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st33
- st33:
+ goto st31
+ st31:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof33
+ goto _test_eof31
}
- st_case_33:
-//line plugins/parsers/influx/machine.go:4653
+ st_case_31:
+//line plugins/parsers/influx/machine.go:4760
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- goto st33
-tr97:
- ( m.cs) = 263
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st31
+tr95:
+ ( m.cs) = 273
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:139
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr100:
- ( m.cs) = 263
-//line plugins/parsers/influx/machine.go.rl:139
+tr98:
+ ( m.cs) = 273
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr377:
- ( m.cs) = 263
-//line plugins/parsers/influx/machine.go.rl:139
+tr384:
+ ( m.cs) = 273
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st263:
+ st273:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof263
+ goto _test_eof273
}
- st_case_263:
-//line plugins/parsers/influx/machine.go:4727
+ st_case_273:
+//line plugins/parsers/influx/machine.go:4833
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto st264
+ goto st274
case 13:
- goto st34
+ goto st32
case 32:
- goto st261
+ goto st271
case 44:
- goto st37
+ goto st35
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st261
+ goto st271
}
goto st3
- st264:
+ st274:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof264
+ goto _test_eof274
}
- st_case_264:
+ st_case_274:
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto st264
+ goto st274
case 13:
- goto st34
+ goto st32
case 32:
- goto st261
+ goto st271
case 44:
- goto tr105
+ goto tr103
case 45:
- goto tr448
+ goto tr465
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr449
+ goto tr466
}
case ( m.data)[( m.p)] >= 9:
- goto st261
+ goto st271
}
goto st3
-tr453:
- ( m.cs) = 34
-//line plugins/parsers/influx/machine.go.rl:148
+tr470:
+ ( m.cs) = 32
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr717:
- ( m.cs) = 34
-//line plugins/parsers/influx/machine.go.rl:121
+tr732:
+ ( m.cs) = 32
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr927:
- ( m.cs) = 34
-//line plugins/parsers/influx/machine.go.rl:103
+tr944:
+ ( m.cs) = 32
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr932:
- ( m.cs) = 34
-//line plugins/parsers/influx/machine.go.rl:112
+tr950:
+ ( m.cs) = 32
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr937:
- ( m.cs) = 34
-//line plugins/parsers/influx/machine.go.rl:130
+tr956:
+ ( m.cs) = 32
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st34:
+ st32:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof34
+ goto _test_eof32
}
- st_case_34:
-//line plugins/parsers/influx/machine.go:4850
+ st_case_32:
+//line plugins/parsers/influx/machine.go:4956
if ( m.data)[( m.p)] == 10 {
- goto st262
+ goto tr101
}
goto st0
-tr448:
-//line plugins/parsers/influx/machine.go.rl:19
+tr465:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st35
- st35:
+ goto st33
+ st33:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof35
+ goto _test_eof33
}
- st_case_35:
-//line plugins/parsers/influx/machine.go:4866
+ st_case_33:
+//line plugins/parsers/influx/machine.go:4972
switch ( m.data)[( m.p)] {
case 32:
- goto tr105
+ goto tr103
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] < 12:
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 {
- goto tr105
+ goto tr103
}
case ( m.data)[( m.p)] > 13:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st265
+ goto st275
}
default:
- goto tr105
+ goto tr103
}
goto st3
-tr449:
-//line plugins/parsers/influx/machine.go.rl:19
+tr466:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st265
- st265:
+ goto st275
+ st275:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof265
+ goto _test_eof275
}
- st_case_265:
-//line plugins/parsers/influx/machine.go:4901
+ st_case_275:
+//line plugins/parsers/influx/machine.go:5007
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st268
+ goto st278
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
-tr450:
- ( m.cs) = 266
-//line plugins/parsers/influx/machine.go.rl:148
+tr467:
+ ( m.cs) = 276
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st266:
+ st276:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof266
+ goto _test_eof276
}
- st_case_266:
-//line plugins/parsers/influx/machine.go:4945
+ st_case_276:
+//line plugins/parsers/influx/machine.go:5051
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 13:
- goto st34
+ goto st32
case 32:
- goto st266
+ goto st276
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st266
+ goto st276
}
goto st0
-tr452:
- ( m.cs) = 267
-//line plugins/parsers/influx/machine.go.rl:148
+tr469:
+ ( m.cs) = 277
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st267:
+ st277:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof267
+ goto _test_eof277
}
- st_case_267:
-//line plugins/parsers/influx/machine.go:4976
+ st_case_277:
+//line plugins/parsers/influx/machine.go:5082
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto st267
+ goto st277
case 13:
- goto st34
+ goto st32
case 32:
- goto st266
+ goto st276
case 44:
goto tr8
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st266
+ goto st276
}
goto st3
tr10:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st36
- st36:
+ goto st34
+ st34:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof36
+ goto _test_eof34
}
- st_case_36:
-//line plugins/parsers/influx/machine.go:5008
+ st_case_34:
+//line plugins/parsers/influx/machine.go:5114
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
@@ -5014,599 +5120,599 @@ tr10:
goto tr8
}
goto st3
- st268:
+ st278:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof268
+ goto _test_eof278
}
- st_case_268:
+ st_case_278:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st269
+ goto st279
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st269:
+ st279:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof269
+ goto _test_eof279
}
- st_case_269:
+ st_case_279:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st270
+ goto st280
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st270:
+ st280:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof270
+ goto _test_eof280
}
- st_case_270:
+ st_case_280:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st271
+ goto st281
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st271:
+ st281:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof271
+ goto _test_eof281
}
- st_case_271:
+ st_case_281:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st272
+ goto st282
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st272:
+ st282:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof272
+ goto _test_eof282
}
- st_case_272:
+ st_case_282:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st273
+ goto st283
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st273:
+ st283:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof273
+ goto _test_eof283
}
- st_case_273:
+ st_case_283:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st274
+ goto st284
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st274:
+ st284:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof274
+ goto _test_eof284
}
- st_case_274:
+ st_case_284:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st275
+ goto st285
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st275:
+ st285:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof275
+ goto _test_eof285
}
- st_case_275:
+ st_case_285:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st276
+ goto st286
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st276:
+ st286:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof276
+ goto _test_eof286
}
- st_case_276:
+ st_case_286:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st277
+ goto st287
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st277:
+ st287:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof277
+ goto _test_eof287
}
- st_case_277:
+ st_case_287:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st278
+ goto st288
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st278:
+ st288:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof278
+ goto _test_eof288
}
- st_case_278:
+ st_case_288:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st279
+ goto st289
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st279:
+ st289:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof279
+ goto _test_eof289
}
- st_case_279:
+ st_case_289:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st280
+ goto st290
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st280:
+ st290:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof280
+ goto _test_eof290
}
- st_case_280:
+ st_case_290:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st281
+ goto st291
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st281:
+ st291:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof281
+ goto _test_eof291
}
- st_case_281:
+ st_case_291:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st282
+ goto st292
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st282:
+ st292:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof282
+ goto _test_eof292
}
- st_case_282:
+ st_case_292:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st283
+ goto st293
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st283:
+ st293:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof283
+ goto _test_eof293
}
- st_case_283:
+ st_case_293:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st284
+ goto st294
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st284:
+ st294:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof284
+ goto _test_eof294
}
- st_case_284:
+ st_case_294:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st285
+ goto st295
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
goto st3
- st285:
+ st295:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof285
+ goto _test_eof295
}
- st_case_285:
+ st_case_295:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr452
+ goto tr469
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr105
+ goto tr103
case 61:
goto tr12
case 92:
- goto st36
+ goto st34
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr450
+ goto tr467
}
goto st3
-tr907:
- ( m.cs) = 37
-//line plugins/parsers/influx/machine.go.rl:121
+tr922:
+ ( m.cs) = 35
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr1014:
- ( m.cs) = 37
-//line plugins/parsers/influx/machine.go.rl:103
+tr1042:
+ ( m.cs) = 35
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr1016:
- ( m.cs) = 37
-//line plugins/parsers/influx/machine.go.rl:112
+tr1045:
+ ( m.cs) = 35
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr1018:
- ( m.cs) = 37
-//line plugins/parsers/influx/machine.go.rl:130
+tr1048:
+ ( m.cs) = 35
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st37:
+ st35:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof37
+ goto _test_eof35
}
- st_case_37:
-//line plugins/parsers/influx/machine.go:5610
+ st_case_35:
+//line plugins/parsers/influx/machine.go:5716
switch ( m.data)[( m.p)] {
case 32:
goto tr8
@@ -5626,812 +5732,894 @@ tr1018:
goto tr8
}
goto tr6
-tr101:
-//line plugins/parsers/influx/machine.go.rl:99
+tr99:
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st38
- st38:
+ goto st36
+ st36:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof38
+ goto _test_eof36
}
- st_case_38:
-//line plugins/parsers/influx/machine.go:5641
+ st_case_36:
+//line plugins/parsers/influx/machine.go:5747
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr107
+ goto tr105
case 45:
- goto tr108
+ goto tr106
case 46:
- goto tr109
+ goto tr107
case 48:
- goto tr110
+ goto tr108
case 70:
- goto tr112
+ goto tr110
case 84:
- goto tr113
+ goto tr111
case 92:
- goto st76
+ goto st73
case 102:
- goto tr114
+ goto tr112
case 116:
- goto tr115
+ goto tr113
}
if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr111
+ goto tr109
}
goto st6
-tr107:
- ( m.cs) = 286
-//line plugins/parsers/influx/machine.go.rl:139
+tr105:
+ ( m.cs) = 296
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st286:
+ st296:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof286
+ goto _test_eof296
}
- st_case_286:
-//line plugins/parsers/influx/machine.go:5690
+ st_case_296:
+//line plugins/parsers/influx/machine.go:5792
switch ( m.data)[( m.p)] {
case 10:
- goto tr475
- case 12:
- goto st261
+ goto tr492
case 13:
- goto tr476
+ goto tr493
case 32:
- goto tr474
+ goto tr491
case 34:
- goto tr26
+ goto tr25
case 44:
- goto tr477
+ goto tr494
case 92:
- goto tr27
+ goto tr26
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr474
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr491
}
goto tr23
-tr474:
-//line plugins/parsers/influx/machine.go.rl:19
+tr491:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st287
-tr961:
- ( m.cs) = 287
-//line plugins/parsers/influx/machine.go.rl:121
+ goto st297
+tr980:
+ ( m.cs) = 297
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr966:
- ( m.cs) = 287
-//line plugins/parsers/influx/machine.go.rl:103
+tr985:
+ ( m.cs) = 297
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr969:
- ( m.cs) = 287
-//line plugins/parsers/influx/machine.go.rl:112
+tr988:
+ ( m.cs) = 297
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr972:
- ( m.cs) = 287
-//line plugins/parsers/influx/machine.go.rl:130
+tr991:
+ ( m.cs) = 297
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st287:
+ st297:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof287
+ goto _test_eof297
}
- st_case_287:
-//line plugins/parsers/influx/machine.go:5774
+ st_case_297:
+//line plugins/parsers/influx/machine.go:5874
switch ( m.data)[( m.p)] {
case 10:
- goto st288
- case 12:
- goto st261
+ goto tr219
case 13:
- goto st74
+ goto st72
case 32:
- goto st287
+ goto st297
case 34:
- goto tr31
+ goto tr29
case 45:
- goto tr480
+ goto tr497
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr481
+ goto tr498
}
case ( m.data)[( m.p)] >= 9:
- goto st287
+ goto st297
}
goto st6
-tr475:
-//line plugins/parsers/influx/machine.go.rl:19
+tr492:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st288
-tr584:
- ( m.cs) = 288
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+ goto st298
+tr219:
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+ goto st298
+tr636:
+ ( m.cs) = 298
+//line plugins/parsers/influx/machine.go.rl:130
+
+ err = m.handler.AddFloat(m.key, m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+ goto _again
+tr600:
+ ( m.cs) = 298
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr620:
- ( m.cs) = 288
-//line plugins/parsers/influx/machine.go.rl:121
+tr817:
+ ( m.cs) = 298
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr778:
- ( m.cs) = 288
-//line plugins/parsers/influx/machine.go.rl:103
+tr822:
+ ( m.cs) = 298
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr784:
- ( m.cs) = 288
-//line plugins/parsers/influx/machine.go.rl:112
+tr803:
+ ( m.cs) = 298
+//line plugins/parsers/influx/machine.go.rl:166
- err = m.handler.AddUint(key, m.text())
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+//line plugins/parsers/influx/machine.go.rl:139
+
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr790:
- ( m.cs) = 288
+tr758:
+ ( m.cs) = 298
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st288:
-//line plugins/parsers/influx/machine.go.rl:157
+tr791:
+ ( m.cs) = 298
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+//line plugins/parsers/influx/machine.go.rl:112
+
+ err = m.handler.AddInt(m.key, m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+ goto _again
+tr797:
+ ( m.cs) = 298
+//line plugins/parsers/influx/machine.go.rl:166
m.lineno++
m.sol = m.p
m.sol++ // next char will be the first column in the line
-//line plugins/parsers/influx/machine.go.rl:163
+//line plugins/parsers/influx/machine.go.rl:121
+
+ err = m.handler.AddUint(m.key, m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+ goto _again
+ st298:
+//line plugins/parsers/influx/machine.go.rl:172
- ( m.cs) = 715;
+ m.finishMetric = true
+ ( m.cs) = 739;
{( m.p)++; goto _out }
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof288
+ goto _test_eof298
}
- st_case_288:
-//line plugins/parsers/influx/machine.go:5887
+ st_case_298:
+//line plugins/parsers/influx/machine.go:6081
switch ( m.data)[( m.p)] {
- case 9:
- goto st39
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr117
- case 12:
- goto st9
+ goto tr115
case 13:
- goto st8
+ goto st6
case 32:
- goto st39
+ goto st37
case 34:
- goto tr118
+ goto tr116
case 35:
goto st6
case 44:
goto st6
case 92:
- goto tr87
+ goto tr85
}
- goto tr82
- st39:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st37
+ }
+ goto tr80
+ st37:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof39
+ goto _test_eof37
}
- st_case_39:
+ st_case_37:
switch ( m.data)[( m.p)] {
- case 9:
- goto st39
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr117
- case 12:
- goto st9
+ goto tr115
case 13:
- goto st8
+ goto st6
case 32:
- goto st39
+ goto st37
case 34:
- goto tr118
+ goto tr116
case 35:
goto st6
case 44:
goto st6
case 92:
- goto tr87
+ goto tr85
}
- goto tr82
-tr117:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st37
+ }
+ goto tr80
+tr115:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st40
- st40:
+ goto st38
+ st38:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof40
+ goto _test_eof38
}
- st_case_40:
-//line plugins/parsers/influx/machine.go:5950
+ st_case_38:
+//line plugins/parsers/influx/machine.go:6142
switch ( m.data)[( m.p)] {
- case 9:
- goto tr119
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr120
- case 12:
- goto tr38
+ goto tr118
case 13:
- goto st8
+ goto st6
case 32:
- goto tr119
+ goto tr117
case 34:
- goto tr85
+ goto tr83
case 35:
- goto st31
+ goto st29
case 44:
- goto tr92
+ goto tr90
case 92:
- goto tr87
+ goto tr85
}
- goto tr82
-tr119:
- ( m.cs) = 41
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr117
+ }
+ goto tr80
+tr117:
+ ( m.cs) = 39
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st41:
+ st39:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof41
+ goto _test_eof39
}
- st_case_41:
-//line plugins/parsers/influx/machine.go:5992
+ st_case_39:
+//line plugins/parsers/influx/machine.go:6183
switch ( m.data)[( m.p)] {
- case 9:
- goto st41
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr123
- case 12:
- goto st11
+ goto tr121
case 13:
- goto st8
+ goto st6
case 32:
- goto st41
+ goto st39
case 34:
- goto tr124
+ goto tr122
case 35:
- goto tr94
+ goto tr92
case 44:
goto st6
case 61:
- goto tr82
+ goto tr80
case 92:
- goto tr125
+ goto tr123
}
- goto tr121
-tr121:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st39
+ }
+ goto tr119
+tr119:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st42
- st42:
+ goto st40
+ st40:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof42
+ goto _test_eof40
}
- st_case_42:
-//line plugins/parsers/influx/machine.go:6029
+ st_case_40:
+//line plugins/parsers/influx/machine.go:6219
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr127
- case 12:
- goto tr1
+ goto tr125
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- goto st42
-tr127:
- ( m.cs) = 43
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st40
+tr125:
+ ( m.cs) = 41
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr131:
- ( m.cs) = 43
-//line plugins/parsers/influx/machine.go.rl:77
+tr129:
+ ( m.cs) = 41
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st43:
+ st41:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof43
+ goto _test_eof41
}
- st_case_43:
-//line plugins/parsers/influx/machine.go:6088
+ st_case_41:
+//line plugins/parsers/influx/machine.go:6277
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr131
- case 12:
- goto tr1
+ goto tr129
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto tr125
+ goto tr123
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
}
- goto tr121
-tr124:
- ( m.cs) = 289
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr119
+tr122:
+ ( m.cs) = 299
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:139
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr128:
- ( m.cs) = 289
-//line plugins/parsers/influx/machine.go.rl:139
+tr126:
+ ( m.cs) = 299
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st289:
+ st299:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof289
+ goto _test_eof299
}
- st_case_289:
-//line plugins/parsers/influx/machine.go:6147
+ st_case_299:
+//line plugins/parsers/influx/machine.go:6335
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr483
+ goto tr500
case 13:
- goto st34
+ goto st32
case 32:
- goto tr482
+ goto tr499
case 44:
- goto tr484
+ goto tr501
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr482
+ goto tr499
}
- goto st12
-tr482:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st10
+tr499:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr547:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:90
+tr563:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr622:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:77
+tr811:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr712:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:90
+tr729:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr724:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:90
+tr941:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr731:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:90
+tr947:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr738:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:90
+tr953:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr804:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:77
+tr1005:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr809:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:77
+tr1009:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr814:
- ( m.cs) = 290
-//line plugins/parsers/influx/machine.go.rl:77
+tr1013:
+ ( m.cs) = 300
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st290:
+ st300:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof290
+ goto _test_eof300
}
- st_case_290:
-//line plugins/parsers/influx/machine.go:6383
+ st_case_300:
+//line plugins/parsers/influx/machine.go:6571
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr486
+ goto tr503
case 13:
- goto st34
+ goto st32
case 32:
- goto st290
+ goto st300
case 44:
- goto tr105
+ goto tr103
case 45:
- goto tr448
+ goto tr465
case 61:
- goto tr105
+ goto tr103
case 92:
goto tr10
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr449
+ goto tr466
}
case ( m.data)[( m.p)] >= 9:
- goto st290
+ goto st300
}
goto tr6
-tr486:
-//line plugins/parsers/influx/machine.go.rl:19
+tr503:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st291
- st291:
+ goto st301
+ st301:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof291
+ goto _test_eof301
}
- st_case_291:
-//line plugins/parsers/influx/machine.go:6422
+ st_case_301:
+//line plugins/parsers/influx/machine.go:6610
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr486
+ goto tr503
case 13:
- goto st34
+ goto st32
case 32:
- goto st290
+ goto st300
case 44:
- goto tr105
+ goto tr103
case 45:
- goto tr448
+ goto tr465
case 61:
goto tr12
case 92:
@@ -6440,236 +6628,236 @@ tr486:
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr449
+ goto tr466
}
case ( m.data)[( m.p)] >= 9:
- goto st290
+ goto st300
}
goto tr6
-tr483:
- ( m.cs) = 292
-//line plugins/parsers/influx/machine.go.rl:77
+tr500:
+ ( m.cs) = 302
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr487:
- ( m.cs) = 292
-//line plugins/parsers/influx/machine.go.rl:77
+tr504:
+ ( m.cs) = 302
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st292:
+ st302:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof292
+ goto _test_eof302
}
- st_case_292:
-//line plugins/parsers/influx/machine.go:6485
+ st_case_302:
+//line plugins/parsers/influx/machine.go:6673
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr487
+ goto tr504
case 13:
- goto st34
+ goto st32
case 32:
- goto tr482
+ goto tr499
case 44:
goto tr4
case 45:
- goto tr488
+ goto tr505
case 61:
- goto tr49
+ goto tr47
case 92:
- goto tr45
+ goto tr43
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr489
+ goto tr506
}
case ( m.data)[( m.p)] >= 9:
- goto tr482
+ goto tr499
}
- goto tr41
-tr488:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr39
+tr505:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st44
- st44:
+ goto st42
+ st42:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof44
+ goto _test_eof42
}
- st_case_44:
-//line plugins/parsers/influx/machine.go:6524
+ st_case_42:
+//line plugins/parsers/influx/machine.go:6712
switch ( m.data)[( m.p)] {
case 10:
- goto tr132
+ goto tr130
case 11:
- goto tr48
+ goto tr46
case 13:
- goto tr132
+ goto tr130
case 32:
goto tr1
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st293
+ goto st303
}
case ( m.data)[( m.p)] >= 9:
goto tr1
}
- goto st12
-tr489:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st10
+tr506:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st293
- st293:
+ goto st303
+ st303:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof293
+ goto _test_eof303
}
- st_case_293:
-//line plugins/parsers/influx/machine.go:6561
+ st_case_303:
+//line plugins/parsers/influx/machine.go:6749
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st297
+ goto st307
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
-tr495:
- ( m.cs) = 294
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st10
+tr512:
+ ( m.cs) = 304
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr556:
- ( m.cs) = 294
-//line plugins/parsers/influx/machine.go.rl:90
+tr572:
+ ( m.cs) = 304
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr490:
- ( m.cs) = 294
-//line plugins/parsers/influx/machine.go.rl:77
+tr507:
+ ( m.cs) = 304
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr553:
- ( m.cs) = 294
-//line plugins/parsers/influx/machine.go.rl:90
+tr569:
+ ( m.cs) = 304
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st294:
+ st304:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof294
+ goto _test_eof304
}
- st_case_294:
-//line plugins/parsers/influx/machine.go:6664
+ st_case_304:
+//line plugins/parsers/influx/machine.go:6852
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr494
+ goto tr511
case 13:
- goto st34
+ goto st32
case 32:
- goto st294
+ goto st304
case 44:
goto tr8
case 61:
@@ -6678,30 +6866,30 @@ tr553:
goto tr10
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st294
+ goto st304
}
goto tr6
-tr494:
-//line plugins/parsers/influx/machine.go.rl:19
+tr511:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st295
- st295:
+ goto st305
+ st305:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof295
+ goto _test_eof305
}
- st_case_295:
-//line plugins/parsers/influx/machine.go:6696
+ st_case_305:
+//line plugins/parsers/influx/machine.go:6884
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr494
+ goto tr511
case 13:
- goto st34
+ goto st32
case 32:
- goto st294
+ goto st304
case 44:
goto tr8
case 61:
@@ -6710,4468 +6898,4498 @@ tr494:
goto tr10
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st294
+ goto st304
}
goto tr6
-tr496:
- ( m.cs) = 296
-//line plugins/parsers/influx/machine.go.rl:77
+tr513:
+ ( m.cs) = 306
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr491:
- ( m.cs) = 296
-//line plugins/parsers/influx/machine.go.rl:77
+tr508:
+ ( m.cs) = 306
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st296:
+ st306:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof296
+ goto _test_eof306
}
- st_case_296:
-//line plugins/parsers/influx/machine.go:6762
+ st_case_306:
+//line plugins/parsers/influx/machine.go:6950
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr496
+ goto tr513
case 13:
- goto st34
+ goto st32
case 32:
- goto tr495
+ goto tr512
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto tr45
+ goto tr43
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr495
+ goto tr512
}
- goto tr41
- st297:
+ goto tr39
+ st307:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof297
+ goto _test_eof307
}
- st_case_297:
+ st_case_307:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st298
+ goto st308
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st298:
+ goto st10
+ st308:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof298
+ goto _test_eof308
}
- st_case_298:
+ st_case_308:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st299
+ goto st309
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st299:
+ goto st10
+ st309:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof299
+ goto _test_eof309
}
- st_case_299:
+ st_case_309:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st300
+ goto st310
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st300:
+ goto st10
+ st310:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof300
+ goto _test_eof310
}
- st_case_300:
+ st_case_310:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st301
+ goto st311
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st301:
+ goto st10
+ st311:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof301
+ goto _test_eof311
}
- st_case_301:
+ st_case_311:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st302
+ goto st312
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st302:
+ goto st10
+ st312:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof302
+ goto _test_eof312
}
- st_case_302:
+ st_case_312:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st303
+ goto st313
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st303:
+ goto st10
+ st313:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof303
+ goto _test_eof313
}
- st_case_303:
+ st_case_313:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st304
+ goto st314
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st304:
+ goto st10
+ st314:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof304
+ goto _test_eof314
}
- st_case_304:
+ st_case_314:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st305
+ goto st315
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st305:
+ goto st10
+ st315:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof305
+ goto _test_eof315
}
- st_case_305:
+ st_case_315:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st306
+ goto st316
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st306:
+ goto st10
+ st316:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof306
+ goto _test_eof316
}
- st_case_306:
+ st_case_316:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st307
+ goto st317
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st307:
+ goto st10
+ st317:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof307
+ goto _test_eof317
}
- st_case_307:
+ st_case_317:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st308
+ goto st318
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st308:
+ goto st10
+ st318:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof308
+ goto _test_eof318
}
- st_case_308:
+ st_case_318:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st309
+ goto st319
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st309:
+ goto st10
+ st319:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof309
+ goto _test_eof319
}
- st_case_309:
+ st_case_319:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st310
+ goto st320
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st310:
+ goto st10
+ st320:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof310
+ goto _test_eof320
}
- st_case_310:
+ st_case_320:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st311
+ goto st321
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st311:
+ goto st10
+ st321:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof311
+ goto _test_eof321
}
- st_case_311:
+ st_case_321:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st312
+ goto st322
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st312:
+ goto st10
+ st322:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof312
+ goto _test_eof322
}
- st_case_312:
+ st_case_322:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st313
+ goto st323
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st313:
+ goto st10
+ st323:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof313
+ goto _test_eof323
}
- st_case_313:
+ st_case_323:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st314
+ goto st324
}
case ( m.data)[( m.p)] >= 9:
- goto tr490
+ goto tr507
}
- goto st12
- st314:
+ goto st10
+ st324:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof314
+ goto _test_eof324
}
- st_case_314:
+ st_case_324:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr491
+ goto tr508
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr490
+ goto tr507
case 44:
goto tr4
case 61:
- goto tr49
+ goto tr47
case 92:
- goto st29
+ goto st27
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr490
+ goto tr507
}
- goto st12
-tr484:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st10
+tr501:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr549:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:90
+tr565:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr799:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:77
+tr813:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr718:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:90
+tr733:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr928:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:90
+tr945:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr933:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:90
+tr951:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr938:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:90
+tr957:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr982:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:77
+tr1007:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr985:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:77
+tr1011:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr988:
- ( m.cs) = 45
-//line plugins/parsers/influx/machine.go.rl:77
+tr1015:
+ ( m.cs) = 43
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st45:
+ st43:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof45
+ goto _test_eof43
}
- st_case_45:
-//line plugins/parsers/influx/machine.go:7533
+ st_case_43:
+//line plugins/parsers/influx/machine.go:7721
switch ( m.data)[( m.p)] {
case 32:
- goto tr47
+ goto tr45
case 44:
- goto tr47
+ goto tr45
case 61:
- goto tr47
+ goto tr45
case 92:
- goto tr135
+ goto tr133
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto tr134
-tr134:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr132
+tr132:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st46
- st46:
+ goto st44
+ st44:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof46
+ goto _test_eof44
}
- st_case_46:
-//line plugins/parsers/influx/machine.go:7564
+ st_case_44:
+//line plugins/parsers/influx/machine.go:7752
switch ( m.data)[( m.p)] {
case 32:
- goto tr47
+ goto tr45
case 44:
- goto tr47
+ goto tr45
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st46
-tr137:
-//line plugins/parsers/influx/machine.go.rl:86
+ goto st44
+tr135:
+//line plugins/parsers/influx/machine.go.rl:95
- key = m.text()
+ m.key = m.text()
-//line plugins/parsers/influx/machine.go.rl:99
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st47
- st47:
+ goto st45
+ st45:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof47
+ goto _test_eof45
}
- st_case_47:
-//line plugins/parsers/influx/machine.go:7599
+ st_case_45:
+//line plugins/parsers/influx/machine.go:7787
switch ( m.data)[( m.p)] {
case 32:
- goto tr47
+ goto tr45
case 34:
- goto tr139
+ goto tr137
case 44:
- goto tr47
+ goto tr45
case 45:
- goto tr140
+ goto tr138
case 46:
- goto tr141
+ goto tr139
case 48:
- goto tr142
+ goto tr140
case 61:
- goto tr47
+ goto tr45
case 70:
- goto tr144
+ goto tr142
case 84:
- goto tr145
+ goto tr143
case 92:
- goto tr58
+ goto tr56
case 102:
- goto tr146
+ goto tr144
case 116:
- goto tr147
+ goto tr145
}
switch {
case ( m.data)[( m.p)] < 12:
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] > 13:
if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr143
+ goto tr141
}
default:
- goto tr47
+ goto tr45
}
- goto tr57
-tr139:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr55
+tr137:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st48
- st48:
+ goto st46
+ st46:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof48
+ goto _test_eof46
}
- st_case_48:
-//line plugins/parsers/influx/machine.go:7650
+ st_case_46:
+//line plugins/parsers/influx/machine.go:7838
switch ( m.data)[( m.p)] {
- case 9:
- goto tr149
case 10:
goto tr24
case 11:
- goto tr150
- case 12:
- goto tr60
+ goto tr148
case 13:
- goto tr25
+ goto tr23
case 32:
- goto tr149
+ goto tr147
case 34:
- goto tr151
+ goto tr149
case 44:
- goto tr152
+ goto tr150
case 61:
goto tr23
case 92:
- goto tr153
+ goto tr151
}
- goto tr148
-tr148:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr147
+ }
+ goto tr146
+tr146:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st49
- st49:
+ goto st47
+ st47:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof49
+ goto _test_eof47
}
- st_case_49:
-//line plugins/parsers/influx/machine.go:7685
+ st_case_47:
+//line plugins/parsers/influx/machine.go:7872
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
- goto st49
-tr180:
- ( m.cs) = 50
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+tr178:
+ ( m.cs) = 48
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr155:
- ( m.cs) = 50
-//line plugins/parsers/influx/machine.go.rl:90
+tr153:
+ ( m.cs) = 48
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr149:
- ( m.cs) = 50
-//line plugins/parsers/influx/machine.go.rl:90
+tr147:
+ ( m.cs) = 48
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st50:
+ st48:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof50
+ goto _test_eof48
}
- st_case_50:
-//line plugins/parsers/influx/machine.go:7757
+ st_case_48:
+//line plugins/parsers/influx/machine.go:7943
switch ( m.data)[( m.p)] {
- case 9:
- goto st50
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr162
- case 12:
- goto st2
+ goto tr160
case 13:
- goto st8
+ goto st6
case 32:
- goto st50
+ goto st48
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr163
+ goto tr161
}
- goto tr160
-tr160:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st48
+ }
+ goto tr158
+tr158:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st51
- st51:
+ goto st49
+ st49:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof51
+ goto _test_eof49
}
- st_case_51:
-//line plugins/parsers/influx/machine.go:7792
+ st_case_49:
+//line plugins/parsers/influx/machine.go:7977
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- goto st51
-tr165:
-//line plugins/parsers/influx/machine.go.rl:99
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st49
+tr163:
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st52
- st52:
+ goto st50
+ st50:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof52
+ goto _test_eof50
}
- st_case_52:
-//line plugins/parsers/influx/machine.go:7825
+ st_case_50:
+//line plugins/parsers/influx/machine.go:8009
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr107
+ goto tr105
case 45:
- goto tr167
+ goto tr165
case 46:
- goto tr168
+ goto tr166
case 48:
- goto tr169
+ goto tr167
case 70:
- goto tr171
+ goto tr169
case 84:
- goto tr172
+ goto tr170
case 92:
- goto st76
+ goto st73
case 102:
- goto tr173
+ goto tr171
case 116:
- goto tr174
+ goto tr172
}
if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr170
+ goto tr168
}
goto st6
-tr167:
-//line plugins/parsers/influx/machine.go.rl:19
+tr165:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st53
- st53:
+ goto st51
+ st51:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof53
+ goto _test_eof51
}
- st_case_53:
-//line plugins/parsers/influx/machine.go:7867
+ st_case_51:
+//line plugins/parsers/influx/machine.go:8047
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 46:
- goto st54
+ goto st52
case 48:
- goto st621
+ goto st631
case 92:
- goto st76
+ goto st73
}
if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st622
+ goto st632
}
goto st6
-tr168:
-//line plugins/parsers/influx/machine.go.rl:19
+tr166:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st54
- st54:
+ goto st52
+ st52:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof54
+ goto _test_eof52
}
- st_case_54:
-//line plugins/parsers/influx/machine.go:7899
+ st_case_52:
+//line plugins/parsers/influx/machine.go:8075
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st315
+ goto st325
}
goto st6
- st315:
+ st325:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof315
+ goto _test_eof325
}
- st_case_315:
+ st_case_325:
switch ( m.data)[( m.p)] {
case 10:
- goto tr515
- case 12:
- goto tr516
+ goto tr532
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr514
+ goto tr531
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr518
+ goto tr534
case 69:
- goto st175
+ goto st173
case 92:
- goto st76
+ goto st73
case 101:
- goto st175
+ goto st173
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st315
+ goto st325
}
case ( m.data)[( m.p)] >= 9:
- goto tr514
+ goto tr531
}
goto st6
-tr902:
-//line plugins/parsers/influx/machine.go.rl:19
+tr916:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st316
-tr514:
- ( m.cs) = 316
-//line plugins/parsers/influx/machine.go.rl:121
+ goto st326
+tr531:
+ ( m.cs) = 326
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr908:
- ( m.cs) = 316
-//line plugins/parsers/influx/machine.go.rl:103
+tr923:
+ ( m.cs) = 326
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr911:
- ( m.cs) = 316
-//line plugins/parsers/influx/machine.go.rl:112
+tr925:
+ ( m.cs) = 326
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr915:
- ( m.cs) = 316
-//line plugins/parsers/influx/machine.go.rl:130
+tr928:
+ ( m.cs) = 326
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st316:
+ st326:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof316
+ goto _test_eof326
}
- st_case_316:
-//line plugins/parsers/influx/machine.go:8013
+ st_case_326:
+//line plugins/parsers/influx/machine.go:8183
switch ( m.data)[( m.p)] {
case 10:
- goto st317
- case 12:
- goto st261
+ goto tr273
case 13:
- goto st104
+ goto st102
case 32:
- goto st316
+ goto st326
case 34:
- goto tr31
+ goto tr29
case 45:
- goto tr522
+ goto tr538
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr523
+ goto tr539
}
case ( m.data)[( m.p)] >= 9:
- goto st316
+ goto st326
}
goto st6
-tr650:
-//line plugins/parsers/influx/machine.go.rl:19
+tr665:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st317
-tr659:
- ( m.cs) = 317
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:166
- err = m.handler.SetTimestamp(m.text())
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+ goto st327
+tr273:
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+ goto st327
+tr532:
+ ( m.cs) = 327
+//line plugins/parsers/influx/machine.go.rl:130
+
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr515:
- ( m.cs) = 317
-//line plugins/parsers/influx/machine.go.rl:121
+tr674:
+ ( m.cs) = 327
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+//line plugins/parsers/influx/machine.go.rl:157
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr722:
- ( m.cs) = 317
-//line plugins/parsers/influx/machine.go.rl:103
+tr737:
+ ( m.cs) = 327
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr729:
- ( m.cs) = 317
-//line plugins/parsers/influx/machine.go.rl:112
+tr743:
+ ( m.cs) = 327
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr736:
- ( m.cs) = 317
-//line plugins/parsers/influx/machine.go.rl:130
+tr749:
+ ( m.cs) = 327
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
- err = m.handler.AddBool(key, m.text())
+//line plugins/parsers/influx/machine.go.rl:139
+
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st317:
-//line plugins/parsers/influx/machine.go.rl:157
+tr891:
+ ( m.cs) = 327
+//line plugins/parsers/influx/machine.go.rl:139
+
+ err = m.handler.AddBool(m.key, m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+//line plugins/parsers/influx/machine.go.rl:166
m.lineno++
m.sol = m.p
m.sol++ // next char will be the first column in the line
-//line plugins/parsers/influx/machine.go.rl:163
+ goto _again
+ st327:
+//line plugins/parsers/influx/machine.go.rl:172
- ( m.cs) = 715;
+ m.finishMetric = true
+ ( m.cs) = 739;
{( m.p)++; goto _out }
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof317
+ goto _test_eof327
}
- st_case_317:
-//line plugins/parsers/influx/machine.go:8126
+ st_case_327:
+//line plugins/parsers/influx/machine.go:8352
switch ( m.data)[( m.p)] {
- case 9:
- goto st166
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr339
- case 12:
- goto st9
+ goto tr337
case 13:
- goto st8
+ goto st6
case 32:
- goto st166
+ goto st164
case 34:
- goto tr118
+ goto tr116
case 35:
goto st6
case 44:
goto st6
case 92:
- goto tr340
+ goto tr338
}
- goto tr337
-tr337:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st164
+ }
+ goto tr335
+tr335:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st55
- st55:
+ goto st53
+ st53:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof55
+ goto _test_eof53
}
- st_case_55:
-//line plugins/parsers/influx/machine.go:8161
+ st_case_53:
+//line plugins/parsers/influx/machine.go:8386
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 92:
- goto st157
+ goto st155
}
- goto st55
-tr181:
- ( m.cs) = 56
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st53
+tr179:
+ ( m.cs) = 54
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st56:
+ st54:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof56
+ goto _test_eof54
}
- st_case_56:
-//line plugins/parsers/influx/machine.go:8201
+ st_case_54:
+//line plugins/parsers/influx/machine.go:8425
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr185
- case 12:
- goto tr1
+ goto tr183
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr182
+ goto tr180
case 61:
- goto st55
+ goto st53
case 92:
- goto tr186
+ goto tr184
}
- goto tr184
-tr184:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto tr182
+tr182:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st57
- st57:
+ goto st55
+ st55:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof57
+ goto _test_eof55
}
- st_case_57:
-//line plugins/parsers/influx/machine.go:8236
+ st_case_55:
+//line plugins/parsers/influx/machine.go:8459
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr188
- case 12:
- goto tr1
+ goto tr186
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- goto st57
-tr188:
- ( m.cs) = 58
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st55
+tr186:
+ ( m.cs) = 56
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr185:
- ( m.cs) = 58
-//line plugins/parsers/influx/machine.go.rl:77
+tr183:
+ ( m.cs) = 56
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st58:
+ st56:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof58
+ goto _test_eof56
}
- st_case_58:
-//line plugins/parsers/influx/machine.go:8295
+ st_case_56:
+//line plugins/parsers/influx/machine.go:8517
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr185
- case 12:
- goto tr1
+ goto tr183
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto tr186
+ goto tr184
}
- goto tr184
-tr182:
- ( m.cs) = 59
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto tr182
+tr180:
+ ( m.cs) = 57
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr158:
- ( m.cs) = 59
-//line plugins/parsers/influx/machine.go.rl:90
-
- err = m.handler.AddTag(key, m.text())
+tr156:
+ ( m.cs) = 57
+//line plugins/parsers/influx/machine.go.rl:99
+
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr152:
- ( m.cs) = 59
-//line plugins/parsers/influx/machine.go.rl:90
+tr150:
+ ( m.cs) = 57
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st59:
+ st57:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof59
+ goto _test_eof57
}
- st_case_59:
-//line plugins/parsers/influx/machine.go:8367
+ st_case_57:
+//line plugins/parsers/influx/machine.go:8588
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr192
+ goto tr190
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr193
+ goto tr191
}
- goto tr191
-tr191:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto tr189
+tr189:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st60
- st60:
+ goto st58
+ st58:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof60
+ goto _test_eof58
}
- st_case_60:
-//line plugins/parsers/influx/machine.go:8400
+ st_case_58:
+//line plugins/parsers/influx/machine.go:8620
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr195
+ goto tr193
case 44:
goto st6
case 61:
- goto tr196
+ goto tr194
case 92:
- goto st71
+ goto st69
}
- goto st60
-tr192:
- ( m.cs) = 318
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st58
+tr190:
+ ( m.cs) = 328
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:139
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr195:
- ( m.cs) = 318
-//line plugins/parsers/influx/machine.go.rl:139
+tr193:
+ ( m.cs) = 328
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st318:
+ st328:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof318
+ goto _test_eof328
}
- st_case_318:
-//line plugins/parsers/influx/machine.go:8457
+ st_case_328:
+//line plugins/parsers/influx/machine.go:8676
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto st319
+ goto st329
case 13:
- goto st34
+ goto st32
case 32:
- goto st261
+ goto st271
case 44:
- goto st37
+ goto st35
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st261
+ goto st271
}
- goto st15
- st319:
+ goto st13
+ st329:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof319
+ goto _test_eof329
}
- st_case_319:
+ st_case_329:
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto st319
+ goto st329
case 13:
- goto st34
+ goto st32
case 32:
- goto st261
+ goto st271
case 44:
- goto tr198
+ goto tr196
case 45:
- goto tr525
+ goto tr541
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr526
+ goto tr542
}
case ( m.data)[( m.p)] >= 9:
- goto st261
+ goto st271
}
- goto st15
-tr525:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st13
+tr541:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st61
- st61:
+ goto st59
+ st59:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof61
+ goto _test_eof59
}
- st_case_61:
-//line plugins/parsers/influx/machine.go:8521
+ st_case_59:
+//line plugins/parsers/influx/machine.go:8740
switch ( m.data)[( m.p)] {
case 32:
- goto tr198
+ goto tr196
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] < 12:
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 {
- goto tr198
+ goto tr196
}
case ( m.data)[( m.p)] > 13:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st320
+ goto st330
}
default:
- goto tr198
+ goto tr196
}
- goto st15
-tr526:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st13
+tr542:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st320
- st320:
+ goto st330
+ st330:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof320
+ goto _test_eof330
}
- st_case_320:
-//line plugins/parsers/influx/machine.go:8556
+ st_case_330:
+//line plugins/parsers/influx/machine.go:8775
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st322
+ goto st332
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
-tr527:
- ( m.cs) = 321
-//line plugins/parsers/influx/machine.go.rl:148
+ goto st13
+tr543:
+ ( m.cs) = 331
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st321:
+ st331:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof321
+ goto _test_eof331
}
- st_case_321:
-//line plugins/parsers/influx/machine.go:8600
+ st_case_331:
+//line plugins/parsers/influx/machine.go:8819
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto st321
+ goto st331
case 13:
- goto st34
+ goto st32
case 32:
- goto st266
+ goto st276
case 44:
goto tr2
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st266
+ goto st276
}
- goto st15
- st322:
+ goto st13
+ st332:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof322
+ goto _test_eof332
}
- st_case_322:
+ st_case_332:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st323
+ goto st333
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st323:
+ goto st13
+ st333:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof323
+ goto _test_eof333
}
- st_case_323:
+ st_case_333:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st324
+ goto st334
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st324:
+ goto st13
+ st334:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof324
+ goto _test_eof334
}
- st_case_324:
+ st_case_334:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st325
+ goto st335
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st325:
+ goto st13
+ st335:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof325
+ goto _test_eof335
}
- st_case_325:
+ st_case_335:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st326
+ goto st336
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st326:
+ goto st13
+ st336:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof326
+ goto _test_eof336
}
- st_case_326:
+ st_case_336:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st327
+ goto st337
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st327:
+ goto st13
+ st337:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof327
+ goto _test_eof337
}
- st_case_327:
+ st_case_337:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st328
+ goto st338
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st328:
+ goto st13
+ st338:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof328
+ goto _test_eof338
}
- st_case_328:
+ st_case_338:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st329
+ goto st339
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st329:
+ goto st13
+ st339:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof329
+ goto _test_eof339
}
- st_case_329:
+ st_case_339:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st330
+ goto st340
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st330:
+ goto st13
+ st340:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof330
+ goto _test_eof340
}
- st_case_330:
+ st_case_340:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st331
+ goto st341
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st331:
+ goto st13
+ st341:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof331
+ goto _test_eof341
}
- st_case_331:
+ st_case_341:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st332
+ goto st342
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st332:
+ goto st13
+ st342:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof332
+ goto _test_eof342
}
- st_case_332:
+ st_case_342:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st333
+ goto st343
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st333:
+ goto st13
+ st343:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof333
+ goto _test_eof343
}
- st_case_333:
+ st_case_343:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st334
+ goto st344
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st334:
+ goto st13
+ st344:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof334
+ goto _test_eof344
}
- st_case_334:
+ st_case_344:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st335
+ goto st345
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st335:
+ goto st13
+ st345:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof335
+ goto _test_eof345
}
- st_case_335:
+ st_case_345:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st336
+ goto st346
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st336:
+ goto st13
+ st346:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof336
+ goto _test_eof346
}
- st_case_336:
+ st_case_346:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st337
+ goto st347
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st337:
+ goto st13
+ st347:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof337
+ goto _test_eof347
}
- st_case_337:
+ st_case_347:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st338
+ goto st348
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st338:
+ goto st13
+ st348:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof338
+ goto _test_eof348
}
- st_case_338:
+ st_case_348:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st339
+ goto st349
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st15
- st339:
+ goto st13
+ st349:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof339
+ goto _test_eof349
}
- st_case_339:
+ st_case_349:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr527
+ goto tr543
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr198
+ goto tr196
case 61:
- goto tr55
+ goto tr53
case 92:
- goto st25
+ goto st23
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr450
+ goto tr467
}
- goto st15
-tr196:
-//line plugins/parsers/influx/machine.go.rl:86
+ goto st13
+tr194:
+//line plugins/parsers/influx/machine.go.rl:95
- key = m.text()
+ m.key = m.text()
- goto st62
- st62:
+ goto st60
+ st60:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof62
+ goto _test_eof60
}
- st_case_62:
-//line plugins/parsers/influx/machine.go:9167
+ st_case_60:
+//line plugins/parsers/influx/machine.go:9386
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr151
+ goto tr149
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr153
+ goto tr151
}
- goto tr148
-tr151:
- ( m.cs) = 340
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto tr146
+tr149:
+ ( m.cs) = 350
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:139
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr157:
- ( m.cs) = 340
-//line plugins/parsers/influx/machine.go.rl:139
+tr155:
+ ( m.cs) = 350
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st340:
+ st350:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof340
+ goto _test_eof350
}
- st_case_340:
-//line plugins/parsers/influx/machine.go:9224
+ st_case_350:
+//line plugins/parsers/influx/machine.go:9442
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr548
+ goto tr564
case 13:
- goto st34
+ goto st32
case 32:
- goto tr547
+ goto tr563
case 44:
- goto tr549
+ goto tr565
case 61:
- goto tr132
+ goto tr130
case 92:
- goto st23
+ goto st21
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr547
+ goto tr563
}
- goto st17
-tr548:
- ( m.cs) = 341
-//line plugins/parsers/influx/machine.go.rl:90
+ goto st15
+tr564:
+ ( m.cs) = 351
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr716:
- ( m.cs) = 341
-//line plugins/parsers/influx/machine.go.rl:90
+tr731:
+ ( m.cs) = 351
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr926:
- ( m.cs) = 341
-//line plugins/parsers/influx/machine.go.rl:90
+tr943:
+ ( m.cs) = 351
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr931:
- ( m.cs) = 341
-//line plugins/parsers/influx/machine.go.rl:90
+tr949:
+ ( m.cs) = 351
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr936:
- ( m.cs) = 341
-//line plugins/parsers/influx/machine.go.rl:90
+tr955:
+ ( m.cs) = 351
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st341:
+ st351:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof341
+ goto _test_eof351
}
- st_case_341:
-//line plugins/parsers/influx/machine.go:9355
+ st_case_351:
+//line plugins/parsers/influx/machine.go:9573
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr550
+ goto tr566
case 13:
- goto st34
+ goto st32
case 32:
- goto tr547
+ goto tr563
case 44:
- goto tr62
+ goto tr60
case 45:
- goto tr551
+ goto tr567
case 61:
- goto tr132
+ goto tr130
case 92:
- goto tr66
+ goto tr64
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr552
+ goto tr568
}
case ( m.data)[( m.p)] >= 9:
- goto tr547
+ goto tr563
}
- goto tr64
-tr575:
- ( m.cs) = 342
-//line plugins/parsers/influx/machine.go.rl:90
+ goto tr62
+tr591:
+ ( m.cs) = 352
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr550:
- ( m.cs) = 342
-//line plugins/parsers/influx/machine.go.rl:90
+tr566:
+ ( m.cs) = 352
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st342:
+ st352:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof342
+ goto _test_eof352
}
- st_case_342:
-//line plugins/parsers/influx/machine.go:9418
+ st_case_352:
+//line plugins/parsers/influx/machine.go:9636
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr550
+ goto tr566
case 13:
- goto st34
+ goto st32
case 32:
- goto tr547
+ goto tr563
case 44:
- goto tr62
+ goto tr60
case 45:
- goto tr551
+ goto tr567
case 61:
goto tr12
case 92:
- goto tr66
+ goto tr64
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr552
+ goto tr568
}
case ( m.data)[( m.p)] >= 9:
- goto tr547
+ goto tr563
}
- goto tr64
-tr551:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr62
+tr567:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st63
- st63:
+ goto st61
+ st61:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof63
+ goto _test_eof61
}
- st_case_63:
-//line plugins/parsers/influx/machine.go:9457
+ st_case_61:
+//line plugins/parsers/influx/machine.go:9675
switch ( m.data)[( m.p)] {
case 10:
- goto tr132
+ goto tr130
case 11:
- goto tr68
+ goto tr66
case 13:
- goto tr132
+ goto tr130
case 32:
- goto tr60
+ goto tr58
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st343
+ goto st353
}
case ( m.data)[( m.p)] >= 9:
- goto tr60
+ goto tr58
}
- goto st19
-tr552:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st17
+tr568:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st343
- st343:
+ goto st353
+ st353:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof343
+ goto _test_eof353
}
- st_case_343:
-//line plugins/parsers/influx/machine.go:9494
+ st_case_353:
+//line plugins/parsers/influx/machine.go:9712
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st345
+ goto st355
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
-tr557:
- ( m.cs) = 344
-//line plugins/parsers/influx/machine.go.rl:90
+ goto st17
+tr573:
+ ( m.cs) = 354
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr554:
- ( m.cs) = 344
-//line plugins/parsers/influx/machine.go.rl:90
+tr570:
+ ( m.cs) = 354
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st344:
+ st354:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof344
+ goto _test_eof354
}
- st_case_344:
-//line plugins/parsers/influx/machine.go:9565
+ st_case_354:
+//line plugins/parsers/influx/machine.go:9783
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr557
+ goto tr573
case 13:
- goto st34
+ goto st32
case 32:
- goto tr556
+ goto tr572
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto tr66
+ goto tr64
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr556
+ goto tr572
}
- goto tr64
- st345:
+ goto tr62
+ st355:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof345
+ goto _test_eof355
}
- st_case_345:
+ st_case_355:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st346
+ goto st356
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st346:
+ goto st17
+ st356:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof346
+ goto _test_eof356
}
- st_case_346:
+ st_case_356:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st347
+ goto st357
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st347:
+ goto st17
+ st357:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof347
+ goto _test_eof357
}
- st_case_347:
+ st_case_357:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st348
+ goto st358
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st348:
+ goto st17
+ st358:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof348
+ goto _test_eof358
}
- st_case_348:
+ st_case_358:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st349
+ goto st359
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st349:
+ goto st17
+ st359:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof349
+ goto _test_eof359
}
- st_case_349:
+ st_case_359:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st350
+ goto st360
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st350:
+ goto st17
+ st360:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof350
+ goto _test_eof360
}
- st_case_350:
+ st_case_360:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st351
+ goto st361
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st351:
+ goto st17
+ st361:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof351
+ goto _test_eof361
}
- st_case_351:
+ st_case_361:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st352
+ goto st362
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st352:
+ goto st17
+ st362:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof352
+ goto _test_eof362
}
- st_case_352:
+ st_case_362:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st353
+ goto st363
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st353:
+ goto st17
+ st363:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof353
+ goto _test_eof363
}
- st_case_353:
+ st_case_363:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st354
+ goto st364
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st354:
+ goto st17
+ st364:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof354
+ goto _test_eof364
}
- st_case_354:
+ st_case_364:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st355
+ goto st365
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st355:
+ goto st17
+ st365:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof355
+ goto _test_eof365
}
- st_case_355:
+ st_case_365:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st356
+ goto st366
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st356:
+ goto st17
+ st366:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof356
+ goto _test_eof366
}
- st_case_356:
+ st_case_366:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st357
+ goto st367
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st357:
+ goto st17
+ st367:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof357
+ goto _test_eof367
}
- st_case_357:
+ st_case_367:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st358
+ goto st368
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st358:
+ goto st17
+ st368:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof358
+ goto _test_eof368
}
- st_case_358:
+ st_case_368:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st359
+ goto st369
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st359:
+ goto st17
+ st369:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof359
+ goto _test_eof369
}
- st_case_359:
+ st_case_369:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st360
+ goto st370
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st360:
+ goto st17
+ st370:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof360
+ goto _test_eof370
}
- st_case_360:
+ st_case_370:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st361
+ goto st371
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st361:
+ goto st17
+ st371:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof361
+ goto _test_eof371
}
- st_case_361:
+ st_case_371:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st362
+ goto st372
}
case ( m.data)[( m.p)] >= 9:
- goto tr553
+ goto tr569
}
- goto st19
- st362:
+ goto st17
+ st372:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof362
+ goto _test_eof372
}
- st_case_362:
+ st_case_372:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr554
+ goto tr570
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr553
+ goto tr569
case 44:
- goto tr62
+ goto tr60
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr553
+ goto tr569
}
- goto st19
-tr153:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st17
+tr151:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st64
- st64:
+ goto st62
+ st62:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof64
+ goto _test_eof62
}
- st_case_64:
-//line plugins/parsers/influx/machine.go:10132
+ st_case_62:
+//line plugins/parsers/influx/machine.go:10350
switch ( m.data)[( m.p)] {
case 34:
- goto st49
+ goto st47
case 92:
- goto st65
+ goto st63
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st17
- st65:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st15
+ st63:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof65
+ goto _test_eof63
}
- st_case_65:
-//line plugins/parsers/influx/machine.go:10156
+ st_case_63:
+//line plugins/parsers/influx/machine.go:10374
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
- goto st49
-tr156:
- ( m.cs) = 66
-//line plugins/parsers/influx/machine.go.rl:90
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+tr154:
+ ( m.cs) = 64
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr150:
- ( m.cs) = 66
-//line plugins/parsers/influx/machine.go.rl:90
+tr148:
+ ( m.cs) = 64
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st66:
+ st64:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof66
+ goto _test_eof64
}
- st_case_66:
-//line plugins/parsers/influx/machine.go:10215
+ st_case_64:
+//line plugins/parsers/influx/machine.go:10432
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr203
- case 12:
- goto tr60
+ goto tr201
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto tr205
+ goto tr203
}
- goto tr202
-tr202:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto tr200
+tr200:
+//line plugins/parsers/influx/machine.go.rl:28
- goto st67
- st67:
+ m.pb = m.p
+
+ goto st65
+ st65:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof67
+ goto _test_eof65
}
- st_case_67:
-//line plugins/parsers/influx/machine.go:10250
+ st_case_65:
+//line plugins/parsers/influx/machine.go:10466
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr207
- case 12:
- goto tr60
+ goto tr205
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- goto st67
-tr207:
- ( m.cs) = 68
-//line plugins/parsers/influx/machine.go.rl:90
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st65
+tr205:
+ ( m.cs) = 66
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr203:
- ( m.cs) = 68
-//line plugins/parsers/influx/machine.go.rl:90
+tr201:
+ ( m.cs) = 66
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st68:
+ st66:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof68
+ goto _test_eof66
}
- st_case_68:
-//line plugins/parsers/influx/machine.go:10309
+ st_case_66:
+//line plugins/parsers/influx/machine.go:10524
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr203
- case 12:
- goto tr60
+ goto tr201
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto tr205
+ goto tr203
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
}
- goto tr202
-tr204:
- ( m.cs) = 363
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr200
+tr202:
+ ( m.cs) = 373
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:139
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr208:
- ( m.cs) = 363
-//line plugins/parsers/influx/machine.go.rl:139
+tr206:
+ ( m.cs) = 373
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st363:
+ st373:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof363
+ goto _test_eof373
}
- st_case_363:
-//line plugins/parsers/influx/machine.go:10368
+ st_case_373:
+//line plugins/parsers/influx/machine.go:10582
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr575
+ goto tr591
case 13:
- goto st34
+ goto st32
case 32:
- goto tr547
+ goto tr563
case 44:
- goto tr549
+ goto tr565
case 61:
goto tr12
case 92:
- goto st21
+ goto st19
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr547
+ goto tr563
}
- goto st19
-tr205:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st17
+tr203:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st69
- st69:
+ goto st67
+ st67:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof69
+ goto _test_eof67
}
- st_case_69:
-//line plugins/parsers/influx/machine.go:10400
+ st_case_67:
+//line plugins/parsers/influx/machine.go:10614
switch ( m.data)[( m.p)] {
case 34:
- goto st67
+ goto st65
case 92:
- goto st70
+ goto st68
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st19
- st70:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st17
+ st68:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof70
+ goto _test_eof68
}
- st_case_70:
-//line plugins/parsers/influx/machine.go:10424
+ st_case_68:
+//line plugins/parsers/influx/machine.go:10638
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr207
- case 12:
- goto tr60
+ goto tr205
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- goto st67
-tr193:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st65
+tr191:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st71
- st71:
+ goto st69
+ st69:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof71
+ goto _test_eof69
}
- st_case_71:
-//line plugins/parsers/influx/machine.go:10459
+ st_case_69:
+//line plugins/parsers/influx/machine.go:10672
switch ( m.data)[( m.p)] {
case 34:
- goto st60
+ goto st58
case 92:
- goto st72
+ goto st70
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st15
- st72:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st13
+ st70:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof72
+ goto _test_eof70
}
- st_case_72:
-//line plugins/parsers/influx/machine.go:10483
+ st_case_70:
+//line plugins/parsers/influx/machine.go:10696
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr195
+ goto tr193
case 44:
goto st6
case 61:
- goto tr196
+ goto tr194
case 92:
- goto st71
+ goto st69
}
- goto st60
-tr189:
-//line plugins/parsers/influx/machine.go.rl:99
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st58
+tr187:
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st73
-tr346:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st71
+tr344:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:99
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st73
- st73:
+ goto st71
+ st71:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof73
+ goto _test_eof71
}
- st_case_73:
-//line plugins/parsers/influx/machine.go:10526
+ st_case_71:
+//line plugins/parsers/influx/machine.go:10738
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr212
+ goto tr210
case 44:
- goto tr182
+ goto tr180
case 45:
- goto tr213
+ goto tr211
case 46:
- goto tr214
+ goto tr212
case 48:
- goto tr215
+ goto tr213
case 70:
- goto tr217
+ goto tr215
case 84:
- goto tr218
+ goto tr216
case 92:
- goto st157
+ goto st155
case 102:
- goto tr219
+ goto tr217
case 116:
- goto tr220
+ goto tr218
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr216
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr214
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr178
}
- goto st55
-tr212:
- ( m.cs) = 364
-//line plugins/parsers/influx/machine.go.rl:139
+ goto st53
+tr210:
+ ( m.cs) = 374
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st364:
+ st374:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof364
+ goto _test_eof374
}
- st_case_364:
-//line plugins/parsers/influx/machine.go:10583
+ st_case_374:
+//line plugins/parsers/influx/machine.go:10796
switch ( m.data)[( m.p)] {
- case 9:
- goto tr576
case 10:
- goto tr475
+ goto tr492
case 11:
- goto tr577
- case 12:
- goto tr482
+ goto tr593
case 13:
- goto tr476
+ goto tr493
case 32:
- goto tr576
+ goto tr592
case 34:
- goto tr85
+ goto tr83
case 44:
- goto tr578
+ goto tr594
case 92:
- goto tr87
+ goto tr85
}
- goto tr82
-tr607:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr592
+ }
+ goto tr80
+tr623:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr576:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:77
+tr592:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr749:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:90
+tr762:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr619:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:77
+tr635:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr745:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:90
+tr757:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr777:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:90
+tr790:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr783:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:90
+tr796:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr789:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:90
+tr802:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr802:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:77
+tr816:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr807:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:77
+tr821:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr812:
- ( m.cs) = 365
-//line plugins/parsers/influx/machine.go.rl:77
+tr826:
+ ( m.cs) = 375
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st365:
+ st375:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof365
+ goto _test_eof375
}
- st_case_365:
-//line plugins/parsers/influx/machine.go:10837
+ st_case_375:
+//line plugins/parsers/influx/machine.go:11049
switch ( m.data)[( m.p)] {
- case 9:
- goto st365
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr580
- case 12:
- goto st290
+ goto tr596
case 13:
- goto st74
+ goto st72
case 32:
- goto st365
+ goto st375
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 45:
- goto tr581
+ goto tr597
case 61:
goto st6
case 92:
- goto tr98
+ goto tr96
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr582
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr598
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto st375
}
- goto tr94
-tr580:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr92
+tr596:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st366
- st366:
+ goto st376
+ st376:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof366
+ goto _test_eof376
}
- st_case_366:
-//line plugins/parsers/influx/machine.go:10877
+ st_case_376:
+//line plugins/parsers/influx/machine.go:11090
switch ( m.data)[( m.p)] {
- case 9:
- goto st365
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr580
- case 12:
- goto st290
+ goto tr596
case 13:
- goto st74
+ goto st72
case 32:
- goto st365
+ goto st375
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 45:
- goto tr581
+ goto tr597
case 61:
- goto tr101
+ goto tr99
case 92:
- goto tr98
+ goto tr96
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr582
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr598
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto st375
}
- goto tr94
-tr476:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr92
+tr493:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st74
-tr586:
- ( m.cs) = 74
-//line plugins/parsers/influx/machine.go.rl:148
+ goto st72
+tr602:
+ ( m.cs) = 72
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr623:
- ( m.cs) = 74
-//line plugins/parsers/influx/machine.go.rl:121
+tr638:
+ ( m.cs) = 72
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr780:
- ( m.cs) = 74
-//line plugins/parsers/influx/machine.go.rl:103
+tr793:
+ ( m.cs) = 72
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr786:
- ( m.cs) = 74
-//line plugins/parsers/influx/machine.go.rl:112
+tr799:
+ ( m.cs) = 72
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr792:
- ( m.cs) = 74
-//line plugins/parsers/influx/machine.go.rl:130
+tr805:
+ ( m.cs) = 72
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st74:
+ st72:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof74
+ goto _test_eof72
}
- st_case_74:
-//line plugins/parsers/influx/machine.go:10982
- if ( m.data)[( m.p)] == 10 {
- goto st288
+ st_case_72:
+//line plugins/parsers/influx/machine.go:11196
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr219
+ case 34:
+ goto tr29
+ case 92:
+ goto st73
+ }
+ goto st6
+tr26:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st73
+ st73:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof73
+ }
+ st_case_73:
+//line plugins/parsers/influx/machine.go:11217
+ switch ( m.data)[( m.p)] {
+ case 34:
+ goto st6
+ case 92:
+ goto st6
}
goto tr8
-tr581:
-//line plugins/parsers/influx/machine.go.rl:19
+tr597:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st75
- st75:
+ goto st74
+ st74:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof75
+ goto _test_eof74
}
- st_case_75:
-//line plugins/parsers/influx/machine.go:10998
+ st_case_74:
+//line plugins/parsers/influx/machine.go:11236
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr105
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st367
+ switch {
+ case ( m.data)[( m.p)] > 13:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st377
+ }
+ case ( m.data)[( m.p)] >= 12:
+ goto st6
}
- goto st33
-tr582:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st31
+tr598:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st367
- st367:
+ goto st377
+ st377:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof367
+ goto _test_eof377
}
- st_case_367:
-//line plugins/parsers/influx/machine.go:11034
+ st_case_377:
+//line plugins/parsers/influx/machine.go:11273
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st370
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st380
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
-tr583:
- ( m.cs) = 368
-//line plugins/parsers/influx/machine.go.rl:148
+ goto st31
+tr599:
+ ( m.cs) = 378
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st368:
+ st378:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof368
+ goto _test_eof378
}
- st_case_368:
-//line plugins/parsers/influx/machine.go:11079
+ st_case_378:
+//line plugins/parsers/influx/machine.go:11319
switch ( m.data)[( m.p)] {
case 10:
- goto st288
- case 12:
- goto st266
+ goto tr219
case 13:
- goto st74
+ goto st72
case 32:
- goto st368
+ goto st378
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto st368
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st378
}
goto st6
-tr27:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st76
- st76:
- if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof76
- }
- st_case_76:
-//line plugins/parsers/influx/machine.go:11109
- switch ( m.data)[( m.p)] {
- case 34:
- goto st6
- case 92:
- goto st6
- }
- goto tr8
-tr585:
- ( m.cs) = 369
-//line plugins/parsers/influx/machine.go.rl:148
+tr601:
+ ( m.cs) = 379
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st369:
+ st379:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof369
+ goto _test_eof379
}
- st_case_369:
-//line plugins/parsers/influx/machine.go:11135
+ st_case_379:
+//line plugins/parsers/influx/machine.go:11354
switch ( m.data)[( m.p)] {
- case 9:
- goto st368
case 10:
- goto st288
+ goto tr219
case 11:
- goto st369
- case 12:
- goto st266
+ goto st379
case 13:
- goto st74
+ goto st72
case 32:
- goto st368
+ goto st378
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- goto st33
-tr98:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st378
+ }
+ goto st31
+tr96:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st77
- st77:
+ goto st75
+ st75:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof77
+ goto _test_eof75
}
- st_case_77:
-//line plugins/parsers/influx/machine.go:11170
+ st_case_75:
+//line plugins/parsers/influx/machine.go:11388
switch ( m.data)[( m.p)] {
case 34:
- goto st33
+ goto st31
case 92:
- goto st33
+ goto st31
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -11182,1654 +11400,1661 @@ tr98:
goto tr8
}
goto st3
- st370:
+ st380:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof370
+ goto _test_eof380
}
- st_case_370:
+ st_case_380:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st371
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st381
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st371:
+ goto st31
+ st381:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof371
+ goto _test_eof381
}
- st_case_371:
+ st_case_381:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st372
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st382
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st372:
+ goto st31
+ st382:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof372
+ goto _test_eof382
}
- st_case_372:
+ st_case_382:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st373
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st383
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st373:
+ goto st31
+ st383:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof373
+ goto _test_eof383
}
- st_case_373:
+ st_case_383:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st374
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st384
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st374:
+ goto st31
+ st384:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof374
+ goto _test_eof384
}
- st_case_374:
+ st_case_384:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st375
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st385
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st375:
+ goto st31
+ st385:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof375
+ goto _test_eof385
}
- st_case_375:
+ st_case_385:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st376
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st386
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st376:
+ goto st31
+ st386:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof376
+ goto _test_eof386
}
- st_case_376:
+ st_case_386:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st377
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st387
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st377:
+ goto st31
+ st387:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof377
+ goto _test_eof387
}
- st_case_377:
+ st_case_387:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st378
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st388
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st378:
+ goto st31
+ st388:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof378
+ goto _test_eof388
}
- st_case_378:
+ st_case_388:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st379
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st389
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st379:
+ goto st31
+ st389:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof379
+ goto _test_eof389
}
- st_case_379:
+ st_case_389:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st380
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st390
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st380:
+ goto st31
+ st390:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof380
+ goto _test_eof390
}
- st_case_380:
+ st_case_390:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st381
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st391
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st381:
+ goto st31
+ st391:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof381
+ goto _test_eof391
}
- st_case_381:
+ st_case_391:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st382
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st392
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st382:
+ goto st31
+ st392:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof382
+ goto _test_eof392
}
- st_case_382:
+ st_case_392:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st383
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st393
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st383:
+ goto st31
+ st393:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof383
+ goto _test_eof393
}
- st_case_383:
+ st_case_393:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st384
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st394
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st384:
+ goto st31
+ st394:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof384
+ goto _test_eof394
}
- st_case_384:
+ st_case_394:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st385
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st395
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st385:
+ goto st31
+ st395:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof385
+ goto _test_eof395
}
- st_case_385:
+ st_case_395:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st386
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st396
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st386:
+ goto st31
+ st396:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof386
+ goto _test_eof396
}
- st_case_386:
+ st_case_396:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st387
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st397
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st33
- st387:
+ goto st31
+ st397:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof387
+ goto _test_eof397
}
- st_case_387:
+ st_case_397:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr583
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr585
- case 12:
- goto tr450
+ goto tr601
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st77
+ goto st75
}
- goto st33
-tr577:
- ( m.cs) = 388
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr599
+ }
+ goto st31
+tr593:
+ ( m.cs) = 398
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr621:
- ( m.cs) = 388
-//line plugins/parsers/influx/machine.go.rl:77
+tr637:
+ ( m.cs) = 398
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr803:
- ( m.cs) = 388
-//line plugins/parsers/influx/machine.go.rl:77
+tr818:
+ ( m.cs) = 398
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr808:
- ( m.cs) = 388
-//line plugins/parsers/influx/machine.go.rl:77
+tr823:
+ ( m.cs) = 398
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr813:
- ( m.cs) = 388
-//line plugins/parsers/influx/machine.go.rl:77
+tr827:
+ ( m.cs) = 398
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st388:
+ st398:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof388
+ goto _test_eof398
}
- st_case_388:
-//line plugins/parsers/influx/machine.go:11855
+ st_case_398:
+//line plugins/parsers/influx/machine.go:12089
switch ( m.data)[( m.p)] {
- case 9:
- goto tr607
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr608
- case 12:
- goto tr482
+ goto tr624
case 13:
- goto st74
+ goto st72
case 32:
- goto tr607
+ goto tr623
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr92
+ goto tr90
case 45:
- goto tr609
+ goto tr625
case 61:
- goto st31
+ goto st29
case 92:
- goto tr125
+ goto tr123
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr610
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr626
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr623
}
- goto tr121
-tr608:
- ( m.cs) = 389
-//line plugins/parsers/influx/machine.go.rl:77
+ goto tr119
+tr624:
+ ( m.cs) = 399
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st389:
+ st399:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof389
+ goto _test_eof399
}
- st_case_389:
-//line plugins/parsers/influx/machine.go:11906
+ st_case_399:
+//line plugins/parsers/influx/machine.go:12141
switch ( m.data)[( m.p)] {
- case 9:
- goto tr607
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr608
- case 12:
- goto tr482
+ goto tr624
case 13:
- goto st74
+ goto st72
case 32:
- goto tr607
+ goto tr623
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr92
+ goto tr90
case 45:
- goto tr609
+ goto tr625
case 61:
- goto tr129
+ goto tr127
case 92:
- goto tr125
+ goto tr123
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr610
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr626
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr623
}
- goto tr121
-tr92:
- ( m.cs) = 78
-//line plugins/parsers/influx/machine.go.rl:77
+ goto tr119
+tr90:
+ ( m.cs) = 76
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr86:
- ( m.cs) = 78
-//line plugins/parsers/influx/machine.go.rl:77
+tr84:
+ ( m.cs) = 76
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr233:
- ( m.cs) = 78
-//line plugins/parsers/influx/machine.go.rl:90
+tr231:
+ ( m.cs) = 76
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st78:
+ st76:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof78
+ goto _test_eof76
}
- st_case_78:
-//line plugins/parsers/influx/machine.go:11983
+ st_case_76:
+//line plugins/parsers/influx/machine.go:12219
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr192
+ goto tr190
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr224
+ goto tr222
+ }
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
}
- goto tr223
-tr223:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr221
+tr221:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st79
- st79:
+ goto st77
+ st77:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof79
+ goto _test_eof77
}
- st_case_79:
-//line plugins/parsers/influx/machine.go:12016
+ st_case_77:
+//line plugins/parsers/influx/machine.go:12251
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr195
+ goto tr193
case 44:
goto st6
case 61:
- goto tr226
+ goto tr224
case 92:
- goto st89
+ goto st87
}
- goto st79
-tr226:
-//line plugins/parsers/influx/machine.go.rl:86
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st77
+tr224:
+//line plugins/parsers/influx/machine.go.rl:95
- key = m.text()
+ m.key = m.text()
- goto st80
- st80:
+ goto st78
+ st78:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof80
+ goto _test_eof78
}
- st_case_80:
-//line plugins/parsers/influx/machine.go:12049
+ st_case_78:
+//line plugins/parsers/influx/machine.go:12283
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr151
+ goto tr149
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr229
+ goto tr227
+ }
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
}
- goto tr228
-tr228:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr226
+tr226:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st81
- st81:
+ goto st79
+ st79:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof81
+ goto _test_eof79
}
- st_case_81:
-//line plugins/parsers/influx/machine.go:12082
+ st_case_79:
+//line plugins/parsers/influx/machine.go:12315
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
- goto st81
-tr232:
- ( m.cs) = 82
-//line plugins/parsers/influx/machine.go.rl:90
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+tr230:
+ ( m.cs) = 80
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st82:
+ st80:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof82
+ goto _test_eof80
}
- st_case_82:
-//line plugins/parsers/influx/machine.go:12124
+ st_case_80:
+//line plugins/parsers/influx/machine.go:12356
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr236
- case 12:
- goto tr60
+ goto tr234
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto tr237
+ goto tr235
}
- goto tr235
-tr235:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto tr233
+tr233:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st83
- st83:
+ goto st81
+ st81:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof83
+ goto _test_eof81
}
- st_case_83:
-//line plugins/parsers/influx/machine.go:12159
+ st_case_81:
+//line plugins/parsers/influx/machine.go:12390
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr239
- case 12:
- goto tr60
+ goto tr237
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- goto st83
-tr239:
- ( m.cs) = 84
-//line plugins/parsers/influx/machine.go.rl:90
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st81
+tr237:
+ ( m.cs) = 82
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr236:
- ( m.cs) = 84
-//line plugins/parsers/influx/machine.go.rl:90
+tr234:
+ ( m.cs) = 82
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st84:
+ st82:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof84
+ goto _test_eof82
}
- st_case_84:
-//line plugins/parsers/influx/machine.go:12218
+ st_case_82:
+//line plugins/parsers/influx/machine.go:12448
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr236
- case 12:
- goto tr60
+ goto tr234
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto tr237
+ goto tr235
}
- goto tr235
-tr237:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto tr233
+tr235:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st85
- st85:
+ goto st83
+ st83:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof85
+ goto _test_eof83
}
- st_case_85:
-//line plugins/parsers/influx/machine.go:12253
+ st_case_83:
+//line plugins/parsers/influx/machine.go:12482
switch ( m.data)[( m.p)] {
case 34:
- goto st83
+ goto st81
case 92:
- goto st86
+ goto st84
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st19
- st86:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st17
+ st84:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof86
+ goto _test_eof84
}
- st_case_86:
-//line plugins/parsers/influx/machine.go:12277
+ st_case_84:
+//line plugins/parsers/influx/machine.go:12506
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr239
- case 12:
- goto tr60
+ goto tr237
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- goto st83
-tr229:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st81
+tr227:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st87
- st87:
+ goto st85
+ st85:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof87
+ goto _test_eof85
}
- st_case_87:
-//line plugins/parsers/influx/machine.go:12312
+ st_case_85:
+//line plugins/parsers/influx/machine.go:12540
switch ( m.data)[( m.p)] {
case 34:
- goto st81
+ goto st79
case 92:
- goto st88
+ goto st86
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st17
- st88:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st15
+ st86:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof88
+ goto _test_eof86
}
- st_case_88:
-//line plugins/parsers/influx/machine.go:12336
+ st_case_86:
+//line plugins/parsers/influx/machine.go:12564
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
- goto st81
-tr224:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+tr222:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st89
- st89:
+ goto st87
+ st87:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof89
+ goto _test_eof87
}
- st_case_89:
-//line plugins/parsers/influx/machine.go:12371
+ st_case_87:
+//line plugins/parsers/influx/machine.go:12598
switch ( m.data)[( m.p)] {
case 34:
- goto st79
+ goto st77
case 92:
- goto st90
+ goto st88
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st15
- st90:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st13
+ st88:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof90
+ goto _test_eof88
}
- st_case_90:
-//line plugins/parsers/influx/machine.go:12395
+ st_case_88:
+//line plugins/parsers/influx/machine.go:12622
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr195
+ goto tr193
case 44:
goto st6
case 61:
- goto tr226
+ goto tr224
case 92:
- goto st89
+ goto st87
}
- goto st79
-tr609:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st77
+tr625:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st91
- st91:
+ goto st89
+ st89:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof91
+ goto _test_eof89
}
- st_case_91:
-//line plugins/parsers/influx/machine.go:12428
+ st_case_89:
+//line plugins/parsers/influx/machine.go:12654
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr127
- case 12:
- goto tr1
+ goto tr125
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st390
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st400
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr87
}
- goto st42
-tr610:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st40
+tr626:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st390
- st390:
+ goto st400
+ st400:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof390
+ goto _test_eof400
}
- st_case_390:
-//line plugins/parsers/influx/machine.go:12466
+ st_case_400:
+//line plugins/parsers/influx/machine.go:12693
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st534
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st544
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
-tr616:
- ( m.cs) = 391
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st40
+tr632:
+ ( m.cs) = 401
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr756:
- ( m.cs) = 391
-//line plugins/parsers/influx/machine.go.rl:90
+tr769:
+ ( m.cs) = 401
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr611:
- ( m.cs) = 391
-//line plugins/parsers/influx/machine.go.rl:77
+tr627:
+ ( m.cs) = 401
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr753:
- ( m.cs) = 391
-//line plugins/parsers/influx/machine.go.rl:90
+tr766:
+ ( m.cs) = 401
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st391:
+ st401:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof391
+ goto _test_eof401
}
- st_case_391:
-//line plugins/parsers/influx/machine.go:12570
+ st_case_401:
+//line plugins/parsers/influx/machine.go:12798
switch ( m.data)[( m.p)] {
- case 9:
- goto st391
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr615
- case 12:
- goto st294
+ goto tr631
case 13:
- goto st74
+ goto st72
case 32:
- goto st391
+ goto st401
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr98
+ goto tr96
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st401
}
- goto tr94
-tr615:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr92
+tr631:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st392
- st392:
+ goto st402
+ st402:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof392
+ goto _test_eof402
}
- st_case_392:
-//line plugins/parsers/influx/machine.go:12605
+ st_case_402:
+//line plugins/parsers/influx/machine.go:12832
switch ( m.data)[( m.p)] {
- case 9:
- goto st391
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr615
- case 12:
- goto st294
+ goto tr631
case 13:
- goto st74
+ goto st72
case 32:
- goto st391
+ goto st401
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto tr98
+ goto tr96
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st401
}
- goto tr94
-tr617:
- ( m.cs) = 393
-//line plugins/parsers/influx/machine.go.rl:77
+ goto tr92
+tr633:
+ ( m.cs) = 403
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr612:
- ( m.cs) = 393
-//line plugins/parsers/influx/machine.go.rl:77
+tr628:
+ ( m.cs) = 403
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st393:
+ st403:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof393
+ goto _test_eof403
}
- st_case_393:
-//line plugins/parsers/influx/machine.go:12674
+ st_case_403:
+//line plugins/parsers/influx/machine.go:12900
switch ( m.data)[( m.p)] {
- case 9:
- goto tr616
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr617
- case 12:
- goto tr495
+ goto tr633
case 13:
- goto st74
+ goto st72
case 32:
- goto tr616
+ goto tr632
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto tr125
+ goto tr123
}
- goto tr121
-tr129:
-//line plugins/parsers/influx/machine.go.rl:99
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr632
+ }
+ goto tr119
+tr127:
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st92
-tr374:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st90
+tr381:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:99
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st92
- st92:
+ goto st90
+ st90:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof92
+ goto _test_eof90
}
- st_case_92:
-//line plugins/parsers/influx/machine.go:12719
+ st_case_90:
+//line plugins/parsers/influx/machine.go:12944
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr212
+ goto tr210
case 44:
- goto tr92
+ goto tr90
case 45:
- goto tr245
+ goto tr243
case 46:
- goto tr246
+ goto tr244
case 48:
- goto tr247
+ goto tr245
case 70:
- goto tr249
+ goto tr247
case 84:
- goto tr250
+ goto tr248
case 92:
- goto st142
+ goto st140
case 102:
- goto tr251
+ goto tr249
case 116:
- goto tr252
+ goto tr250
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr248
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr246
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr87
}
- goto st31
-tr90:
- ( m.cs) = 93
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st29
+tr88:
+ ( m.cs) = 91
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr84:
- ( m.cs) = 93
-//line plugins/parsers/influx/machine.go.rl:77
+tr82:
+ ( m.cs) = 91
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st93:
+ st91:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof93
+ goto _test_eof91
}
- st_case_93:
-//line plugins/parsers/influx/machine.go:12793
+ st_case_91:
+//line plugins/parsers/influx/machine.go:13019
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr131
- case 12:
- goto tr1
+ goto tr129
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr92
+ goto tr90
case 61:
- goto st31
+ goto st29
case 92:
- goto tr125
+ goto tr123
}
- goto tr121
-tr125:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto tr119
+tr123:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st94
- st94:
+ goto st92
+ st92:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof94
+ goto _test_eof92
}
- st_case_94:
-//line plugins/parsers/influx/machine.go:12828
+ st_case_92:
+//line plugins/parsers/influx/machine.go:13053
switch ( m.data)[( m.p)] {
case 34:
- goto st42
+ goto st40
case 92:
- goto st42
+ goto st40
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -12839,278 +13064,279 @@ tr125:
case ( m.data)[( m.p)] >= 9:
goto tr8
}
- goto st12
-tr245:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st10
+tr243:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st95
- st95:
+ goto st93
+ st93:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof95
+ goto _test_eof93
}
- st_case_95:
-//line plugins/parsers/influx/machine.go:12855
+ st_case_93:
+//line plugins/parsers/influx/machine.go:13080
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 46:
- goto st97
+ goto st95
case 48:
- goto st522
+ goto st532
case 92:
- goto st142
+ goto st140
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st525
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st535
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr87
}
- goto st31
-tr85:
- ( m.cs) = 394
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st29
+tr83:
+ ( m.cs) = 404
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:139
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr91:
- ( m.cs) = 394
-//line plugins/parsers/influx/machine.go.rl:139
+tr89:
+ ( m.cs) = 404
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr118:
- ( m.cs) = 394
-//line plugins/parsers/influx/machine.go.rl:139
+tr116:
+ ( m.cs) = 404
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st394:
+ st404:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof394
+ goto _test_eof404
}
- st_case_394:
-//line plugins/parsers/influx/machine.go:12936
+ st_case_404:
+//line plugins/parsers/influx/machine.go:13162
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr618
+ goto tr634
case 13:
- goto st34
+ goto st32
case 32:
- goto tr482
+ goto tr499
case 44:
- goto tr484
+ goto tr501
case 92:
- goto st96
+ goto st94
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr482
+ goto tr499
}
goto st1
-tr618:
- ( m.cs) = 395
-//line plugins/parsers/influx/machine.go.rl:77
+tr634:
+ ( m.cs) = 405
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr798:
- ( m.cs) = 395
-//line plugins/parsers/influx/machine.go.rl:77
+tr812:
+ ( m.cs) = 405
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr981:
- ( m.cs) = 395
-//line plugins/parsers/influx/machine.go.rl:77
+tr1006:
+ ( m.cs) = 405
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr984:
- ( m.cs) = 395
-//line plugins/parsers/influx/machine.go.rl:77
+tr1010:
+ ( m.cs) = 405
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr987:
- ( m.cs) = 395
-//line plugins/parsers/influx/machine.go.rl:77
+tr1014:
+ ( m.cs) = 405
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st395:
+ st405:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof395
+ goto _test_eof405
}
- st_case_395:
-//line plugins/parsers/influx/machine.go:13065
+ st_case_405:
+//line plugins/parsers/influx/machine.go:13291
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr487
+ goto tr504
case 13:
- goto st34
+ goto st32
case 32:
- goto tr482
+ goto tr499
case 44:
goto tr4
case 45:
- goto tr488
+ goto tr505
case 61:
goto st1
case 92:
- goto tr45
+ goto tr43
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr489
+ goto tr506
}
case ( m.data)[( m.p)] >= 9:
- goto tr482
+ goto tr499
}
- goto tr41
-tr37:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr39
+tr35:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st96
-tr441:
-//line plugins/parsers/influx/machine.go.rl:73
+ goto st94
+tr458:
+//line plugins/parsers/influx/machine.go.rl:82
- foundMetric = true
+ m.beginMetric = true
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st96
- st96:
+ goto st94
+ st94:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof96
+ goto _test_eof94
}
- st_case_96:
-//line plugins/parsers/influx/machine.go:13114
+ st_case_94:
+//line plugins/parsers/influx/machine.go:13340
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
@@ -13120,1774 +13346,1780 @@ tr441:
goto st0
}
goto st1
-tr246:
-//line plugins/parsers/influx/machine.go.rl:19
+tr244:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st97
- st97:
+ goto st95
+ st95:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof97
+ goto _test_eof95
}
- st_case_97:
-//line plugins/parsers/influx/machine.go:13135
+ st_case_95:
+//line plugins/parsers/influx/machine.go:13361
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 92:
- goto st142
+ goto st140
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st396
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st406
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr87
}
- goto st31
- st396:
+ goto st29
+ st406:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof396
+ goto _test_eof406
}
- st_case_396:
+ st_case_406:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr619
case 10:
- goto tr620
+ goto tr636
case 11:
- goto tr621
- case 12:
- goto tr622
+ goto tr637
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr619
+ goto tr635
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr624
+ goto tr639
case 69:
- goto st140
+ goto st138
case 92:
- goto st142
- case 101:
goto st140
+ case 101:
+ goto st138
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st396
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st406
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr635
}
- goto st31
-tr578:
- ( m.cs) = 98
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st29
+tr594:
+ ( m.cs) = 96
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr624:
- ( m.cs) = 98
-//line plugins/parsers/influx/machine.go.rl:77
+tr639:
+ ( m.cs) = 96
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr747:
- ( m.cs) = 98
-//line plugins/parsers/influx/machine.go.rl:90
+tr760:
+ ( m.cs) = 96
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr781:
- ( m.cs) = 98
-//line plugins/parsers/influx/machine.go.rl:90
+tr794:
+ ( m.cs) = 96
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr787:
- ( m.cs) = 98
-//line plugins/parsers/influx/machine.go.rl:90
+tr800:
+ ( m.cs) = 96
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr793:
- ( m.cs) = 98
-//line plugins/parsers/influx/machine.go.rl:90
+tr806:
+ ( m.cs) = 96
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr805:
- ( m.cs) = 98
-//line plugins/parsers/influx/machine.go.rl:77
+tr819:
+ ( m.cs) = 96
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr810:
- ( m.cs) = 98
-//line plugins/parsers/influx/machine.go.rl:77
+tr824:
+ ( m.cs) = 96
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr815:
- ( m.cs) = 98
-//line plugins/parsers/influx/machine.go.rl:77
+tr828:
+ ( m.cs) = 96
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st98:
+ st96:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof98
+ goto _test_eof96
}
- st_case_98:
-//line plugins/parsers/influx/machine.go:13399
+ st_case_96:
+//line plugins/parsers/influx/machine.go:13627
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr258
+ goto tr256
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr259
+ goto tr257
}
- goto tr257
-tr257:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto tr255
+tr255:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st99
- st99:
+ goto st97
+ st97:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof99
+ goto _test_eof97
}
- st_case_99:
-//line plugins/parsers/influx/machine.go:13432
+ st_case_97:
+//line plugins/parsers/influx/machine.go:13659
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr261
+ goto tr259
case 44:
goto st6
case 61:
- goto tr262
+ goto tr260
case 92:
- goto st138
+ goto st136
}
- goto st99
-tr258:
- ( m.cs) = 397
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st97
+tr256:
+ ( m.cs) = 407
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:139
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr261:
- ( m.cs) = 397
-//line plugins/parsers/influx/machine.go.rl:139
+tr259:
+ ( m.cs) = 407
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st397:
+ st407:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof397
+ goto _test_eof407
}
- st_case_397:
-//line plugins/parsers/influx/machine.go:13489
+ st_case_407:
+//line plugins/parsers/influx/machine.go:13715
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto st398
+ goto st408
case 13:
- goto st34
+ goto st32
case 32:
- goto st261
+ goto st271
case 44:
- goto st37
+ goto st35
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st261
+ goto st271
}
- goto st46
- st398:
+ goto st44
+ st408:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof398
+ goto _test_eof408
}
- st_case_398:
+ st_case_408:
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto st398
+ goto st408
case 13:
- goto st34
+ goto st32
case 32:
- goto st261
+ goto st271
case 44:
- goto tr132
+ goto tr130
case 45:
- goto tr627
+ goto tr642
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr628
+ goto tr643
}
case ( m.data)[( m.p)] >= 9:
- goto st261
+ goto st271
}
- goto st46
-tr627:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st44
+tr642:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st100
- st100:
+ goto st98
+ st98:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof100
+ goto _test_eof98
}
- st_case_100:
-//line plugins/parsers/influx/machine.go:13553
+ st_case_98:
+//line plugins/parsers/influx/machine.go:13779
switch ( m.data)[( m.p)] {
case 32:
- goto tr132
+ goto tr130
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] < 12:
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 {
- goto tr132
+ goto tr130
}
case ( m.data)[( m.p)] > 13:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st399
+ goto st409
}
default:
- goto tr132
+ goto tr130
}
- goto st46
-tr628:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st44
+tr643:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st399
- st399:
+ goto st409
+ st409:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof399
+ goto _test_eof409
}
- st_case_399:
-//line plugins/parsers/influx/machine.go:13588
+ st_case_409:
+//line plugins/parsers/influx/machine.go:13814
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st401
+ goto st411
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
-tr629:
- ( m.cs) = 400
-//line plugins/parsers/influx/machine.go.rl:148
+ goto st44
+tr644:
+ ( m.cs) = 410
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st400:
+ st410:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof400
+ goto _test_eof410
}
- st_case_400:
-//line plugins/parsers/influx/machine.go:13632
+ st_case_410:
+//line plugins/parsers/influx/machine.go:13858
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto st400
+ goto st410
case 13:
- goto st34
+ goto st32
case 32:
- goto st266
+ goto st276
case 44:
- goto tr47
+ goto tr45
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st266
+ goto st276
}
- goto st46
-tr135:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st44
+tr133:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st101
- st101:
+ goto st99
+ st99:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof101
+ goto _test_eof99
}
- st_case_101:
-//line plugins/parsers/influx/machine.go:13664
+ st_case_99:
+//line plugins/parsers/influx/machine.go:13890
if ( m.data)[( m.p)] == 92 {
- goto st102
+ goto st100
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st46
- st102:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st44
+ st100:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof102
+ goto _test_eof100
}
- st_case_102:
-//line plugins/parsers/influx/machine.go:13685
+ st_case_100:
+//line plugins/parsers/influx/machine.go:13911
switch ( m.data)[( m.p)] {
case 32:
- goto tr47
+ goto tr45
case 44:
- goto tr47
+ goto tr45
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st46
- st401:
+ goto st44
+ st411:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof401
+ goto _test_eof411
}
- st_case_401:
+ st_case_411:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st402
+ goto st412
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st402:
+ goto st44
+ st412:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof402
+ goto _test_eof412
}
- st_case_402:
+ st_case_412:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st403
+ goto st413
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st403:
+ goto st44
+ st413:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof403
+ goto _test_eof413
}
- st_case_403:
+ st_case_413:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st404
+ goto st414
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st404:
+ goto st44
+ st414:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof404
+ goto _test_eof414
}
- st_case_404:
+ st_case_414:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st405
+ goto st415
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st405:
+ goto st44
+ st415:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof405
+ goto _test_eof415
}
- st_case_405:
+ st_case_415:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st406
+ goto st416
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st406:
+ goto st44
+ st416:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof406
+ goto _test_eof416
}
- st_case_406:
+ st_case_416:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st407
+ goto st417
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st407:
+ goto st44
+ st417:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof407
+ goto _test_eof417
}
- st_case_407:
+ st_case_417:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st408
+ goto st418
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st408:
+ goto st44
+ st418:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof408
+ goto _test_eof418
}
- st_case_408:
+ st_case_418:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st409
+ goto st419
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st409:
+ goto st44
+ st419:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof409
+ goto _test_eof419
}
- st_case_409:
+ st_case_419:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st410
+ goto st420
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st410:
+ goto st44
+ st420:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof410
+ goto _test_eof420
}
- st_case_410:
+ st_case_420:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st411
+ goto st421
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st411:
+ goto st44
+ st421:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof411
+ goto _test_eof421
}
- st_case_411:
+ st_case_421:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st412
+ goto st422
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st412:
+ goto st44
+ st422:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof412
+ goto _test_eof422
}
- st_case_412:
+ st_case_422:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st413
+ goto st423
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st413:
+ goto st44
+ st423:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof413
+ goto _test_eof423
}
- st_case_413:
+ st_case_423:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st414
+ goto st424
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st414:
+ goto st44
+ st424:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof414
+ goto _test_eof424
}
- st_case_414:
+ st_case_424:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st415
+ goto st425
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st415:
+ goto st44
+ st425:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof415
+ goto _test_eof425
}
- st_case_415:
+ st_case_425:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st416
+ goto st426
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st416:
+ goto st44
+ st426:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof416
+ goto _test_eof426
}
- st_case_416:
+ st_case_426:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st417
+ goto st427
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st417:
+ goto st44
+ st427:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof417
+ goto _test_eof427
}
- st_case_417:
+ st_case_427:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st418
+ goto st428
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto st46
- st418:
+ goto st44
+ st428:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof418
+ goto _test_eof428
}
- st_case_418:
+ st_case_428:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 11:
- goto tr629
+ goto tr644
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
case 44:
- goto tr132
+ goto tr130
case 61:
- goto tr137
+ goto tr135
case 92:
- goto st101
+ goto st99
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr450
+ goto tr467
}
- goto st46
-tr262:
-//line plugins/parsers/influx/machine.go.rl:86
+ goto st44
+tr260:
+//line plugins/parsers/influx/machine.go.rl:95
- key = m.text()
+ m.key = m.text()
-//line plugins/parsers/influx/machine.go.rl:99
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st103
- st103:
+ goto st101
+ st101:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof103
+ goto _test_eof101
}
- st_case_103:
-//line plugins/parsers/influx/machine.go:14255
+ st_case_101:
+//line plugins/parsers/influx/machine.go:14481
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr266
+ goto tr264
case 44:
goto st6
case 45:
- goto tr267
+ goto tr265
case 46:
- goto tr268
+ goto tr266
case 48:
- goto tr269
+ goto tr267
case 61:
goto st6
case 70:
- goto tr271
+ goto tr269
case 84:
- goto tr272
+ goto tr270
case 92:
- goto tr229
+ goto tr227
case 102:
- goto tr273
+ goto tr271
case 116:
- goto tr274
+ goto tr272
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr270
+ switch {
+ case ( m.data)[( m.p)] > 13:
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr268
+ }
+ case ( m.data)[( m.p)] >= 12:
+ goto st6
}
- goto tr228
-tr266:
- ( m.cs) = 419
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr226
+tr264:
+ ( m.cs) = 429
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:139
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st419:
+ st429:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof419
+ goto _test_eof429
}
- st_case_419:
-//line plugins/parsers/influx/machine.go:14316
+ st_case_429:
+//line plugins/parsers/influx/machine.go:14543
switch ( m.data)[( m.p)] {
- case 9:
- goto tr649
case 10:
- goto tr650
+ goto tr665
case 11:
- goto tr651
- case 12:
- goto tr547
+ goto tr666
case 13:
- goto tr652
+ goto tr667
case 32:
- goto tr649
+ goto tr664
case 34:
- goto tr151
+ goto tr149
case 44:
- goto tr653
+ goto tr668
case 61:
goto tr23
case 92:
- goto tr153
+ goto tr151
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr664
}
- goto tr148
-tr841:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:77
+ goto tr146
+tr854:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr682:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:90
+tr697:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr649:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:90
+tr664:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr837:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:77
+tr850:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr710:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:90
+tr725:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr721:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:90
+tr736:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr728:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:90
+tr742:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr735:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:90
+tr748:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr869:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:77
+tr882:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr873:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:77
+tr886:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr877:
- ( m.cs) = 420
-//line plugins/parsers/influx/machine.go.rl:77
+tr890:
+ ( m.cs) = 430
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st420:
+ st430:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof420
+ goto _test_eof430
}
- st_case_420:
-//line plugins/parsers/influx/machine.go:14572
+ st_case_430:
+//line plugins/parsers/influx/machine.go:14798
switch ( m.data)[( m.p)] {
- case 9:
- goto st420
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr655
- case 12:
- goto st290
+ goto tr670
case 13:
- goto st104
+ goto st102
case 32:
- goto st420
+ goto st430
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 45:
- goto tr656
+ goto tr671
case 61:
goto st6
case 92:
- goto tr163
+ goto tr161
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr657
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr672
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto st430
}
- goto tr160
-tr655:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr158
+tr670:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st421
- st421:
+ goto st431
+ st431:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof421
+ goto _test_eof431
}
- st_case_421:
-//line plugins/parsers/influx/machine.go:14612
+ st_case_431:
+//line plugins/parsers/influx/machine.go:14839
switch ( m.data)[( m.p)] {
- case 9:
- goto st420
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr655
- case 12:
- goto st290
+ goto tr670
case 13:
- goto st104
+ goto st102
case 32:
- goto st420
+ goto st430
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 45:
- goto tr656
+ goto tr671
case 61:
- goto tr165
- case 92:
goto tr163
+ case 92:
+ goto tr161
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr657
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr672
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto st430
}
- goto tr160
-tr652:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr158
+tr667:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st104
-tr661:
- ( m.cs) = 104
-//line plugins/parsers/influx/machine.go.rl:148
+ goto st102
+tr676:
+ ( m.cs) = 102
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr517:
- ( m.cs) = 104
-//line plugins/parsers/influx/machine.go.rl:121
+tr533:
+ ( m.cs) = 102
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr725:
- ( m.cs) = 104
-//line plugins/parsers/influx/machine.go.rl:103
+tr739:
+ ( m.cs) = 102
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr732:
- ( m.cs) = 104
-//line plugins/parsers/influx/machine.go.rl:112
+tr745:
+ ( m.cs) = 102
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr739:
- ( m.cs) = 104
-//line plugins/parsers/influx/machine.go.rl:130
+tr751:
+ ( m.cs) = 102
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st104:
+ st102:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof104
+ goto _test_eof102
}
- st_case_104:
-//line plugins/parsers/influx/machine.go:14717
- if ( m.data)[( m.p)] == 10 {
- goto st317
+ st_case_102:
+//line plugins/parsers/influx/machine.go:14945
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr273
+ case 34:
+ goto tr29
+ case 92:
+ goto st73
}
- goto tr8
-tr656:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st6
+tr671:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st105
- st105:
+ goto st103
+ st103:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof105
+ goto _test_eof103
}
- st_case_105:
-//line plugins/parsers/influx/machine.go:14733
+ st_case_103:
+//line plugins/parsers/influx/machine.go:14966
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr105
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st422
+ switch {
+ case ( m.data)[( m.p)] > 13:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st432
+ }
+ case ( m.data)[( m.p)] >= 12:
+ goto st6
}
- goto st51
-tr657:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st49
+tr672:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st422
- st422:
+ goto st432
+ st432:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof422
+ goto _test_eof432
}
- st_case_422:
-//line plugins/parsers/influx/machine.go:14769
+ st_case_432:
+//line plugins/parsers/influx/machine.go:15003
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st425
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st435
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
-tr658:
- ( m.cs) = 423
-//line plugins/parsers/influx/machine.go.rl:148
+ goto st49
+tr673:
+ ( m.cs) = 433
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st423:
+ st433:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof423
+ goto _test_eof433
}
- st_case_423:
-//line plugins/parsers/influx/machine.go:14814
+ st_case_433:
+//line plugins/parsers/influx/machine.go:15049
switch ( m.data)[( m.p)] {
case 10:
- goto st317
- case 12:
- goto st266
+ goto tr273
case 13:
- goto st104
+ goto st102
case 32:
- goto st423
+ goto st433
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto st423
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st433
}
goto st6
-tr660:
- ( m.cs) = 424
-//line plugins/parsers/influx/machine.go.rl:148
+tr675:
+ ( m.cs) = 434
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st424:
+ st434:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof424
+ goto _test_eof434
}
- st_case_424:
-//line plugins/parsers/influx/machine.go:14851
+ st_case_434:
+//line plugins/parsers/influx/machine.go:15084
switch ( m.data)[( m.p)] {
- case 9:
- goto st423
case 10:
- goto st317
+ goto tr273
case 11:
- goto st424
- case 12:
- goto st266
+ goto st434
case 13:
- goto st104
+ goto st102
case 32:
- goto st423
+ goto st433
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- goto st51
-tr163:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st433
+ }
+ goto st49
+tr161:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st106
- st106:
+ goto st104
+ st104:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof106
+ goto _test_eof104
}
- st_case_106:
-//line plugins/parsers/influx/machine.go:14886
+ st_case_104:
+//line plugins/parsers/influx/machine.go:15118
switch ( m.data)[( m.p)] {
case 34:
- goto st51
+ goto st49
case 92:
- goto st51
+ goto st49
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -14898,4950 +15130,4991 @@ tr163:
goto tr8
}
goto st3
- st425:
+ st435:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof425
+ goto _test_eof435
}
- st_case_425:
+ st_case_435:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st426
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st436
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st426:
+ goto st49
+ st436:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof426
+ goto _test_eof436
}
- st_case_426:
+ st_case_436:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st427
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st437
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st427:
+ goto st49
+ st437:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof427
+ goto _test_eof437
}
- st_case_427:
+ st_case_437:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st428
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st438
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st428:
+ goto st49
+ st438:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof428
+ goto _test_eof438
}
- st_case_428:
+ st_case_438:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st429
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st439
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st429:
+ goto st49
+ st439:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof429
+ goto _test_eof439
}
- st_case_429:
+ st_case_439:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st430
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st440
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st430:
+ goto st49
+ st440:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof430
+ goto _test_eof440
}
- st_case_430:
+ st_case_440:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st431
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st441
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st431:
+ goto st49
+ st441:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof431
+ goto _test_eof441
}
- st_case_431:
+ st_case_441:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st432
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st442
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st432:
+ goto st49
+ st442:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof432
+ goto _test_eof442
}
- st_case_432:
+ st_case_442:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st433
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st443
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st433:
+ goto st49
+ st443:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof433
+ goto _test_eof443
}
- st_case_433:
+ st_case_443:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st434
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st444
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st434:
+ goto st49
+ st444:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof434
+ goto _test_eof444
}
- st_case_434:
+ st_case_444:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st435
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st445
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st435:
+ goto st49
+ st445:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof435
+ goto _test_eof445
}
- st_case_435:
+ st_case_445:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st436
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st446
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st436:
+ goto st49
+ st446:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof436
+ goto _test_eof446
}
- st_case_436:
+ st_case_446:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st437
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st447
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st437:
+ goto st49
+ st447:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof437
+ goto _test_eof447
}
- st_case_437:
+ st_case_447:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st438
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st448
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st438:
+ goto st49
+ st448:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof438
+ goto _test_eof448
}
- st_case_438:
+ st_case_448:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st439
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st449
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st439:
+ goto st49
+ st449:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof439
+ goto _test_eof449
}
- st_case_439:
+ st_case_449:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st440
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st450
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st440:
+ goto st49
+ st450:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof440
+ goto _test_eof450
}
- st_case_440:
+ st_case_450:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st441
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st451
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st441:
+ goto st49
+ st451:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof441
+ goto _test_eof451
}
- st_case_441:
+ st_case_451:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st442
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st452
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr673
}
- goto st51
- st442:
+ goto st49
+ st452:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof442
+ goto _test_eof452
}
- st_case_442:
+ st_case_452:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr658
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr660
- case 12:
- goto tr450
+ goto tr675
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st106
+ goto st104
}
- goto st51
-tr651:
- ( m.cs) = 443
-//line plugins/parsers/influx/machine.go.rl:90
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr673
+ }
+ goto st49
+tr666:
+ ( m.cs) = 453
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr711:
- ( m.cs) = 443
-//line plugins/parsers/influx/machine.go.rl:90
+tr726:
+ ( m.cs) = 453
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr723:
- ( m.cs) = 443
-//line plugins/parsers/influx/machine.go.rl:90
+tr738:
+ ( m.cs) = 453
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr730:
- ( m.cs) = 443
-//line plugins/parsers/influx/machine.go.rl:90
+tr744:
+ ( m.cs) = 453
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr737:
- ( m.cs) = 443
-//line plugins/parsers/influx/machine.go.rl:90
+tr750:
+ ( m.cs) = 453
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st443:
+ st453:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof443
+ goto _test_eof453
}
- st_case_443:
-//line plugins/parsers/influx/machine.go:15571
+ st_case_453:
+//line plugins/parsers/influx/machine.go:15819
switch ( m.data)[( m.p)] {
- case 9:
- goto tr682
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr683
- case 12:
- goto tr547
+ goto tr698
case 13:
- goto st104
+ goto st102
case 32:
- goto tr682
+ goto tr697
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr158
+ goto tr156
case 45:
- goto tr684
+ goto tr699
case 61:
goto st6
case 92:
- goto tr205
+ goto tr203
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr685
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr700
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr697
}
- goto tr202
-tr683:
- ( m.cs) = 444
-//line plugins/parsers/influx/machine.go.rl:90
+ goto tr200
+tr698:
+ ( m.cs) = 454
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st444:
+ st454:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof444
+ goto _test_eof454
}
- st_case_444:
-//line plugins/parsers/influx/machine.go:15622
+ st_case_454:
+//line plugins/parsers/influx/machine.go:15871
switch ( m.data)[( m.p)] {
- case 9:
- goto tr682
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr683
- case 12:
- goto tr547
+ goto tr698
case 13:
- goto st104
+ goto st102
case 32:
- goto tr682
+ goto tr697
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr158
+ goto tr156
case 45:
- goto tr684
+ goto tr699
case 61:
- goto tr165
+ goto tr163
case 92:
- goto tr205
+ goto tr203
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr685
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr700
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr697
}
- goto tr202
-tr684:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr200
+tr699:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st107
- st107:
+ goto st105
+ st105:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof107
+ goto _test_eof105
}
- st_case_107:
-//line plugins/parsers/influx/machine.go:15662
+ st_case_105:
+//line plugins/parsers/influx/machine.go:15912
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr207
- case 12:
- goto tr60
+ goto tr205
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st445
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st455
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr153
}
- goto st67
-tr685:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st65
+tr700:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st445
- st445:
+ goto st455
+ st455:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof445
+ goto _test_eof455
}
- st_case_445:
-//line plugins/parsers/influx/machine.go:15700
+ st_case_455:
+//line plugins/parsers/influx/machine.go:15951
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st449
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st459
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
-tr848:
- ( m.cs) = 446
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st65
+tr861:
+ ( m.cs) = 456
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr691:
- ( m.cs) = 446
-//line plugins/parsers/influx/machine.go.rl:90
+tr706:
+ ( m.cs) = 456
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr845:
- ( m.cs) = 446
-//line plugins/parsers/influx/machine.go.rl:77
+tr858:
+ ( m.cs) = 456
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr686:
- ( m.cs) = 446
-//line plugins/parsers/influx/machine.go.rl:90
+tr701:
+ ( m.cs) = 456
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st446:
+ st456:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof446
+ goto _test_eof456
}
- st_case_446:
-//line plugins/parsers/influx/machine.go:15804
+ st_case_456:
+//line plugins/parsers/influx/machine.go:16056
switch ( m.data)[( m.p)] {
- case 9:
- goto st446
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr690
- case 12:
- goto st294
+ goto tr705
case 13:
- goto st104
+ goto st102
case 32:
- goto st446
+ goto st456
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr163
+ goto tr161
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st456
}
- goto tr160
-tr690:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr158
+tr705:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st447
- st447:
+ goto st457
+ st457:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof447
+ goto _test_eof457
}
- st_case_447:
-//line plugins/parsers/influx/machine.go:15839
+ st_case_457:
+//line plugins/parsers/influx/machine.go:16090
switch ( m.data)[( m.p)] {
- case 9:
- goto st446
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr690
- case 12:
- goto st294
+ goto tr705
case 13:
- goto st104
+ goto st102
case 32:
- goto st446
+ goto st456
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 61:
- goto tr165
- case 92:
goto tr163
+ case 92:
+ goto tr161
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st456
}
- goto tr160
-tr692:
- ( m.cs) = 448
-//line plugins/parsers/influx/machine.go.rl:90
+ goto tr158
+tr707:
+ ( m.cs) = 458
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr687:
- ( m.cs) = 448
-//line plugins/parsers/influx/machine.go.rl:90
+tr702:
+ ( m.cs) = 458
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st448:
+ st458:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof448
+ goto _test_eof458
}
- st_case_448:
-//line plugins/parsers/influx/machine.go:15908
+ st_case_458:
+//line plugins/parsers/influx/machine.go:16158
switch ( m.data)[( m.p)] {
- case 9:
- goto tr691
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr692
- case 12:
- goto tr556
+ goto tr707
case 13:
- goto st104
+ goto st102
case 32:
- goto tr691
+ goto tr706
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto tr205
+ goto tr203
}
- goto tr202
- st449:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr706
+ }
+ goto tr200
+ st459:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof449
+ goto _test_eof459
}
- st_case_449:
+ st_case_459:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st450
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st460
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st450:
+ goto st65
+ st460:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof450
+ goto _test_eof460
}
- st_case_450:
+ st_case_460:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st451
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st461
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st451:
+ goto st65
+ st461:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof451
+ goto _test_eof461
}
- st_case_451:
+ st_case_461:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st452
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st462
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st452:
+ goto st65
+ st462:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof452
+ goto _test_eof462
}
- st_case_452:
+ st_case_462:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st453
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st463
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st453:
+ goto st65
+ st463:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof453
+ goto _test_eof463
}
- st_case_453:
+ st_case_463:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st454
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st464
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st454:
+ goto st65
+ st464:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof454
+ goto _test_eof464
}
- st_case_454:
+ st_case_464:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st455
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st465
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st455:
+ goto st65
+ st465:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof455
+ goto _test_eof465
}
- st_case_455:
+ st_case_465:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st456
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st466
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st456:
+ goto st65
+ st466:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof456
+ goto _test_eof466
}
- st_case_456:
+ st_case_466:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st457
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st467
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st457:
+ goto st65
+ st467:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof457
+ goto _test_eof467
}
- st_case_457:
+ st_case_467:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st458
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st468
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st458:
+ goto st65
+ st468:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof458
+ goto _test_eof468
}
- st_case_458:
+ st_case_468:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st459
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st469
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st459:
+ goto st65
+ st469:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof459
+ goto _test_eof469
}
- st_case_459:
+ st_case_469:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st460
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st470
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st460:
+ goto st65
+ st470:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof460
+ goto _test_eof470
}
- st_case_460:
+ st_case_470:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st461
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st471
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st461:
+ goto st65
+ st471:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof461
+ goto _test_eof471
}
- st_case_461:
+ st_case_471:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st462
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st472
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st462:
+ goto st65
+ st472:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof462
+ goto _test_eof472
}
- st_case_462:
+ st_case_472:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st463
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st473
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st463:
+ goto st65
+ st473:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof463
+ goto _test_eof473
}
- st_case_463:
+ st_case_473:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st464
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st474
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st464:
+ goto st65
+ st474:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof464
+ goto _test_eof474
}
- st_case_464:
+ st_case_474:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st465
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st475
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st465:
+ goto st65
+ st475:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof465
+ goto _test_eof475
}
- st_case_465:
+ st_case_475:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st466
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st476
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr701
}
- goto st67
- st466:
+ goto st65
+ st476:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof466
+ goto _test_eof476
}
- st_case_466:
+ st_case_476:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr686
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr687
- case 12:
- goto tr553
+ goto tr702
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr686
+ goto tr701
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr158
+ goto tr156
case 61:
- goto tr165
+ goto tr163
case 92:
- goto st69
+ goto st67
}
- goto st67
-tr653:
- ( m.cs) = 108
-//line plugins/parsers/influx/machine.go.rl:90
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr701
+ }
+ goto st65
+tr668:
+ ( m.cs) = 106
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr839:
- ( m.cs) = 108
-//line plugins/parsers/influx/machine.go.rl:77
+tr852:
+ ( m.cs) = 106
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr713:
- ( m.cs) = 108
-//line plugins/parsers/influx/machine.go.rl:90
+tr727:
+ ( m.cs) = 106
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr726:
- ( m.cs) = 108
-//line plugins/parsers/influx/machine.go.rl:90
+tr740:
+ ( m.cs) = 106
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr733:
- ( m.cs) = 108
-//line plugins/parsers/influx/machine.go.rl:90
+tr746:
+ ( m.cs) = 106
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr740:
- ( m.cs) = 108
-//line plugins/parsers/influx/machine.go.rl:90
+tr752:
+ ( m.cs) = 106
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr871:
- ( m.cs) = 108
-//line plugins/parsers/influx/machine.go.rl:77
+tr884:
+ ( m.cs) = 106
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr875:
- ( m.cs) = 108
-//line plugins/parsers/influx/machine.go.rl:77
+tr888:
+ ( m.cs) = 106
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr879:
- ( m.cs) = 108
-//line plugins/parsers/influx/machine.go.rl:77
+tr893:
+ ( m.cs) = 106
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st108:
+ st106:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof108
+ goto _test_eof106
}
- st_case_108:
-//line plugins/parsers/influx/machine.go:16693
+ st_case_106:
+//line plugins/parsers/influx/machine.go:16958
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr258
+ goto tr256
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr279
+ goto tr277
}
- goto tr278
-tr278:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto tr276
+tr276:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st109
- st109:
+ goto st107
+ st107:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof109
+ goto _test_eof107
}
- st_case_109:
-//line plugins/parsers/influx/machine.go:16726
+ st_case_107:
+//line plugins/parsers/influx/machine.go:16990
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr261
+ goto tr259
case 44:
goto st6
case 61:
- goto tr281
+ goto tr279
case 92:
- goto st123
+ goto st121
}
- goto st109
-tr281:
-//line plugins/parsers/influx/machine.go.rl:86
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st107
+tr279:
+//line plugins/parsers/influx/machine.go.rl:95
- key = m.text()
+ m.key = m.text()
-//line plugins/parsers/influx/machine.go.rl:99
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st110
- st110:
+ goto st108
+ st108:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof110
+ goto _test_eof108
}
- st_case_110:
-//line plugins/parsers/influx/machine.go:16763
+ st_case_108:
+//line plugins/parsers/influx/machine.go:17026
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr266
+ goto tr264
case 44:
goto st6
case 45:
- goto tr283
+ goto tr281
case 46:
- goto tr284
+ goto tr282
case 48:
- goto tr285
+ goto tr283
case 61:
goto st6
case 70:
- goto tr287
+ goto tr285
case 84:
- goto tr288
+ goto tr286
case 92:
- goto tr153
+ goto tr151
case 102:
- goto tr289
+ goto tr287
case 116:
- goto tr290
+ goto tr288
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr286
+ switch {
+ case ( m.data)[( m.p)] > 13:
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr284
+ }
+ case ( m.data)[( m.p)] >= 12:
+ goto st6
}
- goto tr148
-tr283:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr146
+tr281:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st111
- st111:
+ goto st109
+ st109:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof111
+ goto _test_eof109
}
- st_case_111:
-//line plugins/parsers/influx/machine.go:16813
+ st_case_109:
+//line plugins/parsers/influx/machine.go:17077
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 46:
- goto st112
+ goto st110
case 48:
- goto st471
+ goto st481
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st474
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st484
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr153
}
- goto st49
-tr284:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st47
+tr282:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st112
- st112:
+ goto st110
+ st110:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof112
+ goto _test_eof110
}
- st_case_112:
-//line plugins/parsers/influx/machine.go:16855
+ st_case_110:
+//line plugins/parsers/influx/machine.go:17120
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st467
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st477
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr153
}
- goto st49
- st467:
+ goto st47
+ st477:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof467
+ goto _test_eof477
}
- st_case_467:
+ st_case_477:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr710
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr711
- case 12:
- goto tr712
+ goto tr726
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr710
+ goto tr725
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr713
+ goto tr727
case 61:
goto st6
case 69:
- goto st113
+ goto st111
case 92:
- goto st64
+ goto st62
case 101:
- goto st113
+ goto st111
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st467
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st477
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr725
}
- goto st49
- st113:
+ goto st47
+ st111:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof113
+ goto _test_eof111
}
- st_case_113:
+ st_case_111:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr295
+ goto tr293
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
switch {
+ case ( m.data)[( m.p)] < 43:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
case ( m.data)[( m.p)] > 45:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st470
+ goto st480
}
- case ( m.data)[( m.p)] >= 43:
- goto st114
+ default:
+ goto st112
}
- goto st49
-tr295:
- ( m.cs) = 468
-//line plugins/parsers/influx/machine.go.rl:139
+ goto st47
+tr293:
+ ( m.cs) = 478
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st468:
+ st478:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof468
+ goto _test_eof478
}
- st_case_468:
-//line plugins/parsers/influx/machine.go:16971
+ st_case_478:
+//line plugins/parsers/influx/machine.go:17238
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr548
+ goto tr564
case 13:
- goto st34
+ goto st32
case 32:
- goto tr547
+ goto tr563
case 44:
- goto tr549
+ goto tr565
case 61:
- goto tr132
+ goto tr130
case 92:
- goto st23
+ goto st21
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st469
+ goto st479
}
case ( m.data)[( m.p)] >= 9:
- goto tr547
+ goto tr563
}
- goto st17
- st469:
+ goto st15
+ st479:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof469
+ goto _test_eof479
}
- st_case_469:
+ st_case_479:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 11:
- goto tr716
+ goto tr731
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr712
+ goto tr729
case 44:
- goto tr718
+ goto tr733
case 61:
- goto tr132
+ goto tr130
case 92:
- goto st23
+ goto st21
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st469
+ goto st479
}
case ( m.data)[( m.p)] >= 9:
- goto tr712
+ goto tr729
}
- goto st17
- st114:
+ goto st15
+ st112:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof114
+ goto _test_eof112
}
- st_case_114:
+ st_case_112:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st470
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st480
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr153
}
- goto st49
- st470:
+ goto st47
+ st480:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof470
+ goto _test_eof480
}
- st_case_470:
+ st_case_480:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr710
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr711
- case 12:
- goto tr712
+ goto tr726
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr710
+ goto tr725
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr713
+ goto tr727
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st470
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st480
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr725
}
- goto st49
- st471:
+ goto st47
+ st481:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof471
+ goto _test_eof481
}
- st_case_471:
+ st_case_481:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr710
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr711
- case 12:
- goto tr712
+ goto tr726
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr710
+ goto tr725
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr713
+ goto tr727
case 46:
- goto st467
+ goto st477
case 61:
goto st6
case 69:
- goto st113
+ goto st111
case 92:
- goto st64
+ goto st62
case 101:
- goto st113
+ goto st111
case 105:
- goto st473
+ goto st483
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st472
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st482
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr725
}
- goto st49
- st472:
+ goto st47
+ st482:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof472
+ goto _test_eof482
}
- st_case_472:
+ st_case_482:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr710
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr711
- case 12:
- goto tr712
+ goto tr726
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr710
+ goto tr725
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr713
+ goto tr727
case 46:
- goto st467
+ goto st477
case 61:
goto st6
case 69:
- goto st113
+ goto st111
case 92:
- goto st64
+ goto st62
case 101:
- goto st113
+ goto st111
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st472
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st482
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr725
}
- goto st49
- st473:
+ goto st47
+ st483:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof473
+ goto _test_eof483
}
- st_case_473:
+ st_case_483:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr721
case 10:
- goto tr722
+ goto tr737
case 11:
- goto tr723
- case 12:
- goto tr724
+ goto tr738
case 13:
- goto tr725
+ goto tr739
case 32:
- goto tr721
+ goto tr736
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr726
+ goto tr740
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
- goto st49
- st474:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr736
+ }
+ goto st47
+ st484:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof474
+ goto _test_eof484
}
- st_case_474:
+ st_case_484:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr710
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr711
- case 12:
- goto tr712
+ goto tr726
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr710
+ goto tr725
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr713
+ goto tr727
case 46:
- goto st467
+ goto st477
case 61:
goto st6
case 69:
- goto st113
+ goto st111
case 92:
- goto st64
+ goto st62
case 101:
- goto st113
+ goto st111
case 105:
- goto st473
+ goto st483
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st474
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st484
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr725
}
- goto st49
-tr285:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st47
+tr283:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st475
- st475:
+ goto st485
+ st485:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof475
+ goto _test_eof485
}
- st_case_475:
-//line plugins/parsers/influx/machine.go:17243
+ st_case_485:
+//line plugins/parsers/influx/machine.go:17514
switch ( m.data)[( m.p)] {
- case 9:
- goto tr710
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr711
- case 12:
- goto tr712
+ goto tr726
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr710
+ goto tr725
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr713
+ goto tr727
case 46:
- goto st467
+ goto st477
case 61:
goto st6
case 69:
- goto st113
+ goto st111
case 92:
- goto st64
+ goto st62
case 101:
- goto st113
+ goto st111
case 105:
- goto st473
+ goto st483
case 117:
- goto st476
+ goto st486
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st472
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st482
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr725
}
- goto st49
- st476:
+ goto st47
+ st486:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof476
+ goto _test_eof486
}
- st_case_476:
+ st_case_486:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr728
case 10:
- goto tr729
+ goto tr743
case 11:
- goto tr730
- case 12:
- goto tr731
+ goto tr744
case 13:
- goto tr732
+ goto tr745
case 32:
- goto tr728
+ goto tr742
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr733
+ goto tr746
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
- goto st49
-tr286:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr742
+ }
+ goto st47
+tr284:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st477
- st477:
+ goto st487
+ st487:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof477
+ goto _test_eof487
}
- st_case_477:
-//line plugins/parsers/influx/machine.go:17319
+ st_case_487:
+//line plugins/parsers/influx/machine.go:17590
switch ( m.data)[( m.p)] {
- case 9:
- goto tr710
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr711
- case 12:
- goto tr712
+ goto tr726
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr710
+ goto tr725
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr713
+ goto tr727
case 46:
- goto st467
+ goto st477
case 61:
goto st6
case 69:
- goto st113
+ goto st111
case 92:
- goto st64
+ goto st62
case 101:
- goto st113
+ goto st111
case 105:
- goto st473
+ goto st483
case 117:
- goto st476
+ goto st486
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st477
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st487
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr725
}
- goto st49
-tr287:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st47
+tr285:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st478
- st478:
+ goto st488
+ st488:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof478
+ goto _test_eof488
}
- st_case_478:
-//line plugins/parsers/influx/machine.go:17367
+ st_case_488:
+//line plugins/parsers/influx/machine.go:17639
switch ( m.data)[( m.p)] {
- case 9:
- goto tr735
case 10:
- goto tr736
+ goto tr749
case 11:
- goto tr737
- case 12:
- goto tr738
+ goto tr750
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr735
+ goto tr748
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr740
+ goto tr752
case 61:
goto st6
case 65:
- goto st115
+ goto st113
case 92:
- goto st64
+ goto st62
case 97:
- goto st118
+ goto st116
}
- goto st49
- st115:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr748
+ }
+ goto st47
+ st113:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof115
+ goto _test_eof113
}
- st_case_115:
+ st_case_113:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 76:
- goto st116
+ goto st114
case 92:
- goto st64
+ goto st62
}
- goto st49
- st116:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+ st114:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof116
+ goto _test_eof114
}
- st_case_116:
+ st_case_114:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 83:
- goto st117
+ goto st115
case 92:
- goto st64
+ goto st62
}
- goto st49
- st117:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+ st115:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof117
+ goto _test_eof115
}
- st_case_117:
+ st_case_115:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 69:
- goto st479
+ goto st489
case 92:
- goto st64
+ goto st62
}
- goto st49
- st479:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+ st489:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof479
+ goto _test_eof489
}
- st_case_479:
+ st_case_489:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr735
case 10:
- goto tr736
+ goto tr749
case 11:
- goto tr737
- case 12:
- goto tr738
+ goto tr750
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr735
+ goto tr748
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr740
+ goto tr752
case 61:
goto st6
case 92:
- goto st64
+ goto st62
}
- goto st49
- st118:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr748
+ }
+ goto st47
+ st116:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof118
+ goto _test_eof116
}
- st_case_118:
+ st_case_116:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto st64
+ goto st62
case 108:
- goto st119
+ goto st117
}
- goto st49
- st119:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+ st117:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof119
+ goto _test_eof117
}
- st_case_119:
+ st_case_117:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto st64
+ goto st62
case 115:
- goto st120
+ goto st118
}
- goto st49
- st120:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+ st118:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof120
+ goto _test_eof118
}
- st_case_120:
+ st_case_118:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto st64
+ goto st62
case 101:
- goto st479
+ goto st489
}
- goto st49
-tr288:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+tr286:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st480
- st480:
+ goto st490
+ st490:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof480
+ goto _test_eof490
}
- st_case_480:
-//line plugins/parsers/influx/machine.go:17614
+ st_case_490:
+//line plugins/parsers/influx/machine.go:17878
switch ( m.data)[( m.p)] {
- case 9:
- goto tr735
case 10:
- goto tr736
+ goto tr749
case 11:
- goto tr737
- case 12:
- goto tr738
+ goto tr750
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr735
+ goto tr748
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr740
+ goto tr752
case 61:
goto st6
case 82:
- goto st121
+ goto st119
case 92:
- goto st64
+ goto st62
case 114:
- goto st122
+ goto st120
}
- goto st49
- st121:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr748
+ }
+ goto st47
+ st119:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof121
+ goto _test_eof119
}
- st_case_121:
+ st_case_119:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 85:
- goto st117
+ goto st115
case 92:
- goto st64
+ goto st62
}
- goto st49
- st122:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+ st120:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof122
+ goto _test_eof120
}
- st_case_122:
+ st_case_120:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr155
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr156
- case 12:
- goto tr60
+ goto tr154
case 13:
- goto st8
+ goto st6
case 32:
- goto tr155
+ goto tr153
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr158
+ goto tr156
case 61:
goto st6
case 92:
- goto st64
+ goto st62
case 117:
- goto st120
+ goto st118
}
- goto st49
-tr289:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr153
+ }
+ goto st47
+tr287:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st481
- st481:
+ goto st491
+ st491:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof481
+ goto _test_eof491
}
- st_case_481:
-//line plugins/parsers/influx/machine.go:17713
+ st_case_491:
+//line plugins/parsers/influx/machine.go:17974
switch ( m.data)[( m.p)] {
- case 9:
- goto tr735
case 10:
- goto tr736
+ goto tr749
case 11:
- goto tr737
- case 12:
- goto tr738
+ goto tr750
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr735
+ goto tr748
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr740
+ goto tr752
case 61:
goto st6
case 92:
- goto st64
+ goto st62
case 97:
- goto st118
+ goto st116
}
- goto st49
-tr290:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr748
+ }
+ goto st47
+tr288:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st482
- st482:
+ goto st492
+ st492:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof482
+ goto _test_eof492
}
- st_case_482:
-//line plugins/parsers/influx/machine.go:17750
+ st_case_492:
+//line plugins/parsers/influx/machine.go:18010
switch ( m.data)[( m.p)] {
- case 9:
- goto tr735
case 10:
- goto tr736
+ goto tr749
case 11:
- goto tr737
- case 12:
- goto tr738
+ goto tr750
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr735
+ goto tr748
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr740
+ goto tr752
case 61:
goto st6
case 92:
- goto st64
+ goto st62
case 114:
- goto st122
+ goto st120
}
- goto st49
-tr279:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr748
+ }
+ goto st47
+tr277:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st123
- st123:
+ goto st121
+ st121:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof123
+ goto _test_eof121
}
- st_case_123:
-//line plugins/parsers/influx/machine.go:17787
+ st_case_121:
+//line plugins/parsers/influx/machine.go:18046
switch ( m.data)[( m.p)] {
case 34:
- goto st109
+ goto st107
case 92:
- goto st124
+ goto st122
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st46
- st124:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st44
+ st122:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof124
+ goto _test_eof122
}
- st_case_124:
-//line plugins/parsers/influx/machine.go:17811
+ st_case_122:
+//line plugins/parsers/influx/machine.go:18070
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr261
+ goto tr259
case 44:
goto st6
case 61:
- goto tr281
+ goto tr279
case 92:
- goto st123
+ goto st121
}
- goto st109
-tr267:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st107
+tr265:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st125
- st125:
+ goto st123
+ st123:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof125
+ goto _test_eof123
}
- st_case_125:
-//line plugins/parsers/influx/machine.go:17844
+ st_case_123:
+//line plugins/parsers/influx/machine.go:18102
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 46:
- goto st126
+ goto st124
case 48:
- goto st507
+ goto st517
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st510
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st520
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr229
}
- goto st81
-tr268:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st79
+tr266:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st126
- st126:
+ goto st124
+ st124:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof126
+ goto _test_eof124
}
- st_case_126:
-//line plugins/parsers/influx/machine.go:17886
+ st_case_124:
+//line plugins/parsers/influx/machine.go:18145
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st483
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st493
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr229
}
- goto st81
- st483:
+ goto st79
+ st493:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof483
+ goto _test_eof493
}
- st_case_483:
+ st_case_493:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr745
case 10:
- goto tr620
+ goto tr758
case 11:
- goto tr746
- case 12:
- goto tr712
+ goto tr759
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr745
+ goto tr757
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr747
+ goto tr760
case 61:
goto st6
case 69:
- goto st128
+ goto st126
case 92:
- goto st87
+ goto st85
case 101:
- goto st128
+ goto st126
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st483
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st493
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr757
}
- goto st81
-tr746:
- ( m.cs) = 484
-//line plugins/parsers/influx/machine.go.rl:90
+ goto st79
+tr759:
+ ( m.cs) = 494
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr779:
- ( m.cs) = 484
-//line plugins/parsers/influx/machine.go.rl:90
+tr792:
+ ( m.cs) = 494
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr785:
- ( m.cs) = 484
-//line plugins/parsers/influx/machine.go.rl:90
+tr798:
+ ( m.cs) = 494
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr791:
- ( m.cs) = 484
-//line plugins/parsers/influx/machine.go.rl:90
+tr804:
+ ( m.cs) = 494
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st484:
+ st494:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof484
+ goto _test_eof494
}
- st_case_484:
-//line plugins/parsers/influx/machine.go:18045
+ st_case_494:
+//line plugins/parsers/influx/machine.go:18306
switch ( m.data)[( m.p)] {
- case 9:
- goto tr749
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr750
- case 12:
- goto tr547
+ goto tr763
case 13:
- goto st74
+ goto st72
case 32:
- goto tr749
+ goto tr762
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr233
+ goto tr231
case 45:
- goto tr751
+ goto tr764
case 61:
goto st6
case 92:
- goto tr237
+ goto tr235
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr752
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr765
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr762
}
- goto tr235
-tr750:
- ( m.cs) = 485
-//line plugins/parsers/influx/machine.go.rl:90
+ goto tr233
+tr763:
+ ( m.cs) = 495
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st485:
+ st495:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof485
+ goto _test_eof495
}
- st_case_485:
-//line plugins/parsers/influx/machine.go:18096
+ st_case_495:
+//line plugins/parsers/influx/machine.go:18358
switch ( m.data)[( m.p)] {
- case 9:
- goto tr749
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr750
- case 12:
- goto tr547
+ goto tr763
case 13:
- goto st74
+ goto st72
case 32:
- goto tr749
+ goto tr762
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr233
+ goto tr231
case 45:
- goto tr751
+ goto tr764
case 61:
- goto tr101
+ goto tr99
case 92:
- goto tr237
+ goto tr235
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr752
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr765
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr762
}
- goto tr235
-tr751:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr233
+tr764:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st127
- st127:
+ goto st125
+ st125:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof127
+ goto _test_eof125
}
- st_case_127:
-//line plugins/parsers/influx/machine.go:18136
+ st_case_125:
+//line plugins/parsers/influx/machine.go:18399
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr239
- case 12:
- goto tr60
+ goto tr237
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st486
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st496
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr229
}
- goto st83
-tr752:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st81
+tr765:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st486
- st486:
+ goto st496
+ st496:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof486
+ goto _test_eof496
}
- st_case_486:
-//line plugins/parsers/influx/machine.go:18174
+ st_case_496:
+//line plugins/parsers/influx/machine.go:18438
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st488
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st498
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
-tr757:
- ( m.cs) = 487
-//line plugins/parsers/influx/machine.go.rl:90
+ goto st81
+tr770:
+ ( m.cs) = 497
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr754:
- ( m.cs) = 487
-//line plugins/parsers/influx/machine.go.rl:90
+tr767:
+ ( m.cs) = 497
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st487:
+ st497:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof487
+ goto _test_eof497
}
- st_case_487:
-//line plugins/parsers/influx/machine.go:18246
+ st_case_497:
+//line plugins/parsers/influx/machine.go:18511
switch ( m.data)[( m.p)] {
- case 9:
- goto tr756
case 10:
- goto st288
+ goto tr219
case 11:
- goto tr757
- case 12:
- goto tr556
+ goto tr770
case 13:
- goto st74
+ goto st72
case 32:
- goto tr756
+ goto tr769
case 34:
- goto tr204
+ goto tr202
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto tr237
+ goto tr235
}
- goto tr235
- st488:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr769
+ }
+ goto tr233
+ st498:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof488
+ goto _test_eof498
}
- st_case_488:
+ st_case_498:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st489
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st499
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st489:
+ goto st81
+ st499:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof489
+ goto _test_eof499
}
- st_case_489:
+ st_case_499:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st490
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st500
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st490:
+ goto st81
+ st500:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof490
+ goto _test_eof500
}
- st_case_490:
+ st_case_500:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st491
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st501
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st491:
+ goto st81
+ st501:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof491
+ goto _test_eof501
}
- st_case_491:
+ st_case_501:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st492
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st502
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st492:
+ goto st81
+ st502:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof492
+ goto _test_eof502
}
- st_case_492:
+ st_case_502:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st493
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st503
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st493:
+ goto st81
+ st503:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof493
+ goto _test_eof503
}
- st_case_493:
+ st_case_503:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st494
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st504
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st494:
+ goto st81
+ st504:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof494
+ goto _test_eof504
}
- st_case_494:
+ st_case_504:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st495
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st505
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st495:
+ goto st81
+ st505:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof495
+ goto _test_eof505
}
- st_case_495:
+ st_case_505:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st496
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st506
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st496:
+ goto st81
+ st506:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof496
+ goto _test_eof506
}
- st_case_496:
+ st_case_506:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st497
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st507
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st497:
+ goto st81
+ st507:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof497
+ goto _test_eof507
}
- st_case_497:
+ st_case_507:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st498
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st508
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st498:
+ goto st81
+ st508:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof498
+ goto _test_eof508
}
- st_case_498:
+ st_case_508:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st499
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st509
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st499:
+ goto st81
+ st509:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof499
+ goto _test_eof509
}
- st_case_499:
+ st_case_509:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st500
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st510
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st500:
+ goto st81
+ st510:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof500
+ goto _test_eof510
}
- st_case_500:
+ st_case_510:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st501
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st511
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st501:
+ goto st81
+ st511:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof501
+ goto _test_eof511
}
- st_case_501:
+ st_case_511:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st502
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st512
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st502:
+ goto st81
+ st512:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof502
+ goto _test_eof512
}
- st_case_502:
+ st_case_512:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st503
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st513
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st503:
+ goto st81
+ st513:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof503
+ goto _test_eof513
}
- st_case_503:
+ st_case_513:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st504
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st514
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st504:
+ goto st81
+ st514:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof504
+ goto _test_eof514
}
- st_case_504:
+ st_case_514:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st505
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st515
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr766
}
- goto st83
- st505:
+ goto st81
+ st515:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof505
+ goto _test_eof515
}
- st_case_505:
+ st_case_515:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr753
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr754
- case 12:
- goto tr553
+ goto tr767
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr753
+ goto tr766
case 34:
- goto tr208
+ goto tr206
case 44:
- goto tr233
+ goto tr231
case 61:
- goto tr101
+ goto tr99
case 92:
- goto st85
+ goto st83
}
- goto st83
- st128:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr766
+ }
+ goto st81
+ st126:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof128
+ goto _test_eof126
}
- st_case_128:
+ st_case_126:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr295
+ goto tr293
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
switch {
+ case ( m.data)[( m.p)] < 43:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
case ( m.data)[( m.p)] > 45:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st506
+ goto st516
}
- case ( m.data)[( m.p)] >= 43:
- goto st129
+ default:
+ goto st127
}
- goto st81
- st129:
+ goto st79
+ st127:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof129
+ goto _test_eof127
}
- st_case_129:
+ st_case_127:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st506
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st516
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr229
}
- goto st81
- st506:
+ goto st79
+ st516:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof506
+ goto _test_eof516
}
- st_case_506:
+ st_case_516:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr745
case 10:
- goto tr620
+ goto tr758
case 11:
- goto tr746
- case 12:
- goto tr712
+ goto tr759
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr745
+ goto tr757
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr747
+ goto tr760
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st506
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st516
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr757
}
- goto st81
- st507:
+ goto st79
+ st517:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof507
+ goto _test_eof517
}
- st_case_507:
+ st_case_517:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr745
case 10:
- goto tr620
+ goto tr758
case 11:
- goto tr746
- case 12:
- goto tr712
+ goto tr759
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr745
+ goto tr757
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr747
+ goto tr760
case 46:
- goto st483
+ goto st493
case 61:
goto st6
case 69:
- goto st128
+ goto st126
case 92:
- goto st87
+ goto st85
case 101:
- goto st128
+ goto st126
case 105:
- goto st509
+ goto st519
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st508
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st518
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr757
}
- goto st81
- st508:
+ goto st79
+ st518:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof508
+ goto _test_eof518
}
- st_case_508:
+ st_case_518:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr745
case 10:
- goto tr620
+ goto tr758
case 11:
- goto tr746
- case 12:
- goto tr712
+ goto tr759
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr745
+ goto tr757
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr747
+ goto tr760
case 46:
- goto st483
+ goto st493
case 61:
goto st6
case 69:
- goto st128
+ goto st126
case 92:
- goto st87
+ goto st85
case 101:
- goto st128
+ goto st126
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st508
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st518
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr757
}
- goto st81
- st509:
+ goto st79
+ st519:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof509
+ goto _test_eof519
}
- st_case_509:
+ st_case_519:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr777
case 10:
- goto tr778
+ goto tr791
case 11:
- goto tr779
- case 12:
- goto tr724
+ goto tr792
case 13:
- goto tr780
+ goto tr793
case 32:
- goto tr777
+ goto tr790
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr781
+ goto tr794
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
- goto st81
- st510:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr790
+ }
+ goto st79
+ st520:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof510
+ goto _test_eof520
}
- st_case_510:
+ st_case_520:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr745
case 10:
- goto tr620
+ goto tr758
case 11:
- goto tr746
- case 12:
- goto tr712
+ goto tr759
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr745
+ goto tr757
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr747
+ goto tr760
case 46:
- goto st483
+ goto st493
case 61:
goto st6
case 69:
- goto st128
+ goto st126
case 92:
- goto st87
+ goto st85
case 101:
- goto st128
+ goto st126
case 105:
- goto st509
+ goto st519
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st510
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st520
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr757
}
- goto st81
-tr269:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st79
+tr267:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st511
- st511:
+ goto st521
+ st521:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof511
+ goto _test_eof521
}
- st_case_511:
-//line plugins/parsers/influx/machine.go:19077
+ st_case_521:
+//line plugins/parsers/influx/machine.go:19361
switch ( m.data)[( m.p)] {
- case 9:
- goto tr745
case 10:
- goto tr620
+ goto tr758
case 11:
- goto tr746
- case 12:
- goto tr712
+ goto tr759
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr745
+ goto tr757
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr747
+ goto tr760
case 46:
- goto st483
+ goto st493
case 61:
goto st6
case 69:
- goto st128
+ goto st126
case 92:
- goto st87
+ goto st85
case 101:
- goto st128
+ goto st126
case 105:
- goto st509
+ goto st519
case 117:
- goto st512
+ goto st522
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st508
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st518
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr757
}
- goto st81
- st512:
+ goto st79
+ st522:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof512
+ goto _test_eof522
}
- st_case_512:
+ st_case_522:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr783
case 10:
- goto tr784
+ goto tr797
case 11:
- goto tr785
- case 12:
- goto tr731
+ goto tr798
case 13:
- goto tr786
+ goto tr799
case 32:
- goto tr783
+ goto tr796
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr787
+ goto tr800
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
- goto st81
-tr270:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr796
+ }
+ goto st79
+tr268:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st513
- st513:
+ goto st523
+ st523:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof513
+ goto _test_eof523
}
- st_case_513:
-//line plugins/parsers/influx/machine.go:19153
+ st_case_523:
+//line plugins/parsers/influx/machine.go:19437
switch ( m.data)[( m.p)] {
- case 9:
- goto tr745
case 10:
- goto tr620
+ goto tr758
case 11:
- goto tr746
- case 12:
- goto tr712
+ goto tr759
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr745
+ goto tr757
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr747
+ goto tr760
case 46:
- goto st483
+ goto st493
case 61:
goto st6
case 69:
- goto st128
+ goto st126
case 92:
- goto st87
+ goto st85
case 101:
- goto st128
+ goto st126
case 105:
- goto st509
+ goto st519
case 117:
- goto st512
+ goto st522
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st513
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st523
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr757
}
- goto st81
-tr271:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st79
+tr269:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st514
- st514:
+ goto st524
+ st524:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof514
+ goto _test_eof524
}
- st_case_514:
-//line plugins/parsers/influx/machine.go:19201
+ st_case_524:
+//line plugins/parsers/influx/machine.go:19486
switch ( m.data)[( m.p)] {
- case 9:
- goto tr789
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr791
- case 12:
- goto tr738
+ goto tr804
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr789
+ goto tr802
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr793
+ goto tr806
case 61:
goto st6
case 65:
- goto st130
+ goto st128
case 92:
- goto st87
+ goto st85
case 97:
- goto st133
+ goto st131
}
- goto st81
- st130:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr802
+ }
+ goto st79
+ st128:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof130
+ goto _test_eof128
}
- st_case_130:
+ st_case_128:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 76:
- goto st131
+ goto st129
case 92:
- goto st87
+ goto st85
}
- goto st81
- st131:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+ st129:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof131
+ goto _test_eof129
}
- st_case_131:
+ st_case_129:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 83:
- goto st132
+ goto st130
case 92:
- goto st87
+ goto st85
}
- goto st81
- st132:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+ st130:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof132
+ goto _test_eof130
}
- st_case_132:
+ st_case_130:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 69:
- goto st515
+ goto st525
case 92:
- goto st87
+ goto st85
}
- goto st81
- st515:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+ st525:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof515
+ goto _test_eof525
}
- st_case_515:
+ st_case_525:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr789
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr791
- case 12:
- goto tr738
+ goto tr804
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr789
+ goto tr802
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr793
+ goto tr806
case 61:
goto st6
case 92:
- goto st87
+ goto st85
}
- goto st81
- st133:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr802
+ }
+ goto st79
+ st131:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof133
+ goto _test_eof131
}
- st_case_133:
+ st_case_131:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto st87
+ goto st85
case 108:
- goto st134
+ goto st132
}
- goto st81
- st134:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+ st132:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof134
+ goto _test_eof132
}
- st_case_134:
+ st_case_132:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto st87
+ goto st85
case 115:
- goto st135
+ goto st133
}
- goto st81
- st135:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+ st133:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof135
+ goto _test_eof133
}
- st_case_135:
+ st_case_133:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto st87
+ goto st85
case 101:
- goto st515
+ goto st525
}
- goto st81
-tr272:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+tr270:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st516
- st516:
+ goto st526
+ st526:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof516
+ goto _test_eof526
}
- st_case_516:
-//line plugins/parsers/influx/machine.go:19448
+ st_case_526:
+//line plugins/parsers/influx/machine.go:19725
switch ( m.data)[( m.p)] {
- case 9:
- goto tr789
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr791
- case 12:
- goto tr738
+ goto tr804
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr789
+ goto tr802
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr793
+ goto tr806
case 61:
goto st6
case 82:
- goto st136
+ goto st134
case 92:
- goto st87
+ goto st85
case 114:
- goto st137
+ goto st135
}
- goto st81
- st136:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr802
+ }
+ goto st79
+ st134:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof136
+ goto _test_eof134
}
- st_case_136:
+ st_case_134:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 85:
- goto st132
+ goto st130
case 92:
- goto st87
+ goto st85
}
- goto st81
- st137:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+ st135:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof137
+ goto _test_eof135
}
- st_case_137:
+ st_case_135:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr231
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr232
- case 12:
- goto tr60
+ goto tr230
case 13:
- goto st8
+ goto st6
case 32:
- goto tr231
+ goto tr229
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr233
+ goto tr231
case 61:
goto st6
case 92:
- goto st87
+ goto st85
case 117:
- goto st135
+ goto st133
}
- goto st81
-tr273:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr229
+ }
+ goto st79
+tr271:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st517
- st517:
+ goto st527
+ st527:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof517
+ goto _test_eof527
}
- st_case_517:
-//line plugins/parsers/influx/machine.go:19547
+ st_case_527:
+//line plugins/parsers/influx/machine.go:19821
switch ( m.data)[( m.p)] {
- case 9:
- goto tr789
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr791
- case 12:
- goto tr738
+ goto tr804
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr789
+ goto tr802
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr793
+ goto tr806
case 61:
goto st6
case 92:
- goto st87
+ goto st85
case 97:
- goto st133
+ goto st131
}
- goto st81
-tr274:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr802
+ }
+ goto st79
+tr272:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st518
- st518:
+ goto st528
+ st528:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof518
+ goto _test_eof528
}
- st_case_518:
-//line plugins/parsers/influx/machine.go:19584
+ st_case_528:
+//line plugins/parsers/influx/machine.go:19857
switch ( m.data)[( m.p)] {
- case 9:
- goto tr789
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr791
- case 12:
- goto tr738
+ goto tr804
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr789
+ goto tr802
case 34:
- goto tr157
+ goto tr155
case 44:
- goto tr793
+ goto tr806
case 61:
goto st6
case 92:
- goto st87
+ goto st85
case 114:
- goto st137
+ goto st135
}
- goto st81
-tr259:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr802
+ }
+ goto st79
+tr257:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st138
- st138:
+ goto st136
+ st136:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof138
+ goto _test_eof136
}
- st_case_138:
-//line plugins/parsers/influx/machine.go:19621
+ st_case_136:
+//line plugins/parsers/influx/machine.go:19893
switch ( m.data)[( m.p)] {
case 34:
- goto st99
+ goto st97
case 92:
- goto st139
+ goto st137
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr47
+ goto tr45
}
case ( m.data)[( m.p)] >= 9:
- goto tr47
+ goto tr45
}
- goto st46
- st139:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st44
+ st137:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof139
+ goto _test_eof137
}
- st_case_139:
-//line plugins/parsers/influx/machine.go:19645
+ st_case_137:
+//line plugins/parsers/influx/machine.go:19917
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr47
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr261
+ goto tr259
case 44:
goto st6
case 61:
- goto tr262
+ goto tr260
case 92:
- goto st138
+ goto st136
}
- goto st99
- st140:
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st97
+ st138:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof140
+ goto _test_eof138
}
- st_case_140:
+ st_case_138:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr317
+ goto tr315
case 44:
- goto tr92
+ goto tr90
case 92:
- goto st142
+ goto st140
}
switch {
+ case ( m.data)[( m.p)] < 43:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
case ( m.data)[( m.p)] > 45:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st521
+ goto st531
}
- case ( m.data)[( m.p)] >= 43:
- goto st141
+ default:
+ goto st139
}
- goto st31
-tr317:
- ( m.cs) = 519
-//line plugins/parsers/influx/machine.go.rl:139
+ goto st29
+tr315:
+ ( m.cs) = 529
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st519:
+ st529:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof519
+ goto _test_eof529
}
- st_case_519:
-//line plugins/parsers/influx/machine.go:19719
+ st_case_529:
+//line plugins/parsers/influx/machine.go:19990
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 11:
- goto tr618
+ goto tr634
case 13:
- goto st34
+ goto st32
case 32:
- goto tr482
+ goto tr499
case 44:
- goto tr484
+ goto tr501
case 92:
- goto st96
+ goto st94
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st520
+ goto st530
}
case ( m.data)[( m.p)] >= 9:
- goto tr482
+ goto tr499
}
goto st1
- st520:
+ st530:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof520
+ goto _test_eof530
}
- st_case_520:
+ st_case_530:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 11:
- goto tr798
+ goto tr812
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr622
+ goto tr811
case 44:
- goto tr799
+ goto tr813
case 92:
- goto st96
+ goto st94
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st520
+ goto st530
}
case ( m.data)[( m.p)] >= 9:
- goto tr622
+ goto tr811
}
goto st1
- st141:
+ st139:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof141
+ goto _test_eof139
}
- st_case_141:
+ st_case_139:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 92:
- goto st142
+ goto st140
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st521
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st531
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr87
}
- goto st31
- st521:
+ goto st29
+ st531:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof521
+ goto _test_eof531
}
- st_case_521:
+ st_case_531:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr619
case 10:
- goto tr620
+ goto tr636
case 11:
- goto tr621
- case 12:
- goto tr622
+ goto tr637
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr619
+ goto tr635
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr624
+ goto tr639
case 92:
- goto st142
+ goto st140
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st521
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st531
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr635
}
- goto st31
-tr87:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st29
+tr85:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st142
- st142:
+ goto st140
+ st140:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof142
+ goto _test_eof140
}
- st_case_142:
-//line plugins/parsers/influx/machine.go:19840
+ st_case_140:
+//line plugins/parsers/influx/machine.go:20113
switch ( m.data)[( m.p)] {
case 34:
- goto st31
+ goto st29
case 92:
- goto st31
+ goto st29
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -19852,1656 +20125,1668 @@ tr87:
goto tr8
}
goto st1
- st522:
+ st532:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof522
+ goto _test_eof532
}
- st_case_522:
+ st_case_532:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr619
case 10:
- goto tr620
+ goto tr636
case 11:
- goto tr621
- case 12:
- goto tr622
+ goto tr637
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr619
+ goto tr635
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr624
+ goto tr639
case 46:
- goto st396
+ goto st406
case 69:
- goto st140
+ goto st138
case 92:
- goto st142
- case 101:
goto st140
+ case 101:
+ goto st138
case 105:
- goto st524
+ goto st534
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st523
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st533
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr635
}
- goto st31
- st523:
+ goto st29
+ st533:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof523
+ goto _test_eof533
}
- st_case_523:
+ st_case_533:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr619
case 10:
- goto tr620
+ goto tr636
case 11:
- goto tr621
- case 12:
- goto tr622
+ goto tr637
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr619
+ goto tr635
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr624
+ goto tr639
case 46:
- goto st396
+ goto st406
case 69:
- goto st140
+ goto st138
case 92:
- goto st142
- case 101:
goto st140
+ case 101:
+ goto st138
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st523
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st533
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr635
}
- goto st31
- st524:
+ goto st29
+ st534:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof524
+ goto _test_eof534
}
- st_case_524:
+ st_case_534:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr802
case 10:
- goto tr778
+ goto tr817
case 11:
- goto tr803
- case 12:
- goto tr804
+ goto tr818
case 13:
- goto tr780
+ goto tr793
case 32:
- goto tr802
+ goto tr816
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr805
+ goto tr819
case 92:
- goto st142
+ goto st140
}
- goto st31
- st525:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr816
+ }
+ goto st29
+ st535:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof525
+ goto _test_eof535
}
- st_case_525:
+ st_case_535:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr619
case 10:
- goto tr620
+ goto tr636
case 11:
- goto tr621
- case 12:
- goto tr622
+ goto tr637
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr619
+ goto tr635
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr624
+ goto tr639
case 46:
- goto st396
+ goto st406
case 69:
- goto st140
+ goto st138
case 92:
- goto st142
- case 101:
goto st140
+ case 101:
+ goto st138
case 105:
- goto st524
+ goto st534
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st525
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st535
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr635
}
- goto st31
-tr247:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st29
+tr245:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st526
- st526:
+ goto st536
+ st536:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof526
+ goto _test_eof536
}
- st_case_526:
-//line plugins/parsers/influx/machine.go:20002
+ st_case_536:
+//line plugins/parsers/influx/machine.go:20277
switch ( m.data)[( m.p)] {
- case 9:
- goto tr619
case 10:
- goto tr620
+ goto tr636
case 11:
- goto tr621
- case 12:
- goto tr622
+ goto tr637
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr619
+ goto tr635
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr624
+ goto tr639
case 46:
- goto st396
+ goto st406
case 69:
- goto st140
+ goto st138
case 92:
- goto st142
- case 101:
goto st140
+ case 101:
+ goto st138
case 105:
- goto st524
+ goto st534
case 117:
- goto st527
+ goto st537
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st523
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st533
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr635
}
- goto st31
- st527:
+ goto st29
+ st537:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof527
+ goto _test_eof537
}
- st_case_527:
+ st_case_537:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr807
case 10:
- goto tr784
+ goto tr822
case 11:
- goto tr808
- case 12:
- goto tr809
+ goto tr823
case 13:
- goto tr786
+ goto tr799
case 32:
- goto tr807
+ goto tr821
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr810
+ goto tr824
case 92:
- goto st142
+ goto st140
}
- goto st31
-tr248:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr821
+ }
+ goto st29
+tr246:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st528
- st528:
+ goto st538
+ st538:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof528
+ goto _test_eof538
}
- st_case_528:
-//line plugins/parsers/influx/machine.go:20074
+ st_case_538:
+//line plugins/parsers/influx/machine.go:20349
switch ( m.data)[( m.p)] {
- case 9:
- goto tr619
case 10:
- goto tr620
+ goto tr636
case 11:
- goto tr621
- case 12:
- goto tr622
+ goto tr637
case 13:
- goto tr623
+ goto tr638
case 32:
- goto tr619
+ goto tr635
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr624
+ goto tr639
case 46:
- goto st396
+ goto st406
case 69:
- goto st140
+ goto st138
case 92:
- goto st142
- case 101:
goto st140
+ case 101:
+ goto st138
case 105:
- goto st524
+ goto st534
case 117:
- goto st527
+ goto st537
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st528
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st538
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr635
}
- goto st31
-tr249:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st29
+tr247:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st529
- st529:
+ goto st539
+ st539:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof529
+ goto _test_eof539
}
- st_case_529:
-//line plugins/parsers/influx/machine.go:20120
+ st_case_539:
+//line plugins/parsers/influx/machine.go:20396
switch ( m.data)[( m.p)] {
- case 9:
- goto tr812
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr813
- case 12:
- goto tr814
+ goto tr827
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr812
+ goto tr826
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr815
+ goto tr828
case 65:
- goto st143
+ goto st141
case 92:
- goto st142
+ goto st140
case 97:
- goto st146
+ goto st144
}
- goto st31
- st143:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr826
+ }
+ goto st29
+ st141:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof143
+ goto _test_eof141
}
- st_case_143:
+ st_case_141:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 76:
- goto st144
- case 92:
goto st142
+ case 92:
+ goto st140
}
- goto st31
- st144:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st29
+ st142:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof144
+ goto _test_eof142
}
- st_case_144:
+ st_case_142:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 83:
- goto st145
+ goto st143
case 92:
- goto st142
+ goto st140
}
- goto st31
- st145:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st29
+ st143:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof145
+ goto _test_eof143
}
- st_case_145:
+ st_case_143:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 69:
- goto st530
+ goto st540
case 92:
- goto st142
+ goto st140
}
- goto st31
- st530:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st29
+ st540:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof530
+ goto _test_eof540
}
- st_case_530:
+ st_case_540:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr812
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr813
- case 12:
- goto tr814
+ goto tr827
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr812
+ goto tr826
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr815
+ goto tr828
case 92:
- goto st142
+ goto st140
}
- goto st31
- st146:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr826
+ }
+ goto st29
+ st144:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof146
+ goto _test_eof144
}
- st_case_146:
+ st_case_144:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 92:
- goto st142
+ goto st140
case 108:
- goto st147
+ goto st145
}
- goto st31
- st147:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st29
+ st145:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof147
+ goto _test_eof145
}
- st_case_147:
+ st_case_145:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 92:
- goto st142
+ goto st140
case 115:
- goto st148
+ goto st146
}
- goto st31
- st148:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st29
+ st146:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof148
+ goto _test_eof146
}
- st_case_148:
+ st_case_146:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 92:
- goto st142
+ goto st140
case 101:
- goto st530
+ goto st540
}
- goto st31
-tr250:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st29
+tr248:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st531
- st531:
+ goto st541
+ st541:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof531
+ goto _test_eof541
}
- st_case_531:
-//line plugins/parsers/influx/machine.go:20351
+ st_case_541:
+//line plugins/parsers/influx/machine.go:20619
switch ( m.data)[( m.p)] {
- case 9:
- goto tr812
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr813
- case 12:
- goto tr814
+ goto tr827
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr812
+ goto tr826
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr815
+ goto tr828
case 82:
- goto st149
+ goto st147
case 92:
- goto st142
+ goto st140
case 114:
- goto st150
+ goto st148
}
- goto st31
- st149:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr826
+ }
+ goto st29
+ st147:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof149
+ goto _test_eof147
}
- st_case_149:
+ st_case_147:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 85:
- goto st145
+ goto st143
case 92:
- goto st142
+ goto st140
}
- goto st31
- st150:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st29
+ st148:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof150
+ goto _test_eof148
}
- st_case_150:
+ st_case_148:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr89
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr90
- case 12:
- goto tr1
+ goto tr88
case 13:
- goto st8
+ goto st6
case 32:
- goto tr89
+ goto tr87
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr92
+ goto tr90
case 92:
- goto st142
+ goto st140
case 117:
- goto st148
+ goto st146
}
- goto st31
-tr251:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr87
+ }
+ goto st29
+tr249:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st532
- st532:
+ goto st542
+ st542:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof532
+ goto _test_eof542
}
- st_case_532:
-//line plugins/parsers/influx/machine.go:20444
+ st_case_542:
+//line plugins/parsers/influx/machine.go:20709
switch ( m.data)[( m.p)] {
- case 9:
- goto tr812
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr813
- case 12:
- goto tr814
+ goto tr827
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr812
+ goto tr826
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr815
+ goto tr828
case 92:
- goto st142
+ goto st140
case 97:
- goto st146
+ goto st144
}
- goto st31
-tr252:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr826
+ }
+ goto st29
+tr250:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st533
- st533:
+ goto st543
+ st543:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof533
+ goto _test_eof543
}
- st_case_533:
-//line plugins/parsers/influx/machine.go:20479
+ st_case_543:
+//line plugins/parsers/influx/machine.go:20743
switch ( m.data)[( m.p)] {
- case 9:
- goto tr812
case 10:
- goto tr790
+ goto tr803
case 11:
- goto tr813
- case 12:
- goto tr814
+ goto tr827
case 13:
- goto tr792
+ goto tr805
case 32:
- goto tr812
+ goto tr826
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr815
+ goto tr828
case 92:
- goto st142
+ goto st140
case 114:
- goto st150
+ goto st148
}
- goto st31
- st534:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr826
+ }
+ goto st29
+ st544:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof534
+ goto _test_eof544
}
- st_case_534:
+ st_case_544:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st535
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st545
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st535:
+ goto st40
+ st545:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof535
+ goto _test_eof545
}
- st_case_535:
+ st_case_545:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st536
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st546
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st536:
+ goto st40
+ st546:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof536
+ goto _test_eof546
}
- st_case_536:
+ st_case_546:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st537
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st547
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st537:
+ goto st40
+ st547:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof537
+ goto _test_eof547
}
- st_case_537:
+ st_case_547:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st538
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st548
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st538:
+ goto st40
+ st548:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof538
+ goto _test_eof548
}
- st_case_538:
+ st_case_548:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st539
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st549
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st539:
+ goto st40
+ st549:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof539
+ goto _test_eof549
}
- st_case_539:
+ st_case_549:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st540
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st550
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st540:
+ goto st40
+ st550:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof540
+ goto _test_eof550
}
- st_case_540:
+ st_case_550:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st541
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st551
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st541:
+ goto st40
+ st551:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof541
+ goto _test_eof551
}
- st_case_541:
+ st_case_551:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st542
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st552
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st542:
+ goto st40
+ st552:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof542
+ goto _test_eof552
}
- st_case_542:
+ st_case_552:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st543
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st553
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st543:
+ goto st40
+ st553:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof543
+ goto _test_eof553
}
- st_case_543:
+ st_case_553:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st544
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st554
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st544:
+ goto st40
+ st554:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof544
+ goto _test_eof554
}
- st_case_544:
+ st_case_554:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st545
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st555
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st545:
+ goto st40
+ st555:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof545
+ goto _test_eof555
}
- st_case_545:
+ st_case_555:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st546
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st556
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st546:
+ goto st40
+ st556:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof546
+ goto _test_eof556
}
- st_case_546:
+ st_case_556:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st547
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st557
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st547:
+ goto st40
+ st557:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof547
+ goto _test_eof557
}
- st_case_547:
+ st_case_557:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st548
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st558
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st548:
+ goto st40
+ st558:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof548
+ goto _test_eof558
}
- st_case_548:
+ st_case_558:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st549
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st559
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st549:
+ goto st40
+ st559:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof549
+ goto _test_eof559
}
- st_case_549:
+ st_case_559:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st550
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st560
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st550:
+ goto st40
+ st560:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof550
+ goto _test_eof560
}
- st_case_550:
+ st_case_560:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st551
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st561
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr627
}
- goto st42
- st551:
+ goto st40
+ st561:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof551
+ goto _test_eof561
}
- st_case_551:
+ st_case_561:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr611
case 10:
- goto tr584
+ goto tr600
case 11:
- goto tr612
- case 12:
- goto tr490
+ goto tr628
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr611
+ goto tr627
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr92
+ goto tr90
case 61:
- goto tr129
+ goto tr127
case 92:
- goto st94
+ goto st92
}
- goto st42
-tr213:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr627
+ }
+ goto st40
+tr211:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st151
- st151:
+ goto st149
+ st149:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof151
+ goto _test_eof149
}
- st_case_151:
-//line plugins/parsers/influx/machine.go:21069
+ st_case_149:
+//line plugins/parsers/influx/machine.go:21348
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 46:
- goto st152
+ goto st150
case 48:
- goto st576
+ goto st586
case 92:
- goto st157
+ goto st155
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st579
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st589
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr178
}
- goto st55
-tr214:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st53
+tr212:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st152
- st152:
+ goto st150
+ st150:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof152
+ goto _test_eof150
}
- st_case_152:
-//line plugins/parsers/influx/machine.go:21109
+ st_case_150:
+//line plugins/parsers/influx/machine.go:21389
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 92:
- goto st157
+ goto st155
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st552
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st562
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr178
}
- goto st55
- st552:
+ goto st53
+ st562:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof552
+ goto _test_eof562
}
- st_case_552:
+ st_case_562:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr837
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr838
- case 12:
- goto tr622
+ goto tr851
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr837
+ goto tr850
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr839
+ goto tr852
case 69:
- goto st155
+ goto st153
case 92:
- goto st157
- case 101:
goto st155
+ case 101:
+ goto st153
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st552
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st562
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr850
}
- goto st55
-tr838:
- ( m.cs) = 553
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st53
+tr851:
+ ( m.cs) = 563
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr870:
- ( m.cs) = 553
-//line plugins/parsers/influx/machine.go.rl:77
+tr883:
+ ( m.cs) = 563
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr874:
- ( m.cs) = 553
-//line plugins/parsers/influx/machine.go.rl:77
+tr887:
+ ( m.cs) = 563
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr878:
- ( m.cs) = 553
-//line plugins/parsers/influx/machine.go.rl:77
+tr892:
+ ( m.cs) = 563
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st553:
+ st563:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof553
+ goto _test_eof563
}
- st_case_553:
-//line plugins/parsers/influx/machine.go:21264
+ st_case_563:
+//line plugins/parsers/influx/machine.go:21546
switch ( m.data)[( m.p)] {
- case 9:
- goto tr841
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr842
- case 12:
- goto tr482
+ goto tr855
case 13:
- goto st104
+ goto st102
case 32:
- goto tr841
+ goto tr854
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr182
+ goto tr180
case 45:
- goto tr843
+ goto tr856
case 61:
- goto st55
+ goto st53
case 92:
- goto tr186
+ goto tr184
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr844
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr857
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr854
}
- goto tr184
-tr842:
- ( m.cs) = 554
-//line plugins/parsers/influx/machine.go.rl:77
+ goto tr182
+tr855:
+ ( m.cs) = 564
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
- st554:
+ st564:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof554
+ goto _test_eof564
}
- st_case_554:
-//line plugins/parsers/influx/machine.go:21315
+ st_case_564:
+//line plugins/parsers/influx/machine.go:21598
switch ( m.data)[( m.p)] {
- case 9:
- goto tr841
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr842
- case 12:
- goto tr482
+ goto tr855
case 13:
- goto st104
+ goto st102
case 32:
- goto tr841
+ goto tr854
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr182
+ goto tr180
case 45:
- goto tr843
+ goto tr856
case 61:
- goto tr189
+ goto tr187
case 92:
- goto tr186
+ goto tr184
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr844
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr857
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr854
}
- goto tr184
-tr843:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr182
+tr856:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st153
- st153:
+ goto st151
+ st151:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof153
+ goto _test_eof151
}
- st_case_153:
-//line plugins/parsers/influx/machine.go:21355
+ st_case_151:
+//line plugins/parsers/influx/machine.go:21639
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr188
- case 12:
- goto tr1
+ goto tr186
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st555
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st565
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr178
}
- goto st57
-tr844:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st55
+tr857:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st555
- st555:
+ goto st565
+ st565:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof555
+ goto _test_eof565
}
- st_case_555:
-//line plugins/parsers/influx/machine.go:21393
+ st_case_565:
+//line plugins/parsers/influx/machine.go:21678
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st557
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st567
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
-tr849:
- ( m.cs) = 556
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st55
+tr862:
+ ( m.cs) = 566
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
goto _again
-tr846:
- ( m.cs) = 556
-//line plugins/parsers/influx/machine.go.rl:77
+tr859:
+ ( m.cs) = 566
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st556:
+ st566:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof556
+ goto _test_eof566
}
- st_case_556:
-//line plugins/parsers/influx/machine.go:21465
+ st_case_566:
+//line plugins/parsers/influx/machine.go:21751
switch ( m.data)[( m.p)] {
- case 9:
- goto tr848
case 10:
- goto st317
+ goto tr273
case 11:
- goto tr849
- case 12:
- goto tr495
+ goto tr862
case 13:
- goto st104
+ goto st102
case 32:
- goto tr848
+ goto tr861
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto tr186
+ goto tr184
}
- goto tr184
-tr186:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr861
+ }
+ goto tr182
+tr184:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st154
- st154:
+ goto st152
+ st152:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof154
+ goto _test_eof152
}
- st_case_154:
-//line plugins/parsers/influx/machine.go:21500
+ st_case_152:
+//line plugins/parsers/influx/machine.go:21785
switch ( m.data)[( m.p)] {
case 34:
- goto st57
+ goto st55
case 92:
- goto st57
+ goto st55
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -21511,671 +21796,689 @@ tr186:
case ( m.data)[( m.p)] >= 9:
goto tr8
}
- goto st12
- st557:
+ goto st10
+ st567:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof557
+ goto _test_eof567
}
- st_case_557:
+ st_case_567:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st558
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st568
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st558:
+ goto st55
+ st568:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof558
+ goto _test_eof568
}
- st_case_558:
+ st_case_568:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st559
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st569
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st559:
+ goto st55
+ st569:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof559
+ goto _test_eof569
}
- st_case_559:
+ st_case_569:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st560
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st570
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st560:
+ goto st55
+ st570:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof560
+ goto _test_eof570
}
- st_case_560:
+ st_case_570:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st561
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st571
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st561:
+ goto st55
+ st571:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof561
+ goto _test_eof571
}
- st_case_561:
+ st_case_571:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st562
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st572
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st562:
+ goto st55
+ st572:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof562
+ goto _test_eof572
}
- st_case_562:
+ st_case_572:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st563
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st573
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st563:
+ goto st55
+ st573:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof563
+ goto _test_eof573
}
- st_case_563:
+ st_case_573:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st564
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st574
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st564:
+ goto st55
+ st574:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof564
+ goto _test_eof574
}
- st_case_564:
+ st_case_574:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st565
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st575
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st565:
+ goto st55
+ st575:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof565
+ goto _test_eof575
}
- st_case_565:
+ st_case_575:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st566
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st576
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st566:
+ goto st55
+ st576:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof566
+ goto _test_eof576
}
- st_case_566:
+ st_case_576:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st567
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st577
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st567:
+ goto st55
+ st577:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof567
+ goto _test_eof577
}
- st_case_567:
+ st_case_577:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st568
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st578
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st568:
+ goto st55
+ st578:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof568
+ goto _test_eof578
}
- st_case_568:
+ st_case_578:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st569
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st579
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st569:
- if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof569
+ goto st55
+ st579:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof579
}
- st_case_569:
+ st_case_579:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st570
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st580
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st570:
+ goto st55
+ st580:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof570
+ goto _test_eof580
}
- st_case_570:
+ st_case_580:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st571
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st581
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st571:
+ goto st55
+ st581:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof571
+ goto _test_eof581
}
- st_case_571:
+ st_case_581:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st572
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st582
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st572:
+ goto st55
+ st582:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof572
+ goto _test_eof582
}
- st_case_572:
+ st_case_582:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st573
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st583
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st573:
+ goto st55
+ st583:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof573
+ goto _test_eof583
}
- st_case_573:
+ st_case_583:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st574
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st584
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr858
}
- goto st57
- st574:
+ goto st55
+ st584:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof574
+ goto _test_eof584
}
- st_case_574:
+ st_case_584:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr845
case 10:
- goto tr659
+ goto tr674
case 11:
- goto tr846
- case 12:
- goto tr490
+ goto tr859
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr845
+ goto tr858
case 34:
- goto tr128
+ goto tr126
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr189
+ goto tr187
case 92:
- goto st154
+ goto st152
}
- goto st57
- st155:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr858
+ }
+ goto st55
+ st153:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof155
+ goto _test_eof153
}
- st_case_155:
+ st_case_153:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr317
+ goto tr315
case 44:
- goto tr182
+ goto tr180
case 92:
- goto st157
+ goto st155
}
switch {
+ case ( m.data)[( m.p)] < 43:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
case ( m.data)[( m.p)] > 45:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st575
+ goto st585
}
- case ( m.data)[( m.p)] >= 43:
- goto st156
+ default:
+ goto st154
}
- goto st55
- st156:
+ goto st53
+ st154:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof156
+ goto _test_eof154
}
- st_case_156:
+ st_case_154:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 92:
- goto st157
+ goto st155
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st575
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st585
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr178
}
- goto st55
- st575:
+ goto st53
+ st585:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof575
+ goto _test_eof585
}
- st_case_575:
+ st_case_585:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr837
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr838
- case 12:
- goto tr622
+ goto tr851
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr837
+ goto tr850
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr839
+ goto tr852
case 92:
- goto st157
+ goto st155
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st575
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st585
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr850
}
- goto st55
-tr340:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st53
+tr338:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st157
- st157:
+ goto st155
+ st155:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof157
+ goto _test_eof155
}
- st_case_157:
-//line plugins/parsers/influx/machine.go:22174
+ st_case_155:
+//line plugins/parsers/influx/machine.go:22477
switch ( m.data)[( m.p)] {
case 34:
- goto st55
+ goto st53
case 92:
- goto st55
+ goto st53
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -22186,2279 +22489,2152 @@ tr340:
goto tr8
}
goto st1
- st576:
+ st586:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof576
+ goto _test_eof586
}
- st_case_576:
+ st_case_586:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr837
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr838
- case 12:
- goto tr622
+ goto tr851
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr837
+ goto tr850
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr839
+ goto tr852
case 46:
- goto st552
+ goto st562
case 69:
- goto st155
+ goto st153
case 92:
- goto st157
- case 101:
goto st155
+ case 101:
+ goto st153
case 105:
- goto st578
+ goto st588
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st577
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st587
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr850
}
- goto st55
- st577:
+ goto st53
+ st587:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof577
+ goto _test_eof587
}
- st_case_577:
+ st_case_587:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr837
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr838
- case 12:
- goto tr622
+ goto tr851
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr837
+ goto tr850
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr839
+ goto tr852
case 46:
- goto st552
+ goto st562
case 69:
- goto st155
+ goto st153
case 92:
- goto st157
- case 101:
goto st155
+ case 101:
+ goto st153
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st577
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st587
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr850
}
- goto st55
- st578:
+ goto st53
+ st588:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof578
+ goto _test_eof588
}
- st_case_578:
+ st_case_588:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr869
case 10:
- goto tr722
+ goto tr737
case 11:
- goto tr870
- case 12:
- goto tr804
+ goto tr883
case 13:
- goto tr725
+ goto tr739
case 32:
- goto tr869
+ goto tr882
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr871
+ goto tr884
case 92:
- goto st157
+ goto st155
}
- goto st55
- st579:
- if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof579
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr882
}
- st_case_579:
- switch ( m.data)[( m.p)] {
- case 9:
- goto tr837
+ goto st53
+ st589:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof589
+ }
+ st_case_589:
+ switch ( m.data)[( m.p)] {
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr838
- case 12:
- goto tr622
+ goto tr851
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr837
+ goto tr850
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr839
+ goto tr852
case 46:
- goto st552
+ goto st562
case 69:
- goto st155
+ goto st153
case 92:
- goto st157
- case 101:
goto st155
+ case 101:
+ goto st153
case 105:
- goto st578
+ goto st588
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st579
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st589
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr850
}
- goto st55
-tr215:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st53
+tr213:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st580
- st580:
+ goto st590
+ st590:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof580
+ goto _test_eof590
}
- st_case_580:
-//line plugins/parsers/influx/machine.go:22336
+ st_case_590:
+//line plugins/parsers/influx/machine.go:22641
switch ( m.data)[( m.p)] {
- case 9:
- goto tr837
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr838
- case 12:
- goto tr622
+ goto tr851
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr837
+ goto tr850
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr839
+ goto tr852
case 46:
- goto st552
+ goto st562
case 69:
- goto st155
+ goto st153
case 92:
- goto st157
- case 101:
goto st155
+ case 101:
+ goto st153
case 105:
- goto st578
+ goto st588
case 117:
- goto st581
+ goto st591
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st577
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st587
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr850
}
- goto st55
- st581:
+ goto st53
+ st591:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof581
+ goto _test_eof591
}
- st_case_581:
+ st_case_591:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr873
case 10:
- goto tr729
+ goto tr743
case 11:
- goto tr874
- case 12:
- goto tr809
+ goto tr887
case 13:
- goto tr732
+ goto tr745
case 32:
- goto tr873
+ goto tr886
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr875
+ goto tr888
case 92:
- goto st157
+ goto st155
}
- goto st55
-tr216:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr886
+ }
+ goto st53
+tr214:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st582
- st582:
+ goto st592
+ st592:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof582
+ goto _test_eof592
}
- st_case_582:
-//line plugins/parsers/influx/machine.go:22408
+ st_case_592:
+//line plugins/parsers/influx/machine.go:22713
switch ( m.data)[( m.p)] {
- case 9:
- goto tr837
case 10:
- goto tr515
+ goto tr532
case 11:
- goto tr838
- case 12:
- goto tr622
+ goto tr851
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr837
+ goto tr850
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr839
+ goto tr852
case 46:
- goto st552
+ goto st562
case 69:
- goto st155
+ goto st153
case 92:
- goto st157
- case 101:
goto st155
+ case 101:
+ goto st153
case 105:
- goto st578
+ goto st588
case 117:
- goto st581
+ goto st591
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st582
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st592
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr850
}
- goto st55
-tr217:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st53
+tr215:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st583
- st583:
+ goto st593
+ st593:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof583
+ goto _test_eof593
}
- st_case_583:
-//line plugins/parsers/influx/machine.go:22454
+ st_case_593:
+//line plugins/parsers/influx/machine.go:22760
switch ( m.data)[( m.p)] {
- case 9:
- goto tr877
case 10:
- goto tr736
+ goto tr891
case 11:
- goto tr878
- case 12:
- goto tr814
+ goto tr892
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr877
+ goto tr890
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr879
+ goto tr893
case 65:
- goto st158
+ goto st156
case 92:
- goto st157
+ goto st155
case 97:
- goto st161
+ goto st159
}
- goto st55
- st158:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr890
+ }
+ goto st53
+ st156:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof158
+ goto _test_eof156
}
- st_case_158:
+ st_case_156:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 76:
- goto st159
- case 92:
goto st157
+ case 92:
+ goto st155
}
- goto st55
- st159:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st53
+ st157:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof159
+ goto _test_eof157
}
- st_case_159:
+ st_case_157:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 83:
- goto st160
+ goto st158
case 92:
- goto st157
+ goto st155
}
- goto st55
- st160:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st53
+ st158:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof160
+ goto _test_eof158
}
- st_case_160:
+ st_case_158:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 69:
- goto st584
+ goto st594
case 92:
- goto st157
+ goto st155
}
- goto st55
- st584:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st53
+ st594:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof584
+ goto _test_eof594
}
- st_case_584:
+ st_case_594:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr877
case 10:
- goto tr736
+ goto tr891
case 11:
- goto tr878
- case 12:
- goto tr814
+ goto tr892
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr877
+ goto tr890
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr879
+ goto tr893
case 92:
- goto st157
+ goto st155
}
- goto st55
- st161:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr890
+ }
+ goto st53
+ st159:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof161
+ goto _test_eof159
}
- st_case_161:
+ st_case_159:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 92:
- goto st157
+ goto st155
case 108:
- goto st162
+ goto st160
}
- goto st55
- st162:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st53
+ st160:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof162
+ goto _test_eof160
}
- st_case_162:
+ st_case_160:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 92:
- goto st157
+ goto st155
case 115:
- goto st163
+ goto st161
}
- goto st55
- st163:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st53
+ st161:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof163
+ goto _test_eof161
}
- st_case_163:
+ st_case_161:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 92:
- goto st157
+ goto st155
case 101:
- goto st584
+ goto st594
}
- goto st55
-tr218:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st53
+tr216:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st585
- st585:
+ goto st595
+ st595:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof585
+ goto _test_eof595
}
- st_case_585:
-//line plugins/parsers/influx/machine.go:22685
+ st_case_595:
+//line plugins/parsers/influx/machine.go:22983
switch ( m.data)[( m.p)] {
- case 9:
- goto tr877
case 10:
- goto tr736
+ goto tr891
case 11:
- goto tr878
- case 12:
- goto tr814
+ goto tr892
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr877
+ goto tr890
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr879
+ goto tr893
case 82:
- goto st164
+ goto st162
case 92:
- goto st157
+ goto st155
case 114:
- goto st165
+ goto st163
}
- goto st55
- st164:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr890
+ }
+ goto st53
+ st162:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof164
+ goto _test_eof162
}
- st_case_164:
+ st_case_162:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 85:
- goto st160
+ goto st158
case 92:
- goto st157
+ goto st155
}
- goto st55
- st165:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st53
+ st163:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof165
+ goto _test_eof163
}
- st_case_165:
+ st_case_163:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr180
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr181
- case 12:
- goto tr1
+ goto tr179
case 13:
- goto st8
+ goto st6
case 32:
- goto tr180
+ goto tr178
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr182
+ goto tr180
case 92:
- goto st157
+ goto st155
case 117:
- goto st163
+ goto st161
}
- goto st55
-tr219:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr178
+ }
+ goto st53
+tr217:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st586
- st586:
+ goto st596
+ st596:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof586
+ goto _test_eof596
}
- st_case_586:
-//line plugins/parsers/influx/machine.go:22778
+ st_case_596:
+//line plugins/parsers/influx/machine.go:23073
switch ( m.data)[( m.p)] {
- case 9:
- goto tr877
case 10:
- goto tr736
+ goto tr891
case 11:
- goto tr878
- case 12:
- goto tr814
+ goto tr892
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr877
+ goto tr890
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr879
+ goto tr893
case 92:
- goto st157
+ goto st155
case 97:
- goto st161
+ goto st159
}
- goto st55
-tr220:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr890
+ }
+ goto st53
+tr218:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st587
- st587:
+ goto st597
+ st597:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof587
+ goto _test_eof597
}
- st_case_587:
-//line plugins/parsers/influx/machine.go:22813
+ st_case_597:
+//line plugins/parsers/influx/machine.go:23107
switch ( m.data)[( m.p)] {
- case 9:
- goto tr877
case 10:
- goto tr736
+ goto tr891
case 11:
- goto tr878
- case 12:
- goto tr814
+ goto tr892
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr877
+ goto tr890
case 34:
- goto tr91
+ goto tr89
case 44:
- goto tr879
+ goto tr893
case 92:
- goto st157
+ goto st155
case 114:
- goto st165
+ goto st163
}
- goto st55
- st166:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr890
+ }
+ goto st53
+ st164:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof166
+ goto _test_eof164
}
- st_case_166:
+ st_case_164:
switch ( m.data)[( m.p)] {
- case 9:
- goto st166
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr339
- case 12:
- goto st9
+ goto tr337
case 13:
- goto st8
+ goto st6
case 32:
- goto st166
+ goto st164
case 34:
- goto tr118
+ goto tr116
case 35:
goto st6
case 44:
goto st6
case 92:
- goto tr340
+ goto tr338
}
- goto tr337
-tr339:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st164
+ }
+ goto tr335
+tr337:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st167
- st167:
+ goto st165
+ st165:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof167
+ goto _test_eof165
}
- st_case_167:
-//line plugins/parsers/influx/machine.go:22876
+ st_case_165:
+//line plugins/parsers/influx/machine.go:23168
switch ( m.data)[( m.p)] {
- case 9:
- goto tr341
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr342
- case 12:
- goto tr38
+ goto tr340
case 13:
- goto st8
+ goto st6
case 32:
- goto tr341
+ goto tr339
case 34:
- goto tr85
+ goto tr83
case 35:
- goto st55
+ goto st53
case 44:
- goto tr182
+ goto tr180
case 92:
- goto tr340
+ goto tr338
}
- goto tr337
-tr341:
- ( m.cs) = 168
-//line plugins/parsers/influx/machine.go.rl:77
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr339
+ }
+ goto tr335
+tr339:
+ ( m.cs) = 166
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st168:
+ st166:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof168
+ goto _test_eof166
}
- st_case_168:
-//line plugins/parsers/influx/machine.go:22918
+ st_case_166:
+//line plugins/parsers/influx/machine.go:23209
switch ( m.data)[( m.p)] {
- case 9:
- goto st168
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr344
- case 12:
- goto st11
+ goto tr342
case 13:
- goto st8
+ goto st6
case 32:
- goto st168
+ goto st166
case 34:
- goto tr124
+ goto tr122
case 35:
- goto tr160
+ goto tr158
case 44:
goto st6
case 61:
- goto tr337
+ goto tr335
case 92:
- goto tr186
+ goto tr184
}
- goto tr184
-tr344:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st166
+ }
+ goto tr182
+tr342:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st169
-tr345:
- ( m.cs) = 169
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st167
+tr343:
+ ( m.cs) = 167
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st169:
+ st167:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof169
+ goto _test_eof167
}
- st_case_169:
-//line plugins/parsers/influx/machine.go:22972
+ st_case_167:
+//line plugins/parsers/influx/machine.go:23262
switch ( m.data)[( m.p)] {
- case 9:
- goto tr341
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr345
- case 12:
- goto tr38
+ goto tr343
case 13:
- goto st8
+ goto st6
case 32:
- goto tr341
+ goto tr339
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr346
+ goto tr344
case 92:
- goto tr186
+ goto tr184
}
- goto tr184
-tr342:
- ( m.cs) = 170
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr339
+ }
+ goto tr182
+tr340:
+ ( m.cs) = 168
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st170:
+ st168:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof170
+ goto _test_eof168
}
- st_case_170:
-//line plugins/parsers/influx/machine.go:23018
+ st_case_168:
+//line plugins/parsers/influx/machine.go:23307
switch ( m.data)[( m.p)] {
- case 9:
- goto tr341
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr345
- case 12:
- goto tr38
+ goto tr343
case 13:
- goto st8
+ goto st6
case 32:
- goto tr341
+ goto tr339
case 34:
- goto tr124
+ goto tr122
case 44:
- goto tr182
+ goto tr180
case 61:
- goto tr337
+ goto tr335
case 92:
- goto tr186
+ goto tr184
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr339
}
- goto tr184
-tr522:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr182
+tr538:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st171
- st171:
+ goto st169
+ st169:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof171
+ goto _test_eof169
}
- st_case_171:
-//line plugins/parsers/influx/machine.go:23053
+ st_case_169:
+//line plugins/parsers/influx/machine.go:23341
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr105
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st588
+ goto st598
}
goto st6
-tr523:
-//line plugins/parsers/influx/machine.go.rl:19
+tr539:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st588
- st588:
+ goto st598
+ st598:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof588
+ goto _test_eof598
}
- st_case_588:
-//line plugins/parsers/influx/machine.go:23081
+ st_case_598:
+//line plugins/parsers/influx/machine.go:23365
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st589
+ goto st599
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st589:
+ st599:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof589
+ goto _test_eof599
}
- st_case_589:
+ st_case_599:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st590
+ goto st600
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st590:
+ st600:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof590
+ goto _test_eof600
}
- st_case_590:
+ st_case_600:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st591
+ goto st601
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st591:
+ st601:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof591
+ goto _test_eof601
}
- st_case_591:
+ st_case_601:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st592
+ goto st602
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st592:
+ st602:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof592
+ goto _test_eof602
}
- st_case_592:
+ st_case_602:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st593
+ goto st603
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st593:
+ st603:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof593
+ goto _test_eof603
}
- st_case_593:
+ st_case_603:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st594
+ goto st604
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st594:
+ st604:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof594
+ goto _test_eof604
}
- st_case_594:
+ st_case_604:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st595
+ goto st605
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st595:
+ st605:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof595
+ goto _test_eof605
}
- st_case_595:
+ st_case_605:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st596
+ goto st606
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st596:
+ st606:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof596
+ goto _test_eof606
}
- st_case_596:
+ st_case_606:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st597
+ goto st607
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st597:
+ st607:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof597
+ goto _test_eof607
}
- st_case_597:
+ st_case_607:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st598
+ goto st608
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st598:
+ st608:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof598
+ goto _test_eof608
}
- st_case_598:
+ st_case_608:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st599
+ goto st609
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st599:
+ st609:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof599
+ goto _test_eof609
}
- st_case_599:
+ st_case_609:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st600
+ goto st610
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st600:
+ st610:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof600
+ goto _test_eof610
}
- st_case_600:
+ st_case_610:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st601
+ goto st611
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st601:
+ st611:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof601
+ goto _test_eof611
}
- st_case_601:
+ st_case_611:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st602
+ goto st612
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st602:
+ st612:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof602
+ goto _test_eof612
}
- st_case_602:
+ st_case_612:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st603
+ goto st613
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st603:
+ st613:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof603
+ goto _test_eof613
}
- st_case_603:
+ st_case_613:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st604
+ goto st614
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st604:
+ st614:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof604
+ goto _test_eof614
}
- st_case_604:
+ st_case_614:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st605
+ goto st615
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st605:
+ st615:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof605
+ goto _test_eof615
}
- st_case_605:
+ st_case_615:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st606
+ goto st616
}
case ( m.data)[( m.p)] >= 9:
- goto tr658
+ goto tr673
}
goto st6
- st606:
+ st616:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof606
+ goto _test_eof616
}
- st_case_606:
+ st_case_616:
switch ( m.data)[( m.p)] {
case 10:
- goto tr659
- case 12:
- goto tr450
+ goto tr674
case 13:
- goto tr661
+ goto tr676
case 32:
- goto tr658
+ goto tr673
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr658
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr673
}
goto st6
-tr903:
-//line plugins/parsers/influx/machine.go.rl:19
+tr917:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st172
-tr518:
- ( m.cs) = 172
-//line plugins/parsers/influx/machine.go.rl:121
+ goto st170
+tr534:
+ ( m.cs) = 170
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr910:
- ( m.cs) = 172
-//line plugins/parsers/influx/machine.go.rl:103
+tr924:
+ ( m.cs) = 170
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr913:
- ( m.cs) = 172
-//line plugins/parsers/influx/machine.go.rl:112
+tr926:
+ ( m.cs) = 170
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr917:
- ( m.cs) = 172
-//line plugins/parsers/influx/machine.go.rl:130
+tr929:
+ ( m.cs) = 170
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st172:
+ st170:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof172
+ goto _test_eof170
}
- st_case_172:
-//line plugins/parsers/influx/machine.go:23667
+ st_case_170:
+//line plugins/parsers/influx/machine.go:23913
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 61:
goto st6
case 92:
- goto tr349
+ goto tr347
}
- goto tr348
-tr348:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto tr346
+tr346:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st173
- st173:
+ goto st171
+ st171:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof173
+ goto _test_eof171
}
- st_case_173:
-//line plugins/parsers/influx/machine.go:23700
+ st_case_171:
+//line plugins/parsers/influx/machine.go:23945
switch ( m.data)[( m.p)] {
case 9:
goto st6
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 32:
goto st6
case 34:
- goto tr100
+ goto tr98
case 44:
goto st6
case 61:
- goto tr351
+ goto tr349
case 92:
- goto st185
+ goto st183
}
- goto st173
-tr351:
-//line plugins/parsers/influx/machine.go.rl:99
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
+ }
+ goto st171
+tr349:
+//line plugins/parsers/influx/machine.go.rl:108
- key = m.text()
+ m.key = m.text()
- goto st174
- st174:
+ goto st172
+ st172:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof174
+ goto _test_eof172
}
- st_case_174:
-//line plugins/parsers/influx/machine.go:23733
+ st_case_172:
+//line plugins/parsers/influx/machine.go:23977
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr353
+ goto tr351
case 45:
- goto tr167
+ goto tr165
case 46:
- goto tr168
+ goto tr166
case 48:
- goto tr169
+ goto tr167
case 70:
- goto tr171
+ goto tr352
case 84:
- goto tr172
+ goto tr353
case 92:
- goto st76
+ goto st73
case 102:
- goto tr173
+ goto tr354
case 116:
- goto tr174
+ goto tr355
}
if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr170
+ goto tr168
}
goto st6
-tr353:
- ( m.cs) = 607
-//line plugins/parsers/influx/machine.go.rl:139
+tr351:
+ ( m.cs) = 617
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st607:
+ st617:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof607
+ goto _test_eof617
}
- st_case_607:
-//line plugins/parsers/influx/machine.go:23782
+ st_case_617:
+//line plugins/parsers/influx/machine.go:24022
switch ( m.data)[( m.p)] {
case 10:
- goto tr650
- case 12:
- goto st261
+ goto tr665
case 13:
- goto tr652
+ goto tr667
case 32:
- goto tr902
+ goto tr916
case 34:
- goto tr26
+ goto tr25
case 44:
- goto tr903
+ goto tr917
case 92:
- goto tr27
+ goto tr26
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr902
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr916
}
goto tr23
-tr169:
-//line plugins/parsers/influx/machine.go.rl:19
+tr167:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st608
- st608:
+ goto st618
+ st618:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof608
+ goto _test_eof618
}
- st_case_608:
-//line plugins/parsers/influx/machine.go:23814
+ st_case_618:
+//line plugins/parsers/influx/machine.go:24052
switch ( m.data)[( m.p)] {
case 10:
- goto tr515
- case 12:
- goto tr516
+ goto tr532
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr514
+ goto tr531
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr518
+ goto tr534
case 46:
- goto st315
+ goto st325
case 69:
- goto st175
+ goto st173
case 92:
- goto st76
+ goto st73
case 101:
- goto st175
+ goto st173
case 105:
- goto st613
+ goto st623
case 117:
- goto st614
+ goto st624
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st609
+ goto st619
}
case ( m.data)[( m.p)] >= 9:
- goto tr514
+ goto tr531
}
goto st6
- st609:
+ st619:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof609
+ goto _test_eof619
}
- st_case_609:
+ st_case_619:
switch ( m.data)[( m.p)] {
case 10:
- goto tr515
- case 12:
- goto tr516
+ goto tr532
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr514
+ goto tr531
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr518
+ goto tr534
case 46:
- goto st315
+ goto st325
case 69:
- goto st175
+ goto st173
case 92:
- goto st76
+ goto st73
case 101:
- goto st175
+ goto st173
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st609
+ goto st619
}
case ( m.data)[( m.p)] >= 9:
- goto tr514
+ goto tr531
}
goto st6
- st175:
+ st173:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof175
+ goto _test_eof173
}
- st_case_175:
+ st_case_173:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr354
+ goto tr356
case 43:
- goto st176
+ goto st174
case 45:
- goto st176
+ goto st174
case 92:
- goto st76
+ goto st73
}
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st612
+ goto st622
}
goto st6
-tr354:
- ( m.cs) = 610
-//line plugins/parsers/influx/machine.go.rl:139
+tr356:
+ ( m.cs) = 620
+//line plugins/parsers/influx/machine.go.rl:148
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st610:
+ st620:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof610
+ goto _test_eof620
}
- st_case_610:
-//line plugins/parsers/influx/machine.go:23929
+ st_case_620:
+//line plugins/parsers/influx/machine.go:24159
switch ( m.data)[( m.p)] {
case 10:
- goto st262
+ goto tr101
case 13:
- goto st34
+ goto st32
case 32:
- goto st261
+ goto st271
case 44:
- goto st37
+ goto st35
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st611
+ goto st621
}
case ( m.data)[( m.p)] >= 9:
- goto st261
+ goto st271
}
- goto tr105
- st611:
+ goto tr103
+ st621:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof611
+ goto _test_eof621
}
- st_case_611:
+ st_case_621:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr516
+ goto tr921
case 44:
- goto tr907
+ goto tr922
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st611
+ goto st621
}
case ( m.data)[( m.p)] >= 9:
- goto tr516
+ goto tr921
}
- goto tr105
- st176:
+ goto tr103
+ st174:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof176
+ goto _test_eof174
}
- st_case_176:
+ st_case_174:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st612
+ goto st622
}
goto st6
- st612:
+ st622:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof612
+ goto _test_eof622
}
- st_case_612:
+ st_case_622:
switch ( m.data)[( m.p)] {
case 10:
- goto tr515
- case 12:
- goto tr516
+ goto tr532
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr514
+ goto tr531
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr518
+ goto tr534
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st612
+ goto st622
}
case ( m.data)[( m.p)] >= 9:
- goto tr514
+ goto tr531
}
goto st6
- st613:
+ st623:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof613
+ goto _test_eof623
}
- st_case_613:
+ st_case_623:
switch ( m.data)[( m.p)] {
case 10:
- goto tr722
- case 12:
- goto tr909
+ goto tr737
case 13:
- goto tr725
+ goto tr739
case 32:
- goto tr908
+ goto tr923
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr910
+ goto tr924
case 92:
- goto st76
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr908
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr923
}
goto st6
- st614:
+ st624:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof614
+ goto _test_eof624
}
- st_case_614:
+ st_case_624:
switch ( m.data)[( m.p)] {
case 10:
- goto tr729
- case 12:
- goto tr912
+ goto tr743
case 13:
- goto tr732
+ goto tr745
case 32:
- goto tr911
+ goto tr925
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr913
+ goto tr926
case 92:
- goto st76
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr911
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr925
}
goto st6
-tr170:
-//line plugins/parsers/influx/machine.go.rl:19
+tr168:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st615
- st615:
+ goto st625
+ st625:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof615
+ goto _test_eof625
}
- st_case_615:
-//line plugins/parsers/influx/machine.go:24085
+ st_case_625:
+//line plugins/parsers/influx/machine.go:24305
switch ( m.data)[( m.p)] {
case 10:
- goto tr515
- case 12:
- goto tr516
+ goto tr532
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr514
+ goto tr531
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr518
+ goto tr534
case 46:
- goto st315
+ goto st325
case 69:
- goto st175
+ goto st173
case 92:
- goto st76
+ goto st73
case 101:
- goto st175
+ goto st173
case 105:
- goto st613
+ goto st623
case 117:
- goto st614
+ goto st624
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st615
+ goto st625
}
case ( m.data)[( m.p)] >= 9:
- goto tr514
+ goto tr531
}
goto st6
-tr171:
-//line plugins/parsers/influx/machine.go.rl:19
+tr352:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st616
- st616:
+ goto st626
+ st626:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof616
+ goto _test_eof626
}
- st_case_616:
-//line plugins/parsers/influx/machine.go:24132
+ st_case_626:
+//line plugins/parsers/influx/machine.go:24350
switch ( m.data)[( m.p)] {
case 10:
- goto tr736
- case 12:
- goto tr916
+ goto tr749
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr915
+ goto tr928
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr917
+ goto tr929
case 65:
- goto st177
+ goto st175
case 92:
- goto st76
+ goto st73
case 97:
- goto st180
+ goto st178
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr915
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
}
goto st6
- st177:
+ st175:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof177
+ goto _test_eof175
}
- st_case_177:
+ st_case_175:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 76:
- goto st178
+ goto st176
case 92:
- goto st76
+ goto st73
}
goto st6
- st178:
+ st176:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof178
+ goto _test_eof176
}
- st_case_178:
+ st_case_176:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 83:
- goto st179
+ goto st177
case 92:
- goto st76
+ goto st73
}
goto st6
- st179:
+ st177:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof179
+ goto _test_eof177
}
- st_case_179:
+ st_case_177:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 69:
- goto st617
+ goto st627
case 92:
- goto st76
+ goto st73
}
goto st6
- st617:
+ st627:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof617
+ goto _test_eof627
}
- st_case_617:
+ st_case_627:
switch ( m.data)[( m.p)] {
case 10:
- goto tr736
- case 12:
- goto tr916
+ goto tr749
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr915
+ goto tr928
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr917
+ goto tr929
case 92:
- goto st76
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr915
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
}
goto st6
- st180:
+ st178:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof180
+ goto _test_eof178
}
- st_case_180:
+ st_case_178:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
case 108:
- goto st181
+ goto st179
}
goto st6
- st181:
+ st179:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof181
+ goto _test_eof179
}
- st_case_181:
+ st_case_179:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
case 115:
- goto st182
+ goto st180
}
goto st6
- st182:
+ st180:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof182
+ goto _test_eof180
}
- st_case_182:
+ st_case_180:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
case 101:
- goto st617
+ goto st627
}
goto st6
-tr172:
-//line plugins/parsers/influx/machine.go.rl:19
+tr353:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st618
- st618:
+ goto st628
+ st628:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof618
+ goto _test_eof628
}
- st_case_618:
-//line plugins/parsers/influx/machine.go:24313
+ st_case_628:
+//line plugins/parsers/influx/machine.go:24503
switch ( m.data)[( m.p)] {
case 10:
- goto tr736
- case 12:
- goto tr916
+ goto tr749
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr915
+ goto tr928
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr917
+ goto tr929
case 82:
- goto st183
+ goto st181
case 92:
- goto st76
+ goto st73
case 114:
- goto st184
+ goto st182
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr915
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
}
goto st6
- st183:
+ st181:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof183
+ goto _test_eof181
}
- st_case_183:
+ st_case_181:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 85:
- goto st179
+ goto st177
case 92:
- goto st76
+ goto st73
}
goto st6
- st184:
+ st182:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof184
+ goto _test_eof182
}
- st_case_184:
+ st_case_182:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
case 117:
- goto st182
+ goto st180
}
goto st6
-tr173:
-//line plugins/parsers/influx/machine.go.rl:19
+tr354:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st619
- st619:
+ goto st629
+ st629:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof619
+ goto _test_eof629
}
- st_case_619:
-//line plugins/parsers/influx/machine.go:24389
+ st_case_629:
+//line plugins/parsers/influx/machine.go:24569
switch ( m.data)[( m.p)] {
case 10:
- goto tr736
- case 12:
- goto tr916
+ goto tr749
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr915
+ goto tr928
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr917
+ goto tr929
case 92:
- goto st76
+ goto st73
case 97:
- goto st180
+ goto st178
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr915
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
}
goto st6
-tr174:
-//line plugins/parsers/influx/machine.go.rl:19
+tr355:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st620
- st620:
+ goto st630
+ st630:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof620
+ goto _test_eof630
}
- st_case_620:
-//line plugins/parsers/influx/machine.go:24423
+ st_case_630:
+//line plugins/parsers/influx/machine.go:24601
switch ( m.data)[( m.p)] {
case 10:
- goto tr736
- case 12:
- goto tr916
+ goto tr749
case 13:
- goto tr739
+ goto tr751
case 32:
- goto tr915
+ goto tr928
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr917
+ goto tr929
case 92:
- goto st76
+ goto st73
case 114:
- goto st184
+ goto st182
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr915
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
}
goto st6
-tr349:
-//line plugins/parsers/influx/machine.go.rl:19
+tr347:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st185
- st185:
+ goto st183
+ st183:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof185
+ goto _test_eof183
}
- st_case_185:
-//line plugins/parsers/influx/machine.go:24457
+ st_case_183:
+//line plugins/parsers/influx/machine.go:24633
switch ( m.data)[( m.p)] {
case 34:
- goto st173
+ goto st171
case 92:
- goto st173
+ goto st171
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -24469,2691 +24645,3234 @@ tr349:
goto tr8
}
goto st3
- st621:
+ st631:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof621
+ goto _test_eof631
}
- st_case_621:
+ st_case_631:
switch ( m.data)[( m.p)] {
case 10:
- goto tr515
- case 12:
- goto tr516
+ goto tr532
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr514
+ goto tr531
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr518
+ goto tr534
case 46:
- goto st315
+ goto st325
case 69:
- goto st175
+ goto st173
case 92:
- goto st76
+ goto st73
case 101:
- goto st175
+ goto st173
case 105:
- goto st613
+ goto st623
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st609
+ goto st619
}
case ( m.data)[( m.p)] >= 9:
- goto tr514
+ goto tr531
}
goto st6
- st622:
+ st632:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof622
+ goto _test_eof632
}
- st_case_622:
+ st_case_632:
switch ( m.data)[( m.p)] {
case 10:
- goto tr515
- case 12:
- goto tr516
+ goto tr532
case 13:
- goto tr517
+ goto tr533
case 32:
- goto tr514
+ goto tr531
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr518
+ goto tr534
case 46:
- goto st315
+ goto st325
case 69:
- goto st175
+ goto st173
case 92:
- goto st76
+ goto st73
case 101:
- goto st175
+ goto st173
case 105:
- goto st613
+ goto st623
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st622
+ goto st632
}
case ( m.data)[( m.p)] >= 9:
- goto tr514
+ goto tr531
}
goto st6
-tr162:
-//line plugins/parsers/influx/machine.go.rl:19
+tr169:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st186
- st186:
+ goto st633
+ st633:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof186
+ goto _test_eof633
}
- st_case_186:
-//line plugins/parsers/influx/machine.go:24560
+ st_case_633:
+//line plugins/parsers/influx/machine.go:24732
switch ( m.data)[( m.p)] {
- case 9:
- goto st50
case 10:
- goto st7
- case 11:
- goto tr162
- case 12:
- goto st2
+ goto tr891
case 13:
- goto st8
+ goto tr751
case 32:
- goto st50
+ goto tr928
case 34:
- goto tr97
+ goto tr29
case 44:
- goto st6
- case 61:
- goto tr165
+ goto tr929
+ case 65:
+ goto st184
case 92:
- goto tr163
+ goto st73
+ case 97:
+ goto st187
}
- goto tr160
-tr140:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st187
- st187:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
+ }
+ goto st6
+ st184:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof187
+ goto _test_eof184
}
- st_case_187:
-//line plugins/parsers/influx/machine.go:24595
+ st_case_184:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
- case 11:
- goto tr61
- case 13:
- goto tr47
- case 32:
- goto tr60
- case 44:
- goto tr62
- case 46:
- goto st188
- case 48:
- goto st624
- case 61:
- goto tr47
+ goto tr28
+ case 34:
+ goto tr29
+ case 76:
+ goto st185
case 92:
- goto st23
+ goto st73
}
- switch {
- case ( m.data)[( m.p)] > 12:
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st627
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr60
+ goto st6
+ st185:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof185
}
- goto st17
-tr141:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st188
- st188:
+ st_case_185:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr28
+ case 34:
+ goto tr29
+ case 83:
+ goto st186
+ case 92:
+ goto st73
+ }
+ goto st6
+ st186:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof188
+ goto _test_eof186
}
- st_case_188:
-//line plugins/parsers/influx/machine.go:24636
+ st_case_186:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr28
+ case 34:
+ goto tr29
+ case 69:
+ goto st634
+ case 92:
+ goto st73
+ }
+ goto st6
+ st634:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof634
+ }
+ st_case_634:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr891
+ case 13:
+ goto tr751
+ case 32:
+ goto tr928
+ case 34:
+ goto tr29
+ case 44:
+ goto tr929
+ case 92:
+ goto st73
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
+ }
+ goto st6
+ st187:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof187
+ }
+ st_case_187:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr28
+ case 34:
+ goto tr29
+ case 92:
+ goto st73
+ case 108:
+ goto st188
+ }
+ goto st6
+ st188:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof188
+ }
+ st_case_188:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr28
+ case 34:
+ goto tr29
+ case 92:
+ goto st73
+ case 115:
+ goto st189
+ }
+ goto st6
+ st189:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof189
+ }
+ st_case_189:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr28
+ case 34:
+ goto tr29
+ case 92:
+ goto st73
+ case 101:
+ goto st634
+ }
+ goto st6
+tr170:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st635
+ st635:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof635
+ }
+ st_case_635:
+//line plugins/parsers/influx/machine.go:24885
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr891
+ case 13:
+ goto tr751
+ case 32:
+ goto tr928
+ case 34:
+ goto tr29
+ case 44:
+ goto tr929
+ case 82:
+ goto st190
+ case 92:
+ goto st73
+ case 114:
+ goto st191
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
+ }
+ goto st6
+ st190:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof190
+ }
+ st_case_190:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr28
+ case 34:
+ goto tr29
+ case 85:
+ goto st186
+ case 92:
+ goto st73
+ }
+ goto st6
+ st191:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof191
+ }
+ st_case_191:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr28
+ case 34:
+ goto tr29
+ case 92:
+ goto st73
+ case 117:
+ goto st189
+ }
+ goto st6
+tr171:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st636
+ st636:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof636
+ }
+ st_case_636:
+//line plugins/parsers/influx/machine.go:24951
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr891
+ case 13:
+ goto tr751
+ case 32:
+ goto tr928
+ case 34:
+ goto tr29
+ case 44:
+ goto tr929
+ case 92:
+ goto st73
+ case 97:
+ goto st187
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
+ }
+ goto st6
+tr172:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st637
+ st637:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof637
+ }
+ st_case_637:
+//line plugins/parsers/influx/machine.go:24983
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr891
+ case 13:
+ goto tr751
+ case 32:
+ goto tr928
+ case 34:
+ goto tr29
+ case 44:
+ goto tr929
+ case 92:
+ goto st73
+ case 114:
+ goto st191
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr928
+ }
+ goto st6
+tr160:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st192
+ st192:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof192
+ }
+ st_case_192:
+//line plugins/parsers/influx/machine.go:25015
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr28
+ case 11:
+ goto tr160
+ case 13:
+ goto st6
+ case 32:
+ goto st48
+ case 34:
+ goto tr95
+ case 44:
+ goto st6
+ case 61:
+ goto tr163
+ case 92:
+ goto tr161
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st48
+ }
+ goto tr158
+tr138:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st193
+ st193:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof193
+ }
+ st_case_193:
+//line plugins/parsers/influx/machine.go:25049
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr45
+ case 11:
+ goto tr59
+ case 13:
+ goto tr45
+ case 32:
+ goto tr58
+ case 44:
+ goto tr60
+ case 46:
+ goto st194
+ case 48:
+ goto st639
+ case 61:
+ goto tr45
+ case 92:
+ goto st21
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st642
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr58
+ }
+ goto st15
+tr139:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st194
+ st194:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof194
+ }
+ st_case_194:
+//line plugins/parsers/influx/machine.go:25090
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr45
+ case 11:
+ goto tr59
+ case 13:
+ goto tr45
+ case 32:
+ goto tr58
+ case 44:
+ goto tr60
+ case 61:
+ goto tr45
+ case 92:
+ goto st21
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st638
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr58
+ }
+ goto st15
+ st638:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof638
+ }
+ st_case_638:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr730
+ case 11:
+ goto tr731
+ case 13:
+ goto tr732
+ case 32:
+ goto tr729
+ case 44:
+ goto tr733
+ case 61:
+ goto tr130
+ case 69:
+ goto st195
+ case 92:
+ goto st21
+ case 101:
+ goto st195
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st638
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr729
+ }
+ goto st15
+ st195:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof195
+ }
+ st_case_195:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr45
+ case 11:
+ goto tr59
+ case 13:
+ goto tr45
+ case 32:
+ goto tr58
+ case 34:
+ goto st196
+ case 44:
+ goto tr60
+ case 61:
+ goto tr45
+ case 92:
+ goto st21
+ }
+ switch {
+ case ( m.data)[( m.p)] < 43:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr58
+ }
+ case ( m.data)[( m.p)] > 45:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st479
+ }
+ default:
+ goto st196
+ }
+ goto st15
+ st196:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof196
+ }
+ st_case_196:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr45
+ case 11:
+ goto tr59
+ case 13:
+ goto tr45
+ case 32:
+ goto tr58
+ case 44:
+ goto tr60
+ case 61:
+ goto tr45
+ case 92:
+ goto st21
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st479
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr58
+ }
+ goto st15
+ st639:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof639
+ }
+ st_case_639:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr730
case 11:
- goto tr61
+ goto tr731
case 13:
- goto tr47
+ goto tr732
case 32:
- goto tr60
+ goto tr729
case 44:
- goto tr62
+ goto tr733
+ case 46:
+ goto st638
case 61:
- goto tr47
+ goto tr130
+ case 69:
+ goto st195
case 92:
- goto st23
+ goto st21
+ case 101:
+ goto st195
+ case 105:
+ goto st641
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st640
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr729
+ }
+ goto st15
+ st640:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof640
+ }
+ st_case_640:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr730
+ case 11:
+ goto tr731
+ case 13:
+ goto tr732
+ case 32:
+ goto tr729
+ case 44:
+ goto tr733
+ case 46:
+ goto st638
+ case 61:
+ goto tr130
+ case 69:
+ goto st195
+ case 92:
+ goto st21
+ case 101:
+ goto st195
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st640
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr729
+ }
+ goto st15
+ st641:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof641
+ }
+ st_case_641:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr942
+ case 11:
+ goto tr943
+ case 13:
+ goto tr944
+ case 32:
+ goto tr941
+ case 44:
+ goto tr945
+ case 61:
+ goto tr130
+ case 92:
+ goto st21
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr941
+ }
+ goto st15
+ st642:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof642
+ }
+ st_case_642:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr730
+ case 11:
+ goto tr731
+ case 13:
+ goto tr732
+ case 32:
+ goto tr729
+ case 44:
+ goto tr733
+ case 46:
+ goto st638
+ case 61:
+ goto tr130
+ case 69:
+ goto st195
+ case 92:
+ goto st21
+ case 101:
+ goto st195
+ case 105:
+ goto st641
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st642
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr729
+ }
+ goto st15
+tr140:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st643
+ st643:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof643
+ }
+ st_case_643:
+//line plugins/parsers/influx/machine.go:25364
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr730
+ case 11:
+ goto tr731
+ case 13:
+ goto tr732
+ case 32:
+ goto tr729
+ case 44:
+ goto tr733
+ case 46:
+ goto st638
+ case 61:
+ goto tr130
+ case 69:
+ goto st195
+ case 92:
+ goto st21
+ case 101:
+ goto st195
+ case 105:
+ goto st641
+ case 117:
+ goto st644
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st640
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr729
+ }
+ goto st15
+ st644:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof644
+ }
+ st_case_644:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr948
+ case 11:
+ goto tr949
+ case 13:
+ goto tr950
+ case 32:
+ goto tr947
+ case 44:
+ goto tr951
+ case 61:
+ goto tr130
+ case 92:
+ goto st21
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr947
+ }
+ goto st15
+tr141:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st645
+ st645:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof645
+ }
+ st_case_645:
+//line plugins/parsers/influx/machine.go:25436
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr730
+ case 11:
+ goto tr731
+ case 13:
+ goto tr732
+ case 32:
+ goto tr729
+ case 44:
+ goto tr733
+ case 46:
+ goto st638
+ case 61:
+ goto tr130
+ case 69:
+ goto st195
+ case 92:
+ goto st21
+ case 101:
+ goto st195
+ case 105:
+ goto st641
+ case 117:
+ goto st644
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st623
+ goto st645
}
case ( m.data)[( m.p)] >= 9:
+ goto tr729
+ }
+ goto st15
+tr142:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st646
+ st646:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof646
+ }
+ st_case_646:
+//line plugins/parsers/influx/machine.go:25483
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr954
+ case 11:
+ goto tr955
+ case 13:
+ goto tr956
+ case 32:
+ goto tr953
+ case 44:
+ goto tr957
+ case 61:
+ goto tr130
+ case 65:
+ goto st197
+ case 92:
+ goto st21
+ case 97:
+ goto st200
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr953
+ }
+ goto st15
+ st197:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof197
+ }
+ st_case_197:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr45
+ case 11:
+ goto tr59
+ case 13:
+ goto tr45
+ case 32:
+ goto tr58
+ case 44:
goto tr60
+ case 61:
+ goto tr45
+ case 76:
+ goto st198
+ case 92:
+ goto st21
}
- goto st17
- st623:
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr58
+ }
+ goto st15
+ st198:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof623
+ goto _test_eof198
}
- st_case_623:
+ st_case_198:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr45
case 11:
- goto tr716
+ goto tr59
case 13:
- goto tr717
+ goto tr45
case 32:
- goto tr712
+ goto tr58
case 44:
- goto tr718
+ goto tr60
case 61:
- goto tr132
- case 69:
- goto st189
+ goto tr45
+ case 83:
+ goto st199
case 92:
- goto st23
- case 101:
- goto st189
+ goto st21
}
- switch {
- case ( m.data)[( m.p)] > 12:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st623
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr712
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr58
}
- goto st17
- st189:
+ goto st15
+ st199:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof189
+ goto _test_eof199
}
- st_case_189:
+ st_case_199:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
- goto tr61
+ goto tr59
case 13:
- goto tr47
+ goto tr45
case 32:
- goto tr60
- case 34:
- goto st190
+ goto tr58
case 44:
- goto tr62
+ goto tr60
case 61:
- goto tr47
+ goto tr45
+ case 69:
+ goto st647
case 92:
- goto st23
+ goto st21
}
- switch {
- case ( m.data)[( m.p)] < 43:
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
- }
- case ( m.data)[( m.p)] > 45:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st469
- }
- default:
- goto st190
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr58
}
- goto st17
- st190:
+ goto st15
+ st647:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof190
+ goto _test_eof647
}
- st_case_190:
+ st_case_647:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr954
case 11:
- goto tr61
+ goto tr955
case 13:
- goto tr47
+ goto tr956
case 32:
- goto tr60
+ goto tr953
case 44:
- goto tr62
+ goto tr957
case 61:
- goto tr47
+ goto tr130
case 92:
- goto st23
+ goto st21
}
- switch {
- case ( m.data)[( m.p)] > 12:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st469
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr60
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr953
}
- goto st17
- st624:
+ goto st15
+ st200:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof624
+ goto _test_eof200
}
- st_case_624:
+ st_case_200:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr45
case 11:
- goto tr716
+ goto tr59
case 13:
- goto tr717
+ goto tr45
case 32:
- goto tr712
+ goto tr58
case 44:
- goto tr718
- case 46:
- goto st623
+ goto tr60
case 61:
- goto tr132
- case 69:
- goto st189
+ goto tr45
case 92:
- goto st23
- case 101:
- goto st189
- case 105:
- goto st626
+ goto st21
+ case 108:
+ goto st201
}
- switch {
- case ( m.data)[( m.p)] > 12:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st625
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr712
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr58
}
- goto st17
- st625:
+ goto st15
+ st201:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof625
+ goto _test_eof201
}
- st_case_625:
+ st_case_201:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr45
case 11:
- goto tr716
+ goto tr59
case 13:
- goto tr717
+ goto tr45
case 32:
- goto tr712
+ goto tr58
case 44:
- goto tr718
- case 46:
- goto st623
+ goto tr60
case 61:
- goto tr132
- case 69:
- goto st189
+ goto tr45
case 92:
- goto st23
- case 101:
- goto st189
+ goto st21
+ case 115:
+ goto st202
}
- switch {
- case ( m.data)[( m.p)] > 12:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st625
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr712
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr58
}
- goto st17
- st626:
+ goto st15
+ st202:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof626
+ goto _test_eof202
}
- st_case_626:
+ st_case_202:
switch ( m.data)[( m.p)] {
case 10:
- goto tr925
+ goto tr45
case 11:
- goto tr926
+ goto tr59
case 13:
- goto tr927
+ goto tr45
case 32:
- goto tr724
+ goto tr58
case 44:
- goto tr928
+ goto tr60
case 61:
- goto tr132
+ goto tr45
case 92:
- goto st23
+ goto st21
+ case 101:
+ goto st647
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr724
+ goto tr58
}
- goto st17
- st627:
+ goto st15
+tr143:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st648
+ st648:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof627
+ goto _test_eof648
}
- st_case_627:
+ st_case_648:
+//line plugins/parsers/influx/machine.go:25706
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr954
case 11:
- goto tr716
+ goto tr955
case 13:
- goto tr717
+ goto tr956
case 32:
- goto tr712
+ goto tr953
case 44:
- goto tr718
- case 46:
- goto st623
+ goto tr957
case 61:
- goto tr132
- case 69:
- goto st189
+ goto tr130
+ case 82:
+ goto st203
case 92:
- goto st23
- case 101:
- goto st189
- case 105:
- goto st626
+ goto st21
+ case 114:
+ goto st204
}
- switch {
- case ( m.data)[( m.p)] > 12:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st627
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr712
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr953
}
- goto st17
-tr142:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st628
- st628:
+ goto st15
+ st203:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof628
+ goto _test_eof203
}
- st_case_628:
-//line plugins/parsers/influx/machine.go:24910
+ st_case_203:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr45
case 11:
- goto tr716
+ goto tr59
case 13:
- goto tr717
+ goto tr45
case 32:
- goto tr712
+ goto tr58
case 44:
- goto tr718
- case 46:
- goto st623
+ goto tr60
case 61:
- goto tr132
- case 69:
- goto st189
+ goto tr45
+ case 85:
+ goto st199
case 92:
- goto st23
- case 101:
- goto st189
- case 105:
- goto st626
- case 117:
- goto st629
+ goto st21
}
- switch {
- case ( m.data)[( m.p)] > 12:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st625
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr712
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr58
}
- goto st17
- st629:
+ goto st15
+ st204:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof629
+ goto _test_eof204
}
- st_case_629:
+ st_case_204:
switch ( m.data)[( m.p)] {
case 10:
- goto tr930
+ goto tr45
case 11:
- goto tr931
+ goto tr59
case 13:
- goto tr932
+ goto tr45
case 32:
- goto tr731
+ goto tr58
case 44:
- goto tr933
+ goto tr60
case 61:
- goto tr132
+ goto tr45
case 92:
- goto st23
+ goto st21
+ case 117:
+ goto st202
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr731
+ goto tr58
}
- goto st17
-tr143:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st15
+tr144:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st630
- st630:
+ goto st649
+ st649:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof630
+ goto _test_eof649
}
- st_case_630:
-//line plugins/parsers/influx/machine.go:24982
+ st_case_649:
+//line plugins/parsers/influx/machine.go:25796
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr954
case 11:
- goto tr716
+ goto tr955
case 13:
- goto tr717
+ goto tr956
case 32:
- goto tr712
+ goto tr953
case 44:
- goto tr718
- case 46:
- goto st623
+ goto tr957
case 61:
- goto tr132
- case 69:
- goto st189
+ goto tr130
case 92:
- goto st23
- case 101:
- goto st189
- case 105:
- goto st626
- case 117:
- goto st629
+ goto st21
+ case 97:
+ goto st200
}
- switch {
- case ( m.data)[( m.p)] > 12:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st630
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr712
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr953
}
- goto st17
-tr144:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st15
+tr145:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st631
- st631:
+ goto st650
+ st650:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof631
+ goto _test_eof650
}
- st_case_631:
-//line plugins/parsers/influx/machine.go:25029
+ st_case_650:
+//line plugins/parsers/influx/machine.go:25830
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 11:
- goto tr936
+ goto tr955
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr738
+ goto tr953
case 44:
- goto tr938
+ goto tr957
case 61:
- goto tr132
- case 65:
- goto st191
+ goto tr130
case 92:
- goto st23
- case 97:
- goto st194
+ goto st21
+ case 114:
+ goto st204
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr738
+ goto tr953
}
- goto st17
- st191:
+ goto st15
+tr121:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st205
+tr380:
+ ( m.cs) = 205
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+//line plugins/parsers/influx/machine.go.rl:86
+
+ err = m.handler.SetMeasurement(m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+ goto _again
+ st205:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof191
+ goto _test_eof205
}
- st_case_191:
+ st_case_205:
+//line plugins/parsers/influx/machine.go:25881
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr28
case 11:
- goto tr61
+ goto tr380
case 13:
- goto tr47
+ goto st6
case 32:
- goto tr60
+ goto tr117
+ case 34:
+ goto tr122
case 44:
- goto tr62
+ goto tr90
case 61:
- goto tr47
- case 76:
- goto st192
+ goto tr381
case 92:
- goto st23
+ goto tr123
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ goto tr117
}
- goto st17
- st192:
+ goto tr119
+tr118:
+ ( m.cs) = 206
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+//line plugins/parsers/influx/machine.go.rl:86
+
+ err = m.handler.SetMeasurement(m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+ goto _again
+ st206:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof192
+ goto _test_eof206
}
- st_case_192:
+ st_case_206:
+//line plugins/parsers/influx/machine.go:25926
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr28
case 11:
- goto tr61
+ goto tr380
case 13:
- goto tr47
+ goto st6
case 32:
- goto tr60
+ goto tr117
+ case 34:
+ goto tr122
case 44:
- goto tr62
+ goto tr90
case 61:
- goto tr47
- case 83:
- goto st193
+ goto tr80
case 92:
- goto st23
+ goto tr123
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ goto tr117
}
- goto st17
- st193:
+ goto tr119
+tr497:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st207
+ st207:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof193
+ goto _test_eof207
}
- st_case_193:
+ st_case_207:
+//line plugins/parsers/influx/machine.go:25960
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
- case 11:
- goto tr61
- case 13:
- goto tr47
- case 32:
- goto tr60
- case 44:
- goto tr62
- case 61:
- goto tr47
- case 69:
- goto st632
+ goto tr28
+ case 34:
+ goto tr29
case 92:
- goto st23
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st651
}
- goto st17
- st632:
+ goto st6
+tr498:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st651
+ st651:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof632
+ goto _test_eof651
}
- st_case_632:
+ st_case_651:
+//line plugins/parsers/influx/machine.go:25984
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
- case 11:
- goto tr936
+ goto tr600
case 13:
- goto tr937
+ goto tr602
case 32:
- goto tr738
- case 44:
- goto tr938
- case 61:
- goto tr132
+ goto tr599
+ case 34:
+ goto tr29
case 92:
- goto st23
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr738
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st652
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st17
- st194:
+ goto st6
+ st652:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof194
+ goto _test_eof652
}
- st_case_194:
+ st_case_652:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
- case 11:
- goto tr61
+ goto tr600
case 13:
- goto tr47
+ goto tr602
case 32:
- goto tr60
- case 44:
- goto tr62
- case 61:
- goto tr47
+ goto tr599
+ case 34:
+ goto tr29
case 92:
- goto st23
- case 108:
- goto st195
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st653
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st17
- st195:
+ goto st6
+ st653:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof195
+ goto _test_eof653
}
- st_case_195:
+ st_case_653:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
- case 11:
- goto tr61
+ goto tr600
case 13:
- goto tr47
+ goto tr602
case 32:
- goto tr60
- case 44:
- goto tr62
- case 61:
- goto tr47
+ goto tr599
+ case 34:
+ goto tr29
case 92:
- goto st23
- case 115:
- goto st196
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st654
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st17
- st196:
+ goto st6
+ st654:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof196
+ goto _test_eof654
}
- st_case_196:
+ st_case_654:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
- case 11:
- goto tr61
+ goto tr600
case 13:
- goto tr47
+ goto tr602
case 32:
- goto tr60
- case 44:
- goto tr62
- case 61:
- goto tr47
+ goto tr599
+ case 34:
+ goto tr29
case 92:
- goto st23
- case 101:
- goto st632
- }
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ goto st73
}
- goto st17
-tr145:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st633
- st633:
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st655
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
+ }
+ goto st6
+ st655:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof633
+ goto _test_eof655
}
- st_case_633:
-//line plugins/parsers/influx/machine.go:25252
+ st_case_655:
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
- case 11:
- goto tr936
+ goto tr600
case 13:
- goto tr937
+ goto tr602
case 32:
- goto tr738
- case 44:
- goto tr938
- case 61:
- goto tr132
- case 82:
- goto st197
+ goto tr599
+ case 34:
+ goto tr29
case 92:
- goto st23
- case 114:
- goto st198
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr738
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st656
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st17
- st197:
+ goto st6
+ st656:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof197
+ goto _test_eof656
}
- st_case_197:
+ st_case_656:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
- case 11:
- goto tr61
+ goto tr600
case 13:
- goto tr47
+ goto tr602
case 32:
- goto tr60
- case 44:
- goto tr62
- case 61:
- goto tr47
- case 85:
- goto st193
+ goto tr599
+ case 34:
+ goto tr29
case 92:
- goto st23
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st657
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st17
- st198:
+ goto st6
+ st657:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof198
+ goto _test_eof657
}
- st_case_198:
+ st_case_657:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
- case 11:
- goto tr61
+ goto tr600
case 13:
- goto tr47
+ goto tr602
case 32:
- goto tr60
- case 44:
- goto tr62
- case 61:
- goto tr47
+ goto tr599
+ case 34:
+ goto tr29
case 92:
- goto st23
- case 117:
- goto st196
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr60
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st658
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st17
-tr146:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st634
- st634:
+ goto st6
+ st658:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof634
+ goto _test_eof658
}
- st_case_634:
-//line plugins/parsers/influx/machine.go:25342
+ st_case_658:
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
- case 11:
- goto tr936
+ goto tr600
case 13:
- goto tr937
+ goto tr602
case 32:
- goto tr738
- case 44:
- goto tr938
- case 61:
- goto tr132
+ goto tr599
+ case 34:
+ goto tr29
case 92:
- goto st23
- case 97:
- goto st194
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr738
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st659
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st17
-tr147:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st635
- st635:
+ goto st6
+ st659:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof635
+ goto _test_eof659
}
- st_case_635:
-//line plugins/parsers/influx/machine.go:25376
+ st_case_659:
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
- case 11:
- goto tr936
+ goto tr600
case 13:
- goto tr937
+ goto tr602
case 32:
- goto tr738
- case 44:
- goto tr938
- case 61:
- goto tr132
+ goto tr599
+ case 34:
+ goto tr29
case 92:
- goto st23
- case 114:
- goto st198
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr738
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st660
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
- goto st17
-tr123:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st199
-tr373:
- ( m.cs) = 199
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
-//line plugins/parsers/influx/machine.go.rl:77
-
- err = m.handler.SetMeasurement(m.text())
- if err != nil {
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; goto _out }
- }
-
- goto _again
- st199:
+ goto st6
+ st660:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof199
+ goto _test_eof660
}
- st_case_199:
-//line plugins/parsers/influx/machine.go:25427
+ st_case_660:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr119
case 10:
- goto st7
- case 11:
- goto tr373
- case 12:
- goto tr38
+ goto tr600
case 13:
- goto st8
+ goto tr602
case 32:
- goto tr119
+ goto tr599
case 34:
- goto tr124
- case 44:
- goto tr92
- case 61:
- goto tr374
+ goto tr29
case 92:
- goto tr125
+ goto st73
}
- goto tr121
-tr120:
- ( m.cs) = 200
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
-//line plugins/parsers/influx/machine.go.rl:77
-
- err = m.handler.SetMeasurement(m.text())
- if err != nil {
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; goto _out }
- }
-
- goto _again
- st200:
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st661
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
+ }
+ goto st6
+ st661:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof200
+ goto _test_eof661
}
- st_case_200:
-//line plugins/parsers/influx/machine.go:25473
+ st_case_661:
switch ( m.data)[( m.p)] {
- case 9:
- goto tr119
case 10:
- goto st7
- case 11:
- goto tr373
- case 12:
- goto tr38
+ goto tr600
case 13:
- goto st8
+ goto tr602
case 32:
- goto tr119
+ goto tr599
case 34:
- goto tr124
- case 44:
- goto tr92
- case 61:
- goto tr82
+ goto tr29
case 92:
- goto tr125
+ goto st73
}
- goto tr121
-tr480:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st201
- st201:
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st662
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
+ }
+ goto st6
+ st662:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof201
+ goto _test_eof662
}
- st_case_201:
-//line plugins/parsers/influx/machine.go:25508
+ st_case_662:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr105
+ goto tr600
case 13:
- goto st8
+ goto tr602
+ case 32:
+ goto tr599
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st636
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st663
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr599
}
goto st6
-tr481:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st636
- st636:
+ st663:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof636
+ goto _test_eof663
}
- st_case_636:
-//line plugins/parsers/influx/machine.go:25536
+ st_case_663:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr600
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st637
+ goto st664
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr599
}
goto st6
- st637:
+ st664:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof637
+ goto _test_eof664
}
- st_case_637:
+ st_case_664:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr600
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st638
+ goto st665
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr599
}
goto st6
- st638:
+ st665:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof638
+ goto _test_eof665
}
- st_case_638:
+ st_case_665:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr600
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st639
+ goto st666
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr599
}
goto st6
- st639:
+ st666:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof639
+ goto _test_eof666
}
- st_case_639:
+ st_case_666:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr600
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st640
+ goto st667
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr599
}
goto st6
- st640:
+ st667:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof640
+ goto _test_eof667
}
- st_case_640:
+ st_case_667:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr600
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st641
+ goto st668
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr599
}
goto st6
- st641:
+ st668:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof641
+ goto _test_eof668
}
- st_case_641:
+ st_case_668:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr600
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st642
+ goto st669
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr599
}
goto st6
- st642:
+ st669:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof642
+ goto _test_eof669
}
- st_case_642:
+ st_case_669:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr600
case 13:
- goto tr586
+ goto tr602
case 32:
- goto tr583
+ goto tr599
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st643
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr583
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr599
}
goto st6
- st643:
+tr494:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st208
+tr981:
+ ( m.cs) = 208
+//line plugins/parsers/influx/machine.go.rl:130
+
+ err = m.handler.AddFloat(m.key, m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+ goto _again
+tr986:
+ ( m.cs) = 208
+//line plugins/parsers/influx/machine.go.rl:112
+
+ err = m.handler.AddInt(m.key, m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+ goto _again
+tr989:
+ ( m.cs) = 208
+//line plugins/parsers/influx/machine.go.rl:121
+
+ err = m.handler.AddUint(m.key, m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+ goto _again
+tr992:
+ ( m.cs) = 208
+//line plugins/parsers/influx/machine.go.rl:139
+
+ err = m.handler.AddBool(m.key, m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; goto _out }
+ }
+
+ goto _again
+ st208:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof643
+ goto _test_eof208
}
- st_case_643:
+ st_case_208:
+//line plugins/parsers/influx/machine.go:26532
switch ( m.data)[( m.p)] {
+ case 9:
+ goto st6
case 10:
- goto tr584
- case 12:
- goto tr450
- case 13:
- goto tr586
+ goto tr28
case 32:
- goto tr583
+ goto st6
case 34:
- goto tr31
+ goto tr384
+ case 44:
+ goto st6
+ case 61:
+ goto st6
case 92:
- goto st76
+ goto tr385
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st644
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr583
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
}
- goto st6
- st644:
+ goto tr383
+tr383:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st209
+ st209:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof644
+ goto _test_eof209
}
- st_case_644:
+ st_case_209:
+//line plugins/parsers/influx/machine.go:26564
switch ( m.data)[( m.p)] {
+ case 9:
+ goto st6
case 10:
- goto tr584
- case 12:
- goto tr450
- case 13:
- goto tr586
+ goto tr28
case 32:
- goto tr583
+ goto st6
case 34:
- goto tr31
+ goto tr98
+ case 44:
+ goto st6
+ case 61:
+ goto tr387
case 92:
- goto st76
+ goto st223
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st645
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr583
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto st6
}
- goto st6
- st645:
+ goto st209
+tr387:
+//line plugins/parsers/influx/machine.go.rl:108
+
+ m.key = m.text()
+
+ goto st210
+ st210:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof645
+ goto _test_eof210
}
- st_case_645:
+ st_case_210:
+//line plugins/parsers/influx/machine.go:26596
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
- case 13:
- goto tr586
- case 32:
- goto tr583
+ goto tr28
case 34:
- goto tr31
+ goto tr351
+ case 45:
+ goto tr389
+ case 46:
+ goto tr390
+ case 48:
+ goto tr391
+ case 70:
+ goto tr110
+ case 84:
+ goto tr111
case 92:
- goto st76
+ goto st73
+ case 102:
+ goto tr112
+ case 116:
+ goto tr113
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st646
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr583
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto tr392
}
goto st6
- st646:
+tr389:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st211
+ st211:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof646
+ goto _test_eof211
}
- st_case_646:
+ st_case_211:
+//line plugins/parsers/influx/machine.go:26634
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
- case 13:
- goto tr586
- case 32:
- goto tr583
+ goto tr28
case 34:
- goto tr31
+ goto tr29
+ case 46:
+ goto st212
+ case 48:
+ goto st672
case 92:
- goto st76
+ goto st73
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st647
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr583
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st675
}
goto st6
- st647:
+tr390:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st212
+ st212:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof647
+ goto _test_eof212
}
- st_case_647:
+ st_case_212:
+//line plugins/parsers/influx/machine.go:26662
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
- case 13:
- goto tr586
- case 32:
- goto tr583
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st648
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr583
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st670
}
goto st6
- st648:
+ st670:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof648
+ goto _test_eof670
}
- st_case_648:
+ st_case_670:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr758
case 13:
- goto tr586
+ goto tr638
case 32:
- goto tr583
+ goto tr980
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr981
+ case 69:
+ goto st213
case 92:
- goto st76
+ goto st73
+ case 101:
+ goto st213
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st649
+ goto st670
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr980
}
goto st6
- st649:
+ st213:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof649
+ goto _test_eof213
}
- st_case_649:
+ st_case_213:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
- case 13:
- goto tr586
- case 32:
- goto tr583
+ goto tr28
case 34:
- goto tr31
+ goto tr356
+ case 43:
+ goto st214
+ case 45:
+ goto st214
case 92:
- goto st76
+ goto st73
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st650
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr583
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st671
}
goto st6
- st650:
+ st214:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof650
+ goto _test_eof214
}
- st_case_650:
+ st_case_214:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
- case 13:
- goto tr586
- case 32:
- goto tr583
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
+ goto st73
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st651
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr583
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st671
}
goto st6
- st651:
+ st671:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof651
+ goto _test_eof671
}
- st_case_651:
+ st_case_671:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr758
case 13:
- goto tr586
+ goto tr638
case 32:
- goto tr583
+ goto tr980
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr981
case 92:
- goto st76
+ goto st73
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st652
+ goto st671
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr980
}
goto st6
- st652:
+ st672:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof652
+ goto _test_eof672
}
- st_case_652:
+ st_case_672:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr758
case 13:
- goto tr586
+ goto tr638
case 32:
- goto tr583
+ goto tr980
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr981
+ case 46:
+ goto st670
+ case 69:
+ goto st213
case 92:
- goto st76
+ goto st73
+ case 101:
+ goto st213
+ case 105:
+ goto st674
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st653
+ goto st673
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr980
}
goto st6
- st653:
+ st673:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof653
+ goto _test_eof673
}
- st_case_653:
+ st_case_673:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr758
case 13:
- goto tr586
+ goto tr638
case 32:
- goto tr583
+ goto tr980
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr981
+ case 46:
+ goto st670
+ case 69:
+ goto st213
case 92:
- goto st76
+ goto st73
+ case 101:
+ goto st213
}
switch {
- case ( m.data)[( m.p)] > 11:
+ case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st654
+ goto st673
}
case ( m.data)[( m.p)] >= 9:
- goto tr583
+ goto tr980
}
goto st6
- st654:
+ st674:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof654
+ goto _test_eof674
}
- st_case_654:
+ st_case_674:
switch ( m.data)[( m.p)] {
case 10:
- goto tr584
- case 12:
- goto tr450
+ goto tr791
case 13:
- goto tr586
+ goto tr793
case 32:
- goto tr583
+ goto tr985
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr986
case 92:
- goto st76
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr583
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr985
}
goto st6
-tr477:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st202
-tr962:
- ( m.cs) = 202
-//line plugins/parsers/influx/machine.go.rl:121
-
- err = m.handler.AddFloat(key, m.text())
- if err != nil {
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; goto _out }
- }
-
- goto _again
-tr967:
- ( m.cs) = 202
-//line plugins/parsers/influx/machine.go.rl:103
-
- err = m.handler.AddInt(key, m.text())
- if err != nil {
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; goto _out }
- }
-
- goto _again
-tr970:
- ( m.cs) = 202
-//line plugins/parsers/influx/machine.go.rl:112
-
- err = m.handler.AddUint(key, m.text())
- if err != nil {
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; goto _out }
- }
-
- goto _again
-tr973:
- ( m.cs) = 202
-//line plugins/parsers/influx/machine.go.rl:130
-
- err = m.handler.AddBool(key, m.text())
- if err != nil {
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; goto _out }
- }
-
- goto _again
- st202:
+ st675:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof202
+ goto _test_eof675
}
- st_case_202:
-//line plugins/parsers/influx/machine.go:26122
+ st_case_675:
switch ( m.data)[( m.p)] {
- case 9:
- goto st6
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr758
case 13:
- goto st8
+ goto tr638
case 32:
- goto st6
+ goto tr980
case 34:
- goto tr377
+ goto tr29
case 44:
- goto st6
- case 61:
- goto st6
+ goto tr981
+ case 46:
+ goto st670
+ case 69:
+ goto st213
case 92:
- goto tr378
+ goto st73
+ case 101:
+ goto st213
+ case 105:
+ goto st674
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st675
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr980
}
- goto tr376
-tr376:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st6
+tr391:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st203
- st203:
+ goto st676
+ st676:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof203
+ goto _test_eof676
}
- st_case_203:
-//line plugins/parsers/influx/machine.go:26155
+ st_case_676:
+//line plugins/parsers/influx/machine.go:26913
switch ( m.data)[( m.p)] {
- case 9:
- goto st6
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr758
case 13:
- goto st8
+ goto tr638
case 32:
- goto st6
+ goto tr980
case 34:
- goto tr100
+ goto tr29
case 44:
- goto st6
- case 61:
- goto tr380
+ goto tr981
+ case 46:
+ goto st670
+ case 69:
+ goto st213
case 92:
- goto st217
+ goto st73
+ case 101:
+ goto st213
+ case 105:
+ goto st674
+ case 117:
+ goto st677
}
- goto st203
-tr380:
-//line plugins/parsers/influx/machine.go.rl:99
-
- key = m.text()
-
- goto st204
- st204:
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st673
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr980
+ }
+ goto st6
+ st677:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof204
+ goto _test_eof677
}
- st_case_204:
-//line plugins/parsers/influx/machine.go:26188
+ st_case_677:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr797
case 13:
- goto st8
+ goto tr799
+ case 32:
+ goto tr988
case 34:
- goto tr353
- case 45:
- goto tr108
- case 46:
- goto tr109
- case 48:
- goto tr110
- case 70:
- goto tr112
- case 84:
- goto tr113
+ goto tr29
+ case 44:
+ goto tr989
case 92:
- goto st76
- case 102:
- goto tr114
- case 116:
- goto tr115
+ goto st73
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto tr111
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr988
}
goto st6
-tr108:
-//line plugins/parsers/influx/machine.go.rl:19
+tr392:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st205
- st205:
+ goto st678
+ st678:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof205
+ goto _test_eof678
}
- st_case_205:
-//line plugins/parsers/influx/machine.go:26230
+ st_case_678:
+//line plugins/parsers/influx/machine.go:26981
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr758
case 13:
- goto st8
+ goto tr638
+ case 32:
+ goto tr980
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr981
case 46:
- goto st206
- case 48:
- goto st657
+ goto st670
+ case 69:
+ goto st213
case 92:
- goto st76
+ goto st73
+ case 101:
+ goto st213
+ case 105:
+ goto st674
+ case 117:
+ goto st677
}
- if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st660
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st678
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr980
}
goto st6
-tr109:
-//line plugins/parsers/influx/machine.go.rl:19
+tr110:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st206
- st206:
+ goto st679
+ st679:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof206
+ goto _test_eof679
}
- st_case_206:
-//line plugins/parsers/influx/machine.go:26262
+ st_case_679:
+//line plugins/parsers/influx/machine.go:27026
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr803
case 13:
- goto st8
+ goto tr805
+ case 32:
+ goto tr991
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr992
+ case 65:
+ goto st215
case 92:
- goto st76
+ goto st73
+ case 97:
+ goto st218
}
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st655
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr991
}
goto st6
- st655:
+ st215:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof655
+ goto _test_eof215
}
- st_case_655:
+ st_case_215:
switch ( m.data)[( m.p)] {
case 10:
- goto tr620
- case 12:
- goto tr516
- case 13:
- goto tr623
- case 32:
- goto tr961
+ goto tr28
case 34:
- goto tr31
- case 44:
- goto tr962
- case 69:
- goto st207
+ goto tr29
+ case 76:
+ goto st216
case 92:
- goto st76
- case 101:
- goto st207
- }
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st655
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr961
+ goto st73
}
goto st6
- st207:
+ st216:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof207
+ goto _test_eof216
}
- st_case_207:
+ st_case_216:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr354
- case 43:
- goto st208
- case 45:
- goto st208
+ goto tr29
+ case 83:
+ goto st217
case 92:
- goto st76
- }
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st656
+ goto st73
}
goto st6
- st208:
+ st217:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof208
+ goto _test_eof217
}
- st_case_208:
+ st_case_217:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
+ case 69:
+ goto st680
case 92:
- goto st76
- }
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st656
+ goto st73
}
goto st6
- st656:
+ st680:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof656
+ goto _test_eof680
}
- st_case_656:
+ st_case_680:
switch ( m.data)[( m.p)] {
case 10:
- goto tr620
- case 12:
- goto tr516
+ goto tr803
case 13:
- goto tr623
+ goto tr805
case 32:
- goto tr961
+ goto tr991
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr962
+ goto tr992
case 92:
- goto st76
+ goto st73
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st656
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr961
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr991
}
goto st6
- st657:
+ st218:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof657
+ goto _test_eof218
}
- st_case_657:
+ st_case_218:
switch ( m.data)[( m.p)] {
case 10:
- goto tr620
- case 12:
- goto tr516
- case 13:
- goto tr623
- case 32:
- goto tr961
+ goto tr28
case 34:
- goto tr31
- case 44:
- goto tr962
- case 46:
- goto st655
- case 69:
- goto st207
+ goto tr29
case 92:
- goto st76
- case 101:
- goto st207
- case 105:
- goto st659
+ goto st73
+ case 108:
+ goto st219
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st658
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr961
+ goto st6
+ st219:
+ if ( m.p)++; ( m.p) == ( m.pe) {
+ goto _test_eof219
+ }
+ st_case_219:
+ switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr28
+ case 34:
+ goto tr29
+ case 92:
+ goto st73
+ case 115:
+ goto st220
}
goto st6
- st658:
+ st220:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof658
+ goto _test_eof220
}
- st_case_658:
+ st_case_220:
switch ( m.data)[( m.p)] {
case 10:
- goto tr620
- case 12:
- goto tr516
- case 13:
- goto tr623
- case 32:
- goto tr961
+ goto tr28
case 34:
- goto tr31
- case 44:
- goto tr962
- case 46:
- goto st655
- case 69:
- goto st207
+ goto tr29
case 92:
- goto st76
+ goto st73
case 101:
- goto st207
- }
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st658
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr961
+ goto st680
}
goto st6
- st659:
+tr111:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st681
+ st681:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof659
+ goto _test_eof681
}
- st_case_659:
+ st_case_681:
+//line plugins/parsers/influx/machine.go:27179
switch ( m.data)[( m.p)] {
case 10:
- goto tr778
- case 12:
- goto tr909
+ goto tr803
case 13:
- goto tr780
+ goto tr805
case 32:
- goto tr966
+ goto tr991
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr967
+ goto tr992
+ case 82:
+ goto st221
case 92:
- goto st76
+ goto st73
+ case 114:
+ goto st222
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr966
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr991
}
goto st6
- st660:
+ st221:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof660
+ goto _test_eof221
}
- st_case_660:
+ st_case_221:
switch ( m.data)[( m.p)] {
case 10:
- goto tr620
- case 12:
- goto tr516
- case 13:
- goto tr623
- case 32:
- goto tr961
+ goto tr28
case 34:
- goto tr31
- case 44:
- goto tr962
- case 46:
- goto st655
- case 69:
- goto st207
+ goto tr29
+ case 85:
+ goto st217
case 92:
- goto st76
- case 101:
- goto st207
- case 105:
- goto st659
- }
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st660
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr961
+ goto st73
}
goto st6
-tr110:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st661
- st661:
+ st222:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof661
+ goto _test_eof222
}
- st_case_661:
-//line plugins/parsers/influx/machine.go:26537
+ st_case_222:
switch ( m.data)[( m.p)] {
case 10:
- goto tr620
- case 12:
- goto tr516
- case 13:
- goto tr623
- case 32:
- goto tr961
+ goto tr28
case 34:
- goto tr31
- case 44:
- goto tr962
- case 46:
- goto st655
- case 69:
- goto st207
+ goto tr29
case 92:
- goto st76
- case 101:
- goto st207
- case 105:
- goto st659
+ goto st73
case 117:
- goto st662
- }
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st658
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr961
+ goto st220
}
goto st6
- st662:
+tr112:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st682
+ st682:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof662
+ goto _test_eof682
}
- st_case_662:
+ st_case_682:
+//line plugins/parsers/influx/machine.go:27245
switch ( m.data)[( m.p)] {
case 10:
- goto tr784
- case 12:
- goto tr912
+ goto tr803
case 13:
- goto tr786
+ goto tr805
case 32:
- goto tr969
+ goto tr991
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr970
+ goto tr992
case 92:
- goto st76
+ goto st73
+ case 97:
+ goto st218
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr969
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr991
}
goto st6
-tr111:
-//line plugins/parsers/influx/machine.go.rl:19
+tr113:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st663
- st663:
+ goto st683
+ st683:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof663
+ goto _test_eof683
}
- st_case_663:
-//line plugins/parsers/influx/machine.go:26609
+ st_case_683:
+//line plugins/parsers/influx/machine.go:27277
switch ( m.data)[( m.p)] {
case 10:
- goto tr620
- case 12:
- goto tr516
+ goto tr803
case 13:
- goto tr623
+ goto tr805
case 32:
- goto tr961
+ goto tr991
case 34:
- goto tr31
- case 44:
- goto tr962
- case 46:
- goto st655
- case 69:
- goto st207
+ goto tr29
+ case 44:
+ goto tr992
case 92:
- goto st76
- case 101:
- goto st207
- case 105:
- goto st659
- case 117:
- goto st662
+ goto st73
+ case 114:
+ goto st222
}
- switch {
- case ( m.data)[( m.p)] > 11:
- if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st663
- }
- case ( m.data)[( m.p)] >= 9:
- goto tr961
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr991
}
goto st6
-tr112:
-//line plugins/parsers/influx/machine.go.rl:19
+tr385:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st664
- st664:
+ goto st223
+ st223:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof664
+ goto _test_eof223
}
- st_case_664:
-//line plugins/parsers/influx/machine.go:26656
+ st_case_223:
+//line plugins/parsers/influx/machine.go:27309
switch ( m.data)[( m.p)] {
- case 10:
- goto tr790
- case 12:
- goto tr916
- case 13:
- goto tr792
- case 32:
- goto tr972
case 34:
- goto tr31
- case 44:
- goto tr973
- case 65:
goto st209
case 92:
- goto st76
- case 97:
- goto st212
+ goto st209
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr972
+ switch {
+ case ( m.data)[( m.p)] > 10:
+ if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+ goto tr8
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr8
}
- goto st6
- st209:
+ goto st3
+tr106:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st224
+ st224:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof209
+ goto _test_eof224
}
- st_case_209:
+ st_case_224:
+//line plugins/parsers/influx/machine.go:27336
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
- case 76:
- goto st210
+ goto tr29
+ case 46:
+ goto st225
+ case 48:
+ goto st686
case 92:
- goto st76
+ goto st73
+ }
+ if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st689
}
goto st6
- st210:
+tr107:
+//line plugins/parsers/influx/machine.go.rl:28
+
+ m.pb = m.p
+
+ goto st225
+ st225:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof210
+ goto _test_eof225
}
- st_case_210:
+ st_case_225:
+//line plugins/parsers/influx/machine.go:27364
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
- case 83:
- goto st211
+ goto tr29
case 92:
- goto st76
+ goto st73
+ }
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st684
}
goto st6
- st211:
+ st684:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof211
+ goto _test_eof684
}
- st_case_211:
+ st_case_684:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr636
case 13:
- goto st8
+ goto tr638
+ case 32:
+ goto tr980
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr981
case 69:
- goto st665
+ goto st226
case 92:
- goto st76
+ goto st73
+ case 101:
+ goto st226
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st684
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr980
}
goto st6
- st665:
+ st226:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof665
+ goto _test_eof226
}
- st_case_665:
+ st_case_226:
switch ( m.data)[( m.p)] {
case 10:
- goto tr790
- case 12:
- goto tr916
- case 13:
- goto tr792
- case 32:
- goto tr972
+ goto tr28
case 34:
- goto tr31
- case 44:
- goto tr973
+ goto tr356
+ case 43:
+ goto st227
+ case 45:
+ goto st227
case 92:
- goto st76
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr972
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st685
}
goto st6
- st212:
+ st227:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof212
+ goto _test_eof227
}
- st_case_212:
+ st_case_227:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
- case 13:
- goto st8
+ goto tr28
case 34:
- goto tr31
+ goto tr29
case 92:
- goto st76
- case 108:
- goto st213
+ goto st73
+ }
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st685
}
goto st6
- st213:
+ st685:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof213
+ goto _test_eof685
}
- st_case_213:
+ st_case_685:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr636
case 13:
- goto st8
+ goto tr638
+ case 32:
+ goto tr980
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr981
case 92:
- goto st76
- case 115:
- goto st214
+ goto st73
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st685
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr980
}
goto st6
- st214:
+ st686:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof214
+ goto _test_eof686
}
- st_case_214:
+ st_case_686:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr636
case 13:
- goto st8
+ goto tr638
+ case 32:
+ goto tr980
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr981
+ case 46:
+ goto st684
+ case 69:
+ goto st226
case 92:
- goto st76
+ goto st73
case 101:
- goto st665
+ goto st226
+ case 105:
+ goto st688
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st687
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr980
}
goto st6
-tr113:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st666
- st666:
+ st687:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof666
+ goto _test_eof687
}
- st_case_666:
-//line plugins/parsers/influx/machine.go:26837
+ st_case_687:
switch ( m.data)[( m.p)] {
case 10:
- goto tr790
- case 12:
- goto tr916
+ goto tr636
case 13:
- goto tr792
+ goto tr638
case 32:
- goto tr972
+ goto tr980
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr973
- case 82:
- goto st215
+ goto tr981
+ case 46:
+ goto st684
+ case 69:
+ goto st226
case 92:
- goto st76
- case 114:
- goto st216
+ goto st73
+ case 101:
+ goto st226
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr972
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st687
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr980
}
goto st6
- st215:
+ st688:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof215
+ goto _test_eof688
}
- st_case_215:
+ st_case_688:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr817
case 13:
- goto st8
+ goto tr793
+ case 32:
+ goto tr985
case 34:
- goto tr31
- case 85:
- goto st211
+ goto tr29
+ case 44:
+ goto tr986
case 92:
- goto st76
+ goto st73
+ }
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr985
}
goto st6
- st216:
+ st689:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof216
+ goto _test_eof689
}
- st_case_216:
+ st_case_689:
switch ( m.data)[( m.p)] {
case 10:
- goto st7
- case 12:
- goto tr8
+ goto tr636
case 13:
- goto st8
+ goto tr638
+ case 32:
+ goto tr980
case 34:
- goto tr31
+ goto tr29
+ case 44:
+ goto tr981
+ case 46:
+ goto st684
+ case 69:
+ goto st226
case 92:
- goto st76
- case 117:
- goto st214
+ goto st73
+ case 101:
+ goto st226
+ case 105:
+ goto st688
+ }
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st689
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr980
}
goto st6
-tr114:
-//line plugins/parsers/influx/machine.go.rl:19
+tr108:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st667
- st667:
+ goto st690
+ st690:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof667
+ goto _test_eof690
}
- st_case_667:
-//line plugins/parsers/influx/machine.go:26913
+ st_case_690:
+//line plugins/parsers/influx/machine.go:27615
switch ( m.data)[( m.p)] {
case 10:
- goto tr790
- case 12:
- goto tr916
+ goto tr636
case 13:
- goto tr792
+ goto tr638
case 32:
- goto tr972
+ goto tr980
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr973
+ goto tr981
+ case 46:
+ goto st684
+ case 69:
+ goto st226
case 92:
- goto st76
- case 97:
- goto st212
+ goto st73
+ case 101:
+ goto st226
+ case 105:
+ goto st688
+ case 117:
+ goto st691
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr972
+ switch {
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st687
+ }
+ case ( m.data)[( m.p)] >= 9:
+ goto tr980
}
goto st6
-tr115:
-//line plugins/parsers/influx/machine.go.rl:19
-
- m.pb = m.p
-
- goto st668
- st668:
+ st691:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof668
+ goto _test_eof691
}
- st_case_668:
-//line plugins/parsers/influx/machine.go:26947
+ st_case_691:
switch ( m.data)[( m.p)] {
case 10:
- goto tr790
- case 12:
- goto tr916
+ goto tr822
case 13:
- goto tr792
+ goto tr799
case 32:
- goto tr972
+ goto tr988
case 34:
- goto tr31
+ goto tr29
case 44:
- goto tr973
+ goto tr989
case 92:
- goto st76
- case 114:
- goto st216
+ goto st73
}
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 {
- goto tr972
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto tr988
}
goto st6
-tr378:
-//line plugins/parsers/influx/machine.go.rl:19
+tr109:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st217
- st217:
+ goto st692
+ st692:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof217
+ goto _test_eof692
}
- st_case_217:
-//line plugins/parsers/influx/machine.go:26981
+ st_case_692:
+//line plugins/parsers/influx/machine.go:27683
switch ( m.data)[( m.p)] {
+ case 10:
+ goto tr636
+ case 13:
+ goto tr638
+ case 32:
+ goto tr980
case 34:
- goto st203
+ goto tr29
+ case 44:
+ goto tr981
+ case 46:
+ goto st684
+ case 69:
+ goto st226
case 92:
- goto st203
+ goto st73
+ case 101:
+ goto st226
+ case 105:
+ goto st688
+ case 117:
+ goto st691
}
switch {
- case ( m.data)[( m.p)] > 10:
- if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr8
+ case ( m.data)[( m.p)] > 12:
+ if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+ goto st692
}
case ( m.data)[( m.p)] >= 9:
- goto tr8
+ goto tr980
}
- goto st3
-tr96:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st6
+tr94:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st218
- st218:
+ goto st228
+ st228:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof218
+ goto _test_eof228
}
- st_case_218:
-//line plugins/parsers/influx/machine.go:27008
+ st_case_228:
+//line plugins/parsers/influx/machine.go:27728
switch ( m.data)[( m.p)] {
- case 9:
- goto st32
case 10:
- goto st7
+ goto tr28
case 11:
- goto tr96
- case 12:
- goto st2
+ goto tr94
case 13:
- goto st8
+ goto st6
case 32:
- goto st32
+ goto st30
case 34:
- goto tr97
+ goto tr95
case 44:
goto st6
case 61:
- goto tr101
+ goto tr99
case 92:
- goto tr98
+ goto tr96
}
- goto tr94
-tr74:
-//line plugins/parsers/influx/machine.go.rl:19
+ if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+ goto st30
+ }
+ goto tr92
+tr72:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st219
- st219:
+ goto st229
+ st229:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof219
+ goto _test_eof229
}
- st_case_219:
-//line plugins/parsers/influx/machine.go:27043
+ st_case_229:
+//line plugins/parsers/influx/machine.go:27762
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 46:
- goto st220
+ goto st230
case 48:
- goto st670
+ goto st694
case 92:
- goto st96
+ goto st94
}
switch {
case ( m.data)[( m.p)] > 12:
if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st673
+ goto st697
}
case ( m.data)[( m.p)] >= 9:
goto tr1
}
goto st1
-tr75:
-//line plugins/parsers/influx/machine.go.rl:19
+tr73:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st220
- st220:
+ goto st230
+ st230:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof220
+ goto _test_eof230
}
- st_case_220:
-//line plugins/parsers/influx/machine.go:27082
+ st_case_230:
+//line plugins/parsers/influx/machine.go:27801
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 92:
- goto st96
+ goto st94
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st669
+ goto st693
}
case ( m.data)[( m.p)] >= 9:
goto tr1
}
goto st1
- st669:
+ st693:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof669
+ goto _test_eof693
}
- st_case_669:
+ st_case_693:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 11:
- goto tr798
+ goto tr812
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr622
+ goto tr811
case 44:
- goto tr799
+ goto tr813
case 69:
- goto st221
+ goto st231
case 92:
- goto st96
+ goto st94
case 101:
- goto st221
+ goto st231
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st669
+ goto st693
}
case ( m.data)[( m.p)] >= 9:
- goto tr622
+ goto tr811
}
goto st1
- st221:
+ st231:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof221
+ goto _test_eof231
}
- st_case_221:
+ st_case_231:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 34:
- goto st222
+ goto st232
case 44:
goto tr4
case 92:
- goto st96
+ goto st94
}
switch {
case ( m.data)[( m.p)] < 43:
@@ -27162,1690 +27881,1690 @@ tr75:
}
case ( m.data)[( m.p)] > 45:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st520
+ goto st530
}
default:
- goto st222
+ goto st232
}
goto st1
- st222:
+ st232:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof222
+ goto _test_eof232
}
- st_case_222:
+ st_case_232:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 92:
- goto st96
+ goto st94
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st520
+ goto st530
}
case ( m.data)[( m.p)] >= 9:
goto tr1
}
goto st1
- st670:
+ st694:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof670
+ goto _test_eof694
}
- st_case_670:
+ st_case_694:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 11:
- goto tr798
+ goto tr812
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr622
+ goto tr811
case 44:
- goto tr799
+ goto tr813
case 46:
- goto st669
+ goto st693
case 69:
- goto st221
+ goto st231
case 92:
- goto st96
+ goto st94
case 101:
- goto st221
+ goto st231
case 105:
- goto st672
+ goto st696
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st671
+ goto st695
}
case ( m.data)[( m.p)] >= 9:
- goto tr622
+ goto tr811
}
goto st1
- st671:
+ st695:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof671
+ goto _test_eof695
}
- st_case_671:
+ st_case_695:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 11:
- goto tr798
+ goto tr812
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr622
+ goto tr811
case 44:
- goto tr799
+ goto tr813
case 46:
- goto st669
+ goto st693
case 69:
- goto st221
+ goto st231
case 92:
- goto st96
+ goto st94
case 101:
- goto st221
+ goto st231
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st671
+ goto st695
}
case ( m.data)[( m.p)] >= 9:
- goto tr622
+ goto tr811
}
goto st1
- st672:
+ st696:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof672
+ goto _test_eof696
}
- st_case_672:
+ st_case_696:
switch ( m.data)[( m.p)] {
case 10:
- goto tr925
+ goto tr942
case 11:
- goto tr981
+ goto tr1006
case 13:
- goto tr927
+ goto tr944
case 32:
- goto tr804
+ goto tr1005
case 44:
- goto tr982
+ goto tr1007
case 92:
- goto st96
+ goto st94
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr804
+ goto tr1005
}
goto st1
- st673:
+ st697:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof673
+ goto _test_eof697
}
- st_case_673:
+ st_case_697:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 11:
- goto tr798
+ goto tr812
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr622
+ goto tr811
case 44:
- goto tr799
+ goto tr813
case 46:
- goto st669
+ goto st693
case 69:
- goto st221
+ goto st231
case 92:
- goto st96
+ goto st94
case 101:
- goto st221
+ goto st231
case 105:
- goto st672
+ goto st696
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st673
+ goto st697
}
case ( m.data)[( m.p)] >= 9:
- goto tr622
+ goto tr811
}
goto st1
-tr76:
-//line plugins/parsers/influx/machine.go.rl:19
+tr74:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st674
- st674:
+ goto st698
+ st698:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof674
+ goto _test_eof698
}
- st_case_674:
-//line plugins/parsers/influx/machine.go:27340
+ st_case_698:
+//line plugins/parsers/influx/machine.go:28059
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 11:
- goto tr798
+ goto tr812
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr622
+ goto tr811
case 44:
- goto tr799
+ goto tr813
case 46:
- goto st669
+ goto st693
case 69:
- goto st221
+ goto st231
case 92:
- goto st96
+ goto st94
case 101:
- goto st221
+ goto st231
case 105:
- goto st672
+ goto st696
case 117:
- goto st675
+ goto st699
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st671
+ goto st695
}
case ( m.data)[( m.p)] >= 9:
- goto tr622
+ goto tr811
}
goto st1
- st675:
+ st699:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof675
+ goto _test_eof699
}
- st_case_675:
+ st_case_699:
switch ( m.data)[( m.p)] {
case 10:
- goto tr930
+ goto tr948
case 11:
- goto tr984
+ goto tr1010
case 13:
- goto tr932
+ goto tr950
case 32:
- goto tr809
+ goto tr1009
case 44:
- goto tr985
+ goto tr1011
case 92:
- goto st96
+ goto st94
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr809
+ goto tr1009
}
goto st1
-tr77:
-//line plugins/parsers/influx/machine.go.rl:19
+tr75:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st676
- st676:
+ goto st700
+ st700:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof676
+ goto _test_eof700
}
- st_case_676:
-//line plugins/parsers/influx/machine.go:27408
+ st_case_700:
+//line plugins/parsers/influx/machine.go:28127
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 11:
- goto tr798
+ goto tr812
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr622
+ goto tr811
case 44:
- goto tr799
+ goto tr813
case 46:
- goto st669
+ goto st693
case 69:
- goto st221
+ goto st231
case 92:
- goto st96
+ goto st94
case 101:
- goto st221
+ goto st231
case 105:
- goto st672
+ goto st696
case 117:
- goto st675
+ goto st699
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st676
+ goto st700
}
case ( m.data)[( m.p)] >= 9:
- goto tr622
+ goto tr811
}
goto st1
-tr78:
-//line plugins/parsers/influx/machine.go.rl:19
+tr76:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st677
- st677:
+ goto st701
+ st701:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof677
+ goto _test_eof701
}
- st_case_677:
-//line plugins/parsers/influx/machine.go:27453
+ st_case_701:
+//line plugins/parsers/influx/machine.go:28172
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 11:
- goto tr987
+ goto tr1014
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr814
+ goto tr1013
case 44:
- goto tr988
+ goto tr1015
case 65:
- goto st223
+ goto st233
case 92:
- goto st96
+ goto st94
case 97:
- goto st226
+ goto st236
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr814
+ goto tr1013
}
goto st1
- st223:
+ st233:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof223
+ goto _test_eof233
}
- st_case_223:
+ st_case_233:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 76:
- goto st224
+ goto st234
case 92:
- goto st96
+ goto st94
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
goto st1
- st224:
+ st234:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof224
+ goto _test_eof234
}
- st_case_224:
+ st_case_234:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 83:
- goto st225
+ goto st235
case 92:
- goto st96
+ goto st94
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
goto st1
- st225:
+ st235:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof225
+ goto _test_eof235
}
- st_case_225:
+ st_case_235:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 69:
- goto st678
+ goto st702
case 92:
- goto st96
+ goto st94
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
goto st1
- st678:
+ st702:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof678
+ goto _test_eof702
}
- st_case_678:
+ st_case_702:
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 11:
- goto tr987
+ goto tr1014
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr814
+ goto tr1013
case 44:
- goto tr988
+ goto tr1015
case 92:
- goto st96
+ goto st94
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr814
+ goto tr1013
}
goto st1
- st226:
+ st236:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof226
+ goto _test_eof236
}
- st_case_226:
+ st_case_236:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 92:
- goto st96
+ goto st94
case 108:
- goto st227
+ goto st237
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
goto st1
- st227:
+ st237:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof227
+ goto _test_eof237
}
- st_case_227:
+ st_case_237:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 92:
- goto st96
+ goto st94
case 115:
- goto st228
+ goto st238
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
goto st1
- st228:
+ st238:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof228
+ goto _test_eof238
}
- st_case_228:
+ st_case_238:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 92:
- goto st96
+ goto st94
case 101:
- goto st678
+ goto st702
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
goto st1
-tr79:
-//line plugins/parsers/influx/machine.go.rl:19
+tr77:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st679
- st679:
+ goto st703
+ st703:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof679
+ goto _test_eof703
}
- st_case_679:
-//line plugins/parsers/influx/machine.go:27660
+ st_case_703:
+//line plugins/parsers/influx/machine.go:28379
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 11:
- goto tr987
+ goto tr1014
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr814
+ goto tr1013
case 44:
- goto tr988
+ goto tr1015
case 82:
- goto st229
+ goto st239
case 92:
- goto st96
+ goto st94
case 114:
- goto st230
+ goto st240
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr814
+ goto tr1013
}
goto st1
- st229:
+ st239:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof229
+ goto _test_eof239
}
- st_case_229:
+ st_case_239:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 85:
- goto st225
+ goto st235
case 92:
- goto st96
+ goto st94
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
goto st1
- st230:
+ st240:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof230
+ goto _test_eof240
}
- st_case_230:
+ st_case_240:
switch ( m.data)[( m.p)] {
case 10:
- goto tr47
+ goto tr45
case 11:
goto tr3
case 13:
- goto tr47
+ goto tr45
case 32:
goto tr1
case 44:
goto tr4
case 92:
- goto st96
+ goto st94
case 117:
- goto st228
+ goto st238
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
goto tr1
}
goto st1
-tr80:
-//line plugins/parsers/influx/machine.go.rl:19
+tr78:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st680
- st680:
+ goto st704
+ st704:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof680
+ goto _test_eof704
}
- st_case_680:
-//line plugins/parsers/influx/machine.go:27744
+ st_case_704:
+//line plugins/parsers/influx/machine.go:28463
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 11:
- goto tr987
+ goto tr1014
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr814
+ goto tr1013
case 44:
- goto tr988
+ goto tr1015
case 92:
- goto st96
+ goto st94
case 97:
- goto st226
+ goto st236
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr814
+ goto tr1013
}
goto st1
-tr81:
-//line plugins/parsers/influx/machine.go.rl:19
+tr79:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st681
- st681:
+ goto st705
+ st705:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof681
+ goto _test_eof705
}
- st_case_681:
-//line plugins/parsers/influx/machine.go:27776
+ st_case_705:
+//line plugins/parsers/influx/machine.go:28495
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 11:
- goto tr987
+ goto tr1014
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr814
+ goto tr1013
case 44:
- goto tr988
+ goto tr1015
case 92:
- goto st96
+ goto st94
case 114:
- goto st230
+ goto st240
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr814
+ goto tr1013
}
goto st1
-tr44:
-//line plugins/parsers/influx/machine.go.rl:19
+tr42:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st231
-tr405:
- ( m.cs) = 231
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st241
+tr422:
+ ( m.cs) = 241
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st231:
+ st241:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof231
+ goto _test_eof241
}
- st_case_231:
-//line plugins/parsers/influx/machine.go:27825
+ st_case_241:
+//line plugins/parsers/influx/machine.go:28544
switch ( m.data)[( m.p)] {
case 10:
- goto tr404
+ goto tr421
case 11:
- goto tr405
+ goto tr422
case 13:
- goto tr404
+ goto tr421
case 32:
- goto tr38
+ goto tr36
case 44:
goto tr4
case 61:
- goto tr406
+ goto tr423
case 92:
- goto tr45
+ goto tr43
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr38
+ goto tr36
}
- goto tr41
-tr40:
- ( m.cs) = 232
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr39
+tr38:
+ ( m.cs) = 242
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st232:
+ st242:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof232
+ goto _test_eof242
}
- st_case_232:
-//line plugins/parsers/influx/machine.go:27868
+ st_case_242:
+//line plugins/parsers/influx/machine.go:28587
switch ( m.data)[( m.p)] {
case 10:
- goto tr404
+ goto tr421
case 11:
- goto tr405
+ goto tr422
case 13:
- goto tr404
+ goto tr421
case 32:
- goto tr38
+ goto tr36
case 44:
goto tr4
case 61:
- goto tr33
+ goto tr31
case 92:
- goto tr45
+ goto tr43
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr38
+ goto tr36
}
- goto tr41
-tr445:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr39
+tr462:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st233
- st233:
+ goto st243
+ st243:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof233
+ goto _test_eof243
}
- st_case_233:
-//line plugins/parsers/influx/machine.go:27900
+ st_case_243:
+//line plugins/parsers/influx/machine.go:28619
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st682
+ goto st706
}
- goto tr407
-tr446:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr424
+tr463:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st682
- st682:
+ goto st706
+ st706:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof682
+ goto _test_eof706
}
- st_case_682:
-//line plugins/parsers/influx/machine.go:27916
+ st_case_706:
+//line plugins/parsers/influx/machine.go:28635
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st683
+ goto st707
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st683:
+ goto tr424
+ st707:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof683
+ goto _test_eof707
}
- st_case_683:
+ st_case_707:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st684
+ goto st708
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st684:
+ goto tr424
+ st708:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof684
+ goto _test_eof708
}
- st_case_684:
+ st_case_708:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st685
+ goto st709
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st685:
+ goto tr424
+ st709:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof685
+ goto _test_eof709
}
- st_case_685:
+ st_case_709:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st686
+ goto st710
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st686:
+ goto tr424
+ st710:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof686
+ goto _test_eof710
}
- st_case_686:
+ st_case_710:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st687
+ goto st711
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st687:
+ goto tr424
+ st711:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof687
+ goto _test_eof711
}
- st_case_687:
+ st_case_711:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st688
+ goto st712
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st688:
+ goto tr424
+ st712:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof688
+ goto _test_eof712
}
- st_case_688:
+ st_case_712:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st689
+ goto st713
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st689:
+ goto tr424
+ st713:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof689
+ goto _test_eof713
}
- st_case_689:
+ st_case_713:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st690
+ goto st714
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st690:
+ goto tr424
+ st714:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof690
+ goto _test_eof714
}
- st_case_690:
+ st_case_714:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st691
+ goto st715
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st691:
+ goto tr424
+ st715:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof691
+ goto _test_eof715
}
- st_case_691:
+ st_case_715:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st692
+ goto st716
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st692:
+ goto tr424
+ st716:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof692
+ goto _test_eof716
}
- st_case_692:
+ st_case_716:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st693
+ goto st717
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st693:
+ goto tr424
+ st717:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof693
+ goto _test_eof717
}
- st_case_693:
+ st_case_717:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st694
+ goto st718
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st694:
+ goto tr424
+ st718:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof694
+ goto _test_eof718
}
- st_case_694:
+ st_case_718:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st695
+ goto st719
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st695:
+ goto tr424
+ st719:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof695
+ goto _test_eof719
}
- st_case_695:
+ st_case_719:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st696
+ goto st720
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st696:
+ goto tr424
+ st720:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof696
+ goto _test_eof720
}
- st_case_696:
+ st_case_720:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st697
+ goto st721
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st697:
+ goto tr424
+ st721:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof697
+ goto _test_eof721
}
- st_case_697:
+ st_case_721:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st698
+ goto st722
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st698:
+ goto tr424
+ st722:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof698
+ goto _test_eof722
}
- st_case_698:
+ st_case_722:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st699
+ goto st723
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st699:
+ goto tr424
+ st723:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof699
+ goto _test_eof723
}
- st_case_699:
+ st_case_723:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st700
+ goto st724
}
case ( m.data)[( m.p)] >= 9:
- goto tr450
+ goto tr467
}
- goto tr407
- st700:
+ goto tr424
+ st724:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof700
+ goto _test_eof724
}
- st_case_700:
+ st_case_724:
switch ( m.data)[( m.p)] {
case 10:
- goto tr451
+ goto tr468
case 13:
- goto tr453
+ goto tr470
case 32:
- goto tr450
+ goto tr467
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr450
+ goto tr467
}
- goto tr407
+ goto tr424
tr15:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st234
- st234:
+ goto st244
+ st244:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof234
+ goto _test_eof244
}
- st_case_234:
-//line plugins/parsers/influx/machine.go:28336
+ st_case_244:
+//line plugins/parsers/influx/machine.go:29055
switch ( m.data)[( m.p)] {
case 46:
- goto st235
+ goto st245
case 48:
- goto st702
+ goto st726
}
if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st705
+ goto st729
}
goto tr8
tr16:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st235
- st235:
+ goto st245
+ st245:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof235
+ goto _test_eof245
}
- st_case_235:
-//line plugins/parsers/influx/machine.go:28358
+ st_case_245:
+//line plugins/parsers/influx/machine.go:29077
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st701
+ goto st725
}
goto tr8
- st701:
+ st725:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof701
+ goto _test_eof725
}
- st_case_701:
+ st_case_725:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr516
+ goto tr921
case 44:
- goto tr907
+ goto tr922
case 69:
- goto st236
+ goto st246
case 101:
- goto st236
+ goto st246
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st701
+ goto st725
}
case ( m.data)[( m.p)] >= 9:
- goto tr516
+ goto tr921
}
- goto tr105
- st236:
+ goto tr103
+ st246:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof236
+ goto _test_eof246
}
- st_case_236:
+ st_case_246:
switch ( m.data)[( m.p)] {
case 34:
- goto st237
+ goto st247
case 43:
- goto st237
+ goto st247
case 45:
- goto st237
+ goto st247
}
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st611
+ goto st621
}
goto tr8
- st237:
+ st247:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof237
+ goto _test_eof247
}
- st_case_237:
+ st_case_247:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st611
+ goto st621
}
goto tr8
- st702:
+ st726:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof702
+ goto _test_eof726
}
- st_case_702:
+ st_case_726:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr516
+ goto tr921
case 44:
- goto tr907
+ goto tr922
case 46:
- goto st701
+ goto st725
case 69:
- goto st236
+ goto st246
case 101:
- goto st236
+ goto st246
case 105:
- goto st704
+ goto st728
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st703
+ goto st727
}
case ( m.data)[( m.p)] >= 9:
- goto tr516
+ goto tr921
}
- goto tr105
- st703:
+ goto tr103
+ st727:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof703
+ goto _test_eof727
}
- st_case_703:
+ st_case_727:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr516
+ goto tr921
case 44:
- goto tr907
+ goto tr922
case 46:
- goto st701
+ goto st725
case 69:
- goto st236
+ goto st246
case 101:
- goto st236
+ goto st246
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st703
+ goto st727
}
case ( m.data)[( m.p)] >= 9:
- goto tr516
+ goto tr921
}
- goto tr105
- st704:
+ goto tr103
+ st728:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof704
+ goto _test_eof728
}
- st_case_704:
+ st_case_728:
switch ( m.data)[( m.p)] {
case 10:
- goto tr925
+ goto tr942
case 13:
- goto tr927
+ goto tr944
case 32:
- goto tr909
+ goto tr1041
case 44:
- goto tr1014
+ goto tr1042
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr909
+ goto tr1041
}
- goto tr105
- st705:
+ goto tr103
+ st729:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof705
+ goto _test_eof729
}
- st_case_705:
+ st_case_729:
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr516
+ goto tr921
case 44:
- goto tr907
+ goto tr922
case 46:
- goto st701
+ goto st725
case 69:
- goto st236
+ goto st246
case 101:
- goto st236
+ goto st246
case 105:
- goto st704
+ goto st728
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st705
+ goto st729
}
case ( m.data)[( m.p)] >= 9:
- goto tr516
+ goto tr921
}
- goto tr105
+ goto tr103
tr17:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st706
- st706:
+ goto st730
+ st730:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof706
+ goto _test_eof730
}
- st_case_706:
-//line plugins/parsers/influx/machine.go:28541
+ st_case_730:
+//line plugins/parsers/influx/machine.go:29260
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr516
+ goto tr921
case 44:
- goto tr907
+ goto tr922
case 46:
- goto st701
+ goto st725
case 69:
- goto st236
+ goto st246
case 101:
- goto st236
+ goto st246
case 105:
- goto st704
+ goto st728
case 117:
- goto st707
+ goto st731
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st703
+ goto st727
}
case ( m.data)[( m.p)] >= 9:
- goto tr516
+ goto tr921
}
- goto tr105
- st707:
+ goto tr103
+ st731:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof707
+ goto _test_eof731
}
- st_case_707:
+ st_case_731:
switch ( m.data)[( m.p)] {
case 10:
- goto tr930
+ goto tr948
case 13:
- goto tr932
+ goto tr950
case 32:
- goto tr912
+ goto tr1044
case 44:
- goto tr1016
+ goto tr1045
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr912
+ goto tr1044
}
- goto tr105
+ goto tr103
tr18:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st708
- st708:
+ goto st732
+ st732:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof708
+ goto _test_eof732
}
- st_case_708:
-//line plugins/parsers/influx/machine.go:28601
+ st_case_732:
+//line plugins/parsers/influx/machine.go:29320
switch ( m.data)[( m.p)] {
case 10:
- goto tr715
+ goto tr730
case 13:
- goto tr717
+ goto tr732
case 32:
- goto tr516
+ goto tr921
case 44:
- goto tr907
+ goto tr922
case 46:
- goto st701
+ goto st725
case 69:
- goto st236
+ goto st246
case 101:
- goto st236
+ goto st246
case 105:
- goto st704
+ goto st728
case 117:
- goto st707
+ goto st731
}
switch {
case ( m.data)[( m.p)] > 12:
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
- goto st708
+ goto st732
}
case ( m.data)[( m.p)] >= 9:
- goto tr516
+ goto tr921
}
- goto tr105
+ goto tr103
tr19:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st709
- st709:
+ goto st733
+ st733:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof709
+ goto _test_eof733
}
- st_case_709:
-//line plugins/parsers/influx/machine.go:28642
+ st_case_733:
+//line plugins/parsers/influx/machine.go:29361
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr916
+ goto tr1047
case 44:
- goto tr1018
+ goto tr1048
case 65:
- goto st238
+ goto st248
case 97:
- goto st241
+ goto st251
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr916
+ goto tr1047
}
- goto tr105
- st238:
+ goto tr103
+ st248:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof238
+ goto _test_eof248
}
- st_case_238:
+ st_case_248:
if ( m.data)[( m.p)] == 76 {
- goto st239
+ goto st249
}
goto tr8
- st239:
+ st249:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof239
+ goto _test_eof249
}
- st_case_239:
+ st_case_249:
if ( m.data)[( m.p)] == 83 {
- goto st240
+ goto st250
}
goto tr8
- st240:
+ st250:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof240
+ goto _test_eof250
}
- st_case_240:
+ st_case_250:
if ( m.data)[( m.p)] == 69 {
- goto st710
+ goto st734
}
goto tr8
- st710:
+ st734:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof710
+ goto _test_eof734
}
- st_case_710:
+ st_case_734:
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr916
+ goto tr1047
case 44:
- goto tr1018
+ goto tr1048
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr916
+ goto tr1047
}
- goto tr105
- st241:
+ goto tr103
+ st251:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof241
+ goto _test_eof251
}
- st_case_241:
+ st_case_251:
if ( m.data)[( m.p)] == 108 {
- goto st242
+ goto st252
}
goto tr8
- st242:
+ st252:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof242
+ goto _test_eof252
}
- st_case_242:
+ st_case_252:
if ( m.data)[( m.p)] == 115 {
- goto st243
+ goto st253
}
goto tr8
- st243:
+ st253:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof243
+ goto _test_eof253
}
- st_case_243:
+ st_case_253:
if ( m.data)[( m.p)] == 101 {
- goto st710
+ goto st734
}
goto tr8
tr20:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st711
- st711:
+ goto st735
+ st735:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof711
+ goto _test_eof735
}
- st_case_711:
-//line plugins/parsers/influx/machine.go:28745
+ st_case_735:
+//line plugins/parsers/influx/machine.go:29464
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr916
+ goto tr1047
case 44:
- goto tr1018
+ goto tr1048
case 82:
- goto st244
+ goto st254
case 114:
- goto st245
+ goto st255
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr916
+ goto tr1047
}
- goto tr105
- st244:
+ goto tr103
+ st254:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof244
+ goto _test_eof254
}
- st_case_244:
+ st_case_254:
if ( m.data)[( m.p)] == 85 {
- goto st240
+ goto st250
}
goto tr8
- st245:
+ st255:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof245
+ goto _test_eof255
}
- st_case_245:
+ st_case_255:
if ( m.data)[( m.p)] == 117 {
- goto st243
+ goto st253
}
goto tr8
tr21:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st712
- st712:
+ goto st736
+ st736:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof712
+ goto _test_eof736
}
- st_case_712:
-//line plugins/parsers/influx/machine.go:28793
+ st_case_736:
+//line plugins/parsers/influx/machine.go:29512
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr916
+ goto tr1047
case 44:
- goto tr1018
+ goto tr1048
case 97:
- goto st241
+ goto st251
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr916
+ goto tr1047
}
- goto tr105
+ goto tr103
tr22:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st713
- st713:
+ goto st737
+ st737:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof713
+ goto _test_eof737
}
- st_case_713:
-//line plugins/parsers/influx/machine.go:28821
+ st_case_737:
+//line plugins/parsers/influx/machine.go:29540
switch ( m.data)[( m.p)] {
case 10:
- goto tr935
+ goto tr954
case 13:
- goto tr937
+ goto tr956
case 32:
- goto tr916
+ goto tr1047
case 44:
- goto tr1018
+ goto tr1048
case 114:
- goto st245
+ goto st255
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto tr916
+ goto tr1047
}
- goto tr105
+ goto tr103
tr9:
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st246
- st246:
+ goto st256
+ st256:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof246
+ goto _test_eof256
}
- st_case_246:
-//line plugins/parsers/influx/machine.go:28849
+ st_case_256:
+//line plugins/parsers/influx/machine.go:29568
switch ( m.data)[( m.p)] {
case 10:
goto tr8
@@ -28866,203 +29585,218 @@ tr9:
goto st2
}
goto tr6
- st247:
+ st257:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof247
+ goto _test_eof257
}
- st_case_247:
+ st_case_257:
if ( m.data)[( m.p)] == 10 {
- goto tr421
+ goto tr438
}
- goto st247
-tr421:
-//line plugins/parsers/influx/machine.go.rl:69
-
- {goto st715 }
-
- goto st714
- st714:
-//line plugins/parsers/influx/machine.go.rl:157
+ goto st257
+tr438:
+//line plugins/parsers/influx/machine.go.rl:166
m.lineno++
m.sol = m.p
m.sol++ // next char will be the first column in the line
+//line plugins/parsers/influx/machine.go.rl:78
+
+ {goto st739 }
+
+ goto st738
+ st738:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof714
+ goto _test_eof738
}
- st_case_714:
-//line plugins/parsers/influx/machine.go:28896
+ st_case_738:
+//line plugins/parsers/influx/machine.go:29615
goto st0
- st250:
+ st260:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof250
+ goto _test_eof260
}
- st_case_250:
+ st_case_260:
switch ( m.data)[( m.p)] {
case 32:
- goto tr35
+ goto tr33
case 35:
- goto tr35
+ goto tr33
case 44:
- goto tr35
+ goto tr33
case 92:
- goto tr425
+ goto tr442
}
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
- goto tr35
+ goto tr33
}
case ( m.data)[( m.p)] >= 9:
- goto tr35
+ goto tr33
}
- goto tr424
-tr424:
-//line plugins/parsers/influx/machine.go.rl:73
+ goto tr441
+tr441:
+//line plugins/parsers/influx/machine.go.rl:82
- foundMetric = true
+ m.beginMetric = true
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st717
- st717:
+ goto st740
+ st740:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof717
+ goto _test_eof740
}
- st_case_717:
-//line plugins/parsers/influx/machine.go:28937
+ st_case_740:
+//line plugins/parsers/influx/machine.go:29656
switch ( m.data)[( m.p)] {
case 9:
goto tr2
case 10:
- goto tr1026
+ goto tr1056
case 12:
goto tr2
case 13:
- goto tr1027
+ goto tr1057
case 32:
goto tr2
case 44:
- goto tr1028
+ goto tr1058
case 92:
- goto st258
+ goto st268
}
- goto st717
-tr1026:
- ( m.cs) = 718
-//line plugins/parsers/influx/machine.go.rl:77
+ goto st740
+tr443:
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
+ goto st741
+tr1056:
+ ( m.cs) = 741
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
+//line plugins/parsers/influx/machine.go.rl:166
+
+ m.lineno++
+ m.sol = m.p
+ m.sol++ // next char will be the first column in the line
+
goto _again
-tr1030:
- ( m.cs) = 718
-//line plugins/parsers/influx/machine.go.rl:90
+tr1060:
+ ( m.cs) = 741
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
- goto _again
- st718:
-//line plugins/parsers/influx/machine.go.rl:157
+//line plugins/parsers/influx/machine.go.rl:166
m.lineno++
m.sol = m.p
m.sol++ // next char will be the first column in the line
-//line plugins/parsers/influx/machine.go.rl:163
+ goto _again
+ st741:
+//line plugins/parsers/influx/machine.go.rl:172
- ( m.cs) = 715;
+ m.finishMetric = true
+ ( m.cs) = 739;
{( m.p)++; goto _out }
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof718
+ goto _test_eof741
}
- st_case_718:
-//line plugins/parsers/influx/machine.go:28997
+ st_case_741:
+//line plugins/parsers/influx/machine.go:29731
goto st0
-tr1027:
- ( m.cs) = 251
-//line plugins/parsers/influx/machine.go.rl:77
+tr1057:
+ ( m.cs) = 261
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr1031:
- ( m.cs) = 251
-//line plugins/parsers/influx/machine.go.rl:90
+tr1061:
+ ( m.cs) = 261
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st251:
+ st261:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof251
+ goto _test_eof261
}
- st_case_251:
-//line plugins/parsers/influx/machine.go:29030
+ st_case_261:
+//line plugins/parsers/influx/machine.go:29764
if ( m.data)[( m.p)] == 10 {
- goto st718
+ goto tr443
}
goto st0
-tr1028:
- ( m.cs) = 252
-//line plugins/parsers/influx/machine.go.rl:77
+tr1058:
+ ( m.cs) = 262
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
-tr1032:
- ( m.cs) = 252
-//line plugins/parsers/influx/machine.go.rl:90
+tr1062:
+ ( m.cs) = 262
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; goto _out }
}
goto _again
- st252:
+ st262:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof252
+ goto _test_eof262
}
- st_case_252:
-//line plugins/parsers/influx/machine.go:29066
+ st_case_262:
+//line plugins/parsers/influx/machine.go:29800
switch ( m.data)[( m.p)] {
case 32:
goto tr2
@@ -29071,7 +29805,7 @@ tr1032:
case 61:
goto tr2
case 92:
- goto tr428
+ goto tr445
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -29081,28 +29815,28 @@ tr1032:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto tr427
-tr427:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr444
+tr444:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st253
- st253:
+ goto st263
+ st263:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof253
+ goto _test_eof263
}
- st_case_253:
-//line plugins/parsers/influx/machine.go:29097
+ st_case_263:
+//line plugins/parsers/influx/machine.go:29831
switch ( m.data)[( m.p)] {
case 32:
goto tr2
case 44:
goto tr2
case 61:
- goto tr430
+ goto tr447
case 92:
- goto st256
+ goto st266
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -29112,19 +29846,19 @@ tr427:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto st253
-tr430:
-//line plugins/parsers/influx/machine.go.rl:86
+ goto st263
+tr447:
+//line plugins/parsers/influx/machine.go.rl:95
- key = m.text()
+ m.key = m.text()
- goto st254
- st254:
+ goto st264
+ st264:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof254
+ goto _test_eof264
}
- st_case_254:
-//line plugins/parsers/influx/machine.go:29128
+ st_case_264:
+//line plugins/parsers/influx/machine.go:29862
switch ( m.data)[( m.p)] {
case 32:
goto tr2
@@ -29133,7 +29867,7 @@ tr430:
case 61:
goto tr2
case 92:
- goto tr433
+ goto tr450
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -29143,52 +29877,52 @@ tr430:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto tr432
-tr432:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto tr449
+tr449:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st719
- st719:
+ goto st742
+ st742:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof719
+ goto _test_eof742
}
- st_case_719:
-//line plugins/parsers/influx/machine.go:29159
+ st_case_742:
+//line plugins/parsers/influx/machine.go:29893
switch ( m.data)[( m.p)] {
case 9:
goto tr2
case 10:
- goto tr1030
+ goto tr1060
case 12:
goto tr2
case 13:
- goto tr1031
+ goto tr1061
case 32:
goto tr2
case 44:
- goto tr1032
+ goto tr1062
case 61:
goto tr2
case 92:
- goto st255
+ goto st265
}
- goto st719
-tr433:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st742
+tr450:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st255
- st255:
+ goto st265
+ st265:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof255
+ goto _test_eof265
}
- st_case_255:
-//line plugins/parsers/influx/machine.go:29190
+ st_case_265:
+//line plugins/parsers/influx/machine.go:29924
if ( m.data)[( m.p)] == 92 {
- goto st720
+ goto st743
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -29198,49 +29932,49 @@ tr433:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto st719
- st720:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st742
+ st743:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof720
+ goto _test_eof743
}
- st_case_720:
-//line plugins/parsers/influx/machine.go:29211
+ st_case_743:
+//line plugins/parsers/influx/machine.go:29945
switch ( m.data)[( m.p)] {
case 9:
goto tr2
case 10:
- goto tr1030
+ goto tr1060
case 12:
goto tr2
case 13:
- goto tr1031
+ goto tr1061
case 32:
goto tr2
case 44:
- goto tr1032
+ goto tr1062
case 61:
goto tr2
case 92:
- goto st255
+ goto st265
}
- goto st719
-tr428:
-//line plugins/parsers/influx/machine.go.rl:19
+ goto st742
+tr445:
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st256
- st256:
+ goto st266
+ st266:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof256
+ goto _test_eof266
}
- st_case_256:
-//line plugins/parsers/influx/machine.go:29242
+ st_case_266:
+//line plugins/parsers/influx/machine.go:29976
if ( m.data)[( m.p)] == 92 {
- goto st257
+ goto st267
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -29250,25 +29984,25 @@ tr428:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto st253
- st257:
-//line plugins/parsers/influx/machine.go.rl:234
+ goto st263
+ st267:
+//line plugins/parsers/influx/machine.go.rl:248
( m.p)--
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof257
+ goto _test_eof267
}
- st_case_257:
-//line plugins/parsers/influx/machine.go:29263
+ st_case_267:
+//line plugins/parsers/influx/machine.go:29997
switch ( m.data)[( m.p)] {
case 32:
goto tr2
case 44:
goto tr2
case 61:
- goto tr430
+ goto tr447
case 92:
- goto st256
+ goto st266
}
switch {
case ( m.data)[( m.p)] > 10:
@@ -29278,23 +30012,23 @@ tr428:
case ( m.data)[( m.p)] >= 9:
goto tr2
}
- goto st253
-tr425:
-//line plugins/parsers/influx/machine.go.rl:73
+ goto st263
+tr442:
+//line plugins/parsers/influx/machine.go.rl:82
- foundMetric = true
+ m.beginMetric = true
-//line plugins/parsers/influx/machine.go.rl:19
+//line plugins/parsers/influx/machine.go.rl:28
m.pb = m.p
- goto st258
- st258:
+ goto st268
+ st268:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof258
+ goto _test_eof268
}
- st_case_258:
-//line plugins/parsers/influx/machine.go:29298
+ st_case_268:
+//line plugins/parsers/influx/machine.go:30032
switch {
case ( m.data)[( m.p)] > 10:
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
@@ -29303,83 +30037,66 @@ tr425:
case ( m.data)[( m.p)] >= 9:
goto st0
}
- goto st717
- st715:
- if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof715
- }
- st_case_715:
- switch ( m.data)[( m.p)] {
- case 10:
- goto st716
- case 13:
- goto st248
- case 32:
- goto st715
- case 35:
- goto st249
- }
- if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st715
- }
- goto tr1023
- st716:
-//line plugins/parsers/influx/machine.go.rl:157
+ goto st740
+tr439:
+//line plugins/parsers/influx/machine.go.rl:166
m.lineno++
m.sol = m.p
m.sol++ // next char will be the first column in the line
+ goto st739
+ st739:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof716
+ goto _test_eof739
}
- st_case_716:
-//line plugins/parsers/influx/machine.go:29338
+ st_case_739:
+//line plugins/parsers/influx/machine.go:30055
switch ( m.data)[( m.p)] {
case 10:
- goto st716
+ goto tr439
case 13:
- goto st248
+ goto st258
case 32:
- goto st715
+ goto st739
case 35:
- goto st249
+ goto st259
}
if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
- goto st715
+ goto st739
}
- goto tr1023
- st248:
+ goto tr1053
+ st258:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof248
+ goto _test_eof258
}
- st_case_248:
+ st_case_258:
if ( m.data)[( m.p)] == 10 {
- goto st716
+ goto tr439
}
goto st0
- st249:
+ st259:
if ( m.p)++; ( m.p) == ( m.pe) {
- goto _test_eof249
+ goto _test_eof259
}
- st_case_249:
+ st_case_259:
if ( m.data)[( m.p)] == 10 {
- goto st716
+ goto tr439
}
- goto st249
+ goto st259
st_out:
- _test_eof259: ( m.cs) = 259; goto _test_eof
+ _test_eof269: ( m.cs) = 269; goto _test_eof
_test_eof1: ( m.cs) = 1; goto _test_eof
_test_eof2: ( m.cs) = 2; goto _test_eof
_test_eof3: ( m.cs) = 3; goto _test_eof
_test_eof4: ( m.cs) = 4; goto _test_eof
_test_eof5: ( m.cs) = 5; goto _test_eof
_test_eof6: ( m.cs) = 6; goto _test_eof
+ _test_eof270: ( m.cs) = 270; goto _test_eof
+ _test_eof271: ( m.cs) = 271; goto _test_eof
+ _test_eof272: ( m.cs) = 272; goto _test_eof
_test_eof7: ( m.cs) = 7; goto _test_eof
_test_eof8: ( m.cs) = 8; goto _test_eof
- _test_eof260: ( m.cs) = 260; goto _test_eof
- _test_eof261: ( m.cs) = 261; goto _test_eof
- _test_eof262: ( m.cs) = 262; goto _test_eof
_test_eof9: ( m.cs) = 9; goto _test_eof
_test_eof10: ( m.cs) = 10; goto _test_eof
_test_eof11: ( m.cs) = 11; goto _test_eof
@@ -29403,26 +30120,14 @@ tr425:
_test_eof29: ( m.cs) = 29; goto _test_eof
_test_eof30: ( m.cs) = 30; goto _test_eof
_test_eof31: ( m.cs) = 31; goto _test_eof
- _test_eof32: ( m.cs) = 32; goto _test_eof
- _test_eof33: ( m.cs) = 33; goto _test_eof
- _test_eof263: ( m.cs) = 263; goto _test_eof
- _test_eof264: ( m.cs) = 264; goto _test_eof
- _test_eof34: ( m.cs) = 34; goto _test_eof
- _test_eof35: ( m.cs) = 35; goto _test_eof
- _test_eof265: ( m.cs) = 265; goto _test_eof
- _test_eof266: ( m.cs) = 266; goto _test_eof
- _test_eof267: ( m.cs) = 267; goto _test_eof
- _test_eof36: ( m.cs) = 36; goto _test_eof
- _test_eof268: ( m.cs) = 268; goto _test_eof
- _test_eof269: ( m.cs) = 269; goto _test_eof
- _test_eof270: ( m.cs) = 270; goto _test_eof
- _test_eof271: ( m.cs) = 271; goto _test_eof
- _test_eof272: ( m.cs) = 272; goto _test_eof
_test_eof273: ( m.cs) = 273; goto _test_eof
_test_eof274: ( m.cs) = 274; goto _test_eof
+ _test_eof32: ( m.cs) = 32; goto _test_eof
+ _test_eof33: ( m.cs) = 33; goto _test_eof
_test_eof275: ( m.cs) = 275; goto _test_eof
_test_eof276: ( m.cs) = 276; goto _test_eof
_test_eof277: ( m.cs) = 277; goto _test_eof
+ _test_eof34: ( m.cs) = 34; goto _test_eof
_test_eof278: ( m.cs) = 278; goto _test_eof
_test_eof279: ( m.cs) = 279; goto _test_eof
_test_eof280: ( m.cs) = 280; goto _test_eof
@@ -29431,31 +30136,31 @@ tr425:
_test_eof283: ( m.cs) = 283; goto _test_eof
_test_eof284: ( m.cs) = 284; goto _test_eof
_test_eof285: ( m.cs) = 285; goto _test_eof
- _test_eof37: ( m.cs) = 37; goto _test_eof
- _test_eof38: ( m.cs) = 38; goto _test_eof
_test_eof286: ( m.cs) = 286; goto _test_eof
_test_eof287: ( m.cs) = 287; goto _test_eof
_test_eof288: ( m.cs) = 288; goto _test_eof
- _test_eof39: ( m.cs) = 39; goto _test_eof
- _test_eof40: ( m.cs) = 40; goto _test_eof
- _test_eof41: ( m.cs) = 41; goto _test_eof
- _test_eof42: ( m.cs) = 42; goto _test_eof
- _test_eof43: ( m.cs) = 43; goto _test_eof
_test_eof289: ( m.cs) = 289; goto _test_eof
_test_eof290: ( m.cs) = 290; goto _test_eof
_test_eof291: ( m.cs) = 291; goto _test_eof
_test_eof292: ( m.cs) = 292; goto _test_eof
- _test_eof44: ( m.cs) = 44; goto _test_eof
_test_eof293: ( m.cs) = 293; goto _test_eof
_test_eof294: ( m.cs) = 294; goto _test_eof
_test_eof295: ( m.cs) = 295; goto _test_eof
+ _test_eof35: ( m.cs) = 35; goto _test_eof
+ _test_eof36: ( m.cs) = 36; goto _test_eof
_test_eof296: ( m.cs) = 296; goto _test_eof
_test_eof297: ( m.cs) = 297; goto _test_eof
_test_eof298: ( m.cs) = 298; goto _test_eof
+ _test_eof37: ( m.cs) = 37; goto _test_eof
+ _test_eof38: ( m.cs) = 38; goto _test_eof
+ _test_eof39: ( m.cs) = 39; goto _test_eof
+ _test_eof40: ( m.cs) = 40; goto _test_eof
+ _test_eof41: ( m.cs) = 41; goto _test_eof
_test_eof299: ( m.cs) = 299; goto _test_eof
_test_eof300: ( m.cs) = 300; goto _test_eof
_test_eof301: ( m.cs) = 301; goto _test_eof
_test_eof302: ( m.cs) = 302; goto _test_eof
+ _test_eof42: ( m.cs) = 42; goto _test_eof
_test_eof303: ( m.cs) = 303; goto _test_eof
_test_eof304: ( m.cs) = 304; goto _test_eof
_test_eof305: ( m.cs) = 305; goto _test_eof
@@ -29468,6 +30173,18 @@ tr425:
_test_eof312: ( m.cs) = 312; goto _test_eof
_test_eof313: ( m.cs) = 313; goto _test_eof
_test_eof314: ( m.cs) = 314; goto _test_eof
+ _test_eof315: ( m.cs) = 315; goto _test_eof
+ _test_eof316: ( m.cs) = 316; goto _test_eof
+ _test_eof317: ( m.cs) = 317; goto _test_eof
+ _test_eof318: ( m.cs) = 318; goto _test_eof
+ _test_eof319: ( m.cs) = 319; goto _test_eof
+ _test_eof320: ( m.cs) = 320; goto _test_eof
+ _test_eof321: ( m.cs) = 321; goto _test_eof
+ _test_eof322: ( m.cs) = 322; goto _test_eof
+ _test_eof323: ( m.cs) = 323; goto _test_eof
+ _test_eof324: ( m.cs) = 324; goto _test_eof
+ _test_eof43: ( m.cs) = 43; goto _test_eof
+ _test_eof44: ( m.cs) = 44; goto _test_eof
_test_eof45: ( m.cs) = 45; goto _test_eof
_test_eof46: ( m.cs) = 46; goto _test_eof
_test_eof47: ( m.cs) = 47; goto _test_eof
@@ -29476,30 +30193,18 @@ tr425:
_test_eof50: ( m.cs) = 50; goto _test_eof
_test_eof51: ( m.cs) = 51; goto _test_eof
_test_eof52: ( m.cs) = 52; goto _test_eof
+ _test_eof325: ( m.cs) = 325; goto _test_eof
+ _test_eof326: ( m.cs) = 326; goto _test_eof
+ _test_eof327: ( m.cs) = 327; goto _test_eof
_test_eof53: ( m.cs) = 53; goto _test_eof
_test_eof54: ( m.cs) = 54; goto _test_eof
- _test_eof315: ( m.cs) = 315; goto _test_eof
- _test_eof316: ( m.cs) = 316; goto _test_eof
- _test_eof317: ( m.cs) = 317; goto _test_eof
_test_eof55: ( m.cs) = 55; goto _test_eof
_test_eof56: ( m.cs) = 56; goto _test_eof
_test_eof57: ( m.cs) = 57; goto _test_eof
_test_eof58: ( m.cs) = 58; goto _test_eof
- _test_eof59: ( m.cs) = 59; goto _test_eof
- _test_eof60: ( m.cs) = 60; goto _test_eof
- _test_eof318: ( m.cs) = 318; goto _test_eof
- _test_eof319: ( m.cs) = 319; goto _test_eof
- _test_eof61: ( m.cs) = 61; goto _test_eof
- _test_eof320: ( m.cs) = 320; goto _test_eof
- _test_eof321: ( m.cs) = 321; goto _test_eof
- _test_eof322: ( m.cs) = 322; goto _test_eof
- _test_eof323: ( m.cs) = 323; goto _test_eof
- _test_eof324: ( m.cs) = 324; goto _test_eof
- _test_eof325: ( m.cs) = 325; goto _test_eof
- _test_eof326: ( m.cs) = 326; goto _test_eof
- _test_eof327: ( m.cs) = 327; goto _test_eof
_test_eof328: ( m.cs) = 328; goto _test_eof
_test_eof329: ( m.cs) = 329; goto _test_eof
+ _test_eof59: ( m.cs) = 59; goto _test_eof
_test_eof330: ( m.cs) = 330; goto _test_eof
_test_eof331: ( m.cs) = 331; goto _test_eof
_test_eof332: ( m.cs) = 332; goto _test_eof
@@ -29510,11 +30215,9 @@ tr425:
_test_eof337: ( m.cs) = 337; goto _test_eof
_test_eof338: ( m.cs) = 338; goto _test_eof
_test_eof339: ( m.cs) = 339; goto _test_eof
- _test_eof62: ( m.cs) = 62; goto _test_eof
_test_eof340: ( m.cs) = 340; goto _test_eof
_test_eof341: ( m.cs) = 341; goto _test_eof
_test_eof342: ( m.cs) = 342; goto _test_eof
- _test_eof63: ( m.cs) = 63; goto _test_eof
_test_eof343: ( m.cs) = 343; goto _test_eof
_test_eof344: ( m.cs) = 344; goto _test_eof
_test_eof345: ( m.cs) = 345; goto _test_eof
@@ -29522,9 +30225,11 @@ tr425:
_test_eof347: ( m.cs) = 347; goto _test_eof
_test_eof348: ( m.cs) = 348; goto _test_eof
_test_eof349: ( m.cs) = 349; goto _test_eof
+ _test_eof60: ( m.cs) = 60; goto _test_eof
_test_eof350: ( m.cs) = 350; goto _test_eof
_test_eof351: ( m.cs) = 351; goto _test_eof
_test_eof352: ( m.cs) = 352; goto _test_eof
+ _test_eof61: ( m.cs) = 61; goto _test_eof
_test_eof353: ( m.cs) = 353; goto _test_eof
_test_eof354: ( m.cs) = 354; goto _test_eof
_test_eof355: ( m.cs) = 355; goto _test_eof
@@ -29535,37 +30240,37 @@ tr425:
_test_eof360: ( m.cs) = 360; goto _test_eof
_test_eof361: ( m.cs) = 361; goto _test_eof
_test_eof362: ( m.cs) = 362; goto _test_eof
- _test_eof64: ( m.cs) = 64; goto _test_eof
- _test_eof65: ( m.cs) = 65; goto _test_eof
- _test_eof66: ( m.cs) = 66; goto _test_eof
- _test_eof67: ( m.cs) = 67; goto _test_eof
- _test_eof68: ( m.cs) = 68; goto _test_eof
_test_eof363: ( m.cs) = 363; goto _test_eof
- _test_eof69: ( m.cs) = 69; goto _test_eof
- _test_eof70: ( m.cs) = 70; goto _test_eof
- _test_eof71: ( m.cs) = 71; goto _test_eof
- _test_eof72: ( m.cs) = 72; goto _test_eof
- _test_eof73: ( m.cs) = 73; goto _test_eof
_test_eof364: ( m.cs) = 364; goto _test_eof
_test_eof365: ( m.cs) = 365; goto _test_eof
_test_eof366: ( m.cs) = 366; goto _test_eof
- _test_eof74: ( m.cs) = 74; goto _test_eof
- _test_eof75: ( m.cs) = 75; goto _test_eof
_test_eof367: ( m.cs) = 367; goto _test_eof
_test_eof368: ( m.cs) = 368; goto _test_eof
- _test_eof76: ( m.cs) = 76; goto _test_eof
_test_eof369: ( m.cs) = 369; goto _test_eof
- _test_eof77: ( m.cs) = 77; goto _test_eof
_test_eof370: ( m.cs) = 370; goto _test_eof
_test_eof371: ( m.cs) = 371; goto _test_eof
_test_eof372: ( m.cs) = 372; goto _test_eof
+ _test_eof62: ( m.cs) = 62; goto _test_eof
+ _test_eof63: ( m.cs) = 63; goto _test_eof
+ _test_eof64: ( m.cs) = 64; goto _test_eof
+ _test_eof65: ( m.cs) = 65; goto _test_eof
+ _test_eof66: ( m.cs) = 66; goto _test_eof
_test_eof373: ( m.cs) = 373; goto _test_eof
+ _test_eof67: ( m.cs) = 67; goto _test_eof
+ _test_eof68: ( m.cs) = 68; goto _test_eof
+ _test_eof69: ( m.cs) = 69; goto _test_eof
+ _test_eof70: ( m.cs) = 70; goto _test_eof
+ _test_eof71: ( m.cs) = 71; goto _test_eof
_test_eof374: ( m.cs) = 374; goto _test_eof
_test_eof375: ( m.cs) = 375; goto _test_eof
_test_eof376: ( m.cs) = 376; goto _test_eof
+ _test_eof72: ( m.cs) = 72; goto _test_eof
+ _test_eof73: ( m.cs) = 73; goto _test_eof
+ _test_eof74: ( m.cs) = 74; goto _test_eof
_test_eof377: ( m.cs) = 377; goto _test_eof
_test_eof378: ( m.cs) = 378; goto _test_eof
_test_eof379: ( m.cs) = 379; goto _test_eof
+ _test_eof75: ( m.cs) = 75; goto _test_eof
_test_eof380: ( m.cs) = 380; goto _test_eof
_test_eof381: ( m.cs) = 381; goto _test_eof
_test_eof382: ( m.cs) = 382; goto _test_eof
@@ -29576,6 +30281,18 @@ tr425:
_test_eof387: ( m.cs) = 387; goto _test_eof
_test_eof388: ( m.cs) = 388; goto _test_eof
_test_eof389: ( m.cs) = 389; goto _test_eof
+ _test_eof390: ( m.cs) = 390; goto _test_eof
+ _test_eof391: ( m.cs) = 391; goto _test_eof
+ _test_eof392: ( m.cs) = 392; goto _test_eof
+ _test_eof393: ( m.cs) = 393; goto _test_eof
+ _test_eof394: ( m.cs) = 394; goto _test_eof
+ _test_eof395: ( m.cs) = 395; goto _test_eof
+ _test_eof396: ( m.cs) = 396; goto _test_eof
+ _test_eof397: ( m.cs) = 397; goto _test_eof
+ _test_eof398: ( m.cs) = 398; goto _test_eof
+ _test_eof399: ( m.cs) = 399; goto _test_eof
+ _test_eof76: ( m.cs) = 76; goto _test_eof
+ _test_eof77: ( m.cs) = 77; goto _test_eof
_test_eof78: ( m.cs) = 78; goto _test_eof
_test_eof79: ( m.cs) = 79; goto _test_eof
_test_eof80: ( m.cs) = 80; goto _test_eof
@@ -29588,40 +30305,28 @@ tr425:
_test_eof87: ( m.cs) = 87; goto _test_eof
_test_eof88: ( m.cs) = 88; goto _test_eof
_test_eof89: ( m.cs) = 89; goto _test_eof
+ _test_eof400: ( m.cs) = 400; goto _test_eof
+ _test_eof401: ( m.cs) = 401; goto _test_eof
+ _test_eof402: ( m.cs) = 402; goto _test_eof
+ _test_eof403: ( m.cs) = 403; goto _test_eof
_test_eof90: ( m.cs) = 90; goto _test_eof
_test_eof91: ( m.cs) = 91; goto _test_eof
- _test_eof390: ( m.cs) = 390; goto _test_eof
- _test_eof391: ( m.cs) = 391; goto _test_eof
- _test_eof392: ( m.cs) = 392; goto _test_eof
- _test_eof393: ( m.cs) = 393; goto _test_eof
_test_eof92: ( m.cs) = 92; goto _test_eof
_test_eof93: ( m.cs) = 93; goto _test_eof
+ _test_eof404: ( m.cs) = 404; goto _test_eof
+ _test_eof405: ( m.cs) = 405; goto _test_eof
_test_eof94: ( m.cs) = 94; goto _test_eof
_test_eof95: ( m.cs) = 95; goto _test_eof
- _test_eof394: ( m.cs) = 394; goto _test_eof
- _test_eof395: ( m.cs) = 395; goto _test_eof
+ _test_eof406: ( m.cs) = 406; goto _test_eof
_test_eof96: ( m.cs) = 96; goto _test_eof
_test_eof97: ( m.cs) = 97; goto _test_eof
- _test_eof396: ( m.cs) = 396; goto _test_eof
- _test_eof98: ( m.cs) = 98; goto _test_eof
- _test_eof99: ( m.cs) = 99; goto _test_eof
- _test_eof397: ( m.cs) = 397; goto _test_eof
- _test_eof398: ( m.cs) = 398; goto _test_eof
- _test_eof100: ( m.cs) = 100; goto _test_eof
- _test_eof399: ( m.cs) = 399; goto _test_eof
- _test_eof400: ( m.cs) = 400; goto _test_eof
- _test_eof101: ( m.cs) = 101; goto _test_eof
- _test_eof102: ( m.cs) = 102; goto _test_eof
- _test_eof401: ( m.cs) = 401; goto _test_eof
- _test_eof402: ( m.cs) = 402; goto _test_eof
- _test_eof403: ( m.cs) = 403; goto _test_eof
- _test_eof404: ( m.cs) = 404; goto _test_eof
- _test_eof405: ( m.cs) = 405; goto _test_eof
- _test_eof406: ( m.cs) = 406; goto _test_eof
_test_eof407: ( m.cs) = 407; goto _test_eof
_test_eof408: ( m.cs) = 408; goto _test_eof
+ _test_eof98: ( m.cs) = 98; goto _test_eof
_test_eof409: ( m.cs) = 409; goto _test_eof
_test_eof410: ( m.cs) = 410; goto _test_eof
+ _test_eof99: ( m.cs) = 99; goto _test_eof
+ _test_eof100: ( m.cs) = 100; goto _test_eof
_test_eof411: ( m.cs) = 411; goto _test_eof
_test_eof412: ( m.cs) = 412; goto _test_eof
_test_eof413: ( m.cs) = 413; goto _test_eof
@@ -29630,26 +30335,26 @@ tr425:
_test_eof416: ( m.cs) = 416; goto _test_eof
_test_eof417: ( m.cs) = 417; goto _test_eof
_test_eof418: ( m.cs) = 418; goto _test_eof
- _test_eof103: ( m.cs) = 103; goto _test_eof
_test_eof419: ( m.cs) = 419; goto _test_eof
_test_eof420: ( m.cs) = 420; goto _test_eof
_test_eof421: ( m.cs) = 421; goto _test_eof
- _test_eof104: ( m.cs) = 104; goto _test_eof
- _test_eof105: ( m.cs) = 105; goto _test_eof
_test_eof422: ( m.cs) = 422; goto _test_eof
_test_eof423: ( m.cs) = 423; goto _test_eof
_test_eof424: ( m.cs) = 424; goto _test_eof
- _test_eof106: ( m.cs) = 106; goto _test_eof
_test_eof425: ( m.cs) = 425; goto _test_eof
_test_eof426: ( m.cs) = 426; goto _test_eof
_test_eof427: ( m.cs) = 427; goto _test_eof
_test_eof428: ( m.cs) = 428; goto _test_eof
+ _test_eof101: ( m.cs) = 101; goto _test_eof
_test_eof429: ( m.cs) = 429; goto _test_eof
_test_eof430: ( m.cs) = 430; goto _test_eof
_test_eof431: ( m.cs) = 431; goto _test_eof
+ _test_eof102: ( m.cs) = 102; goto _test_eof
+ _test_eof103: ( m.cs) = 103; goto _test_eof
_test_eof432: ( m.cs) = 432; goto _test_eof
_test_eof433: ( m.cs) = 433; goto _test_eof
_test_eof434: ( m.cs) = 434; goto _test_eof
+ _test_eof104: ( m.cs) = 104; goto _test_eof
_test_eof435: ( m.cs) = 435; goto _test_eof
_test_eof436: ( m.cs) = 436; goto _test_eof
_test_eof437: ( m.cs) = 437; goto _test_eof
@@ -29660,7 +30365,6 @@ tr425:
_test_eof442: ( m.cs) = 442; goto _test_eof
_test_eof443: ( m.cs) = 443; goto _test_eof
_test_eof444: ( m.cs) = 444; goto _test_eof
- _test_eof107: ( m.cs) = 107; goto _test_eof
_test_eof445: ( m.cs) = 445; goto _test_eof
_test_eof446: ( m.cs) = 446; goto _test_eof
_test_eof447: ( m.cs) = 447; goto _test_eof
@@ -29671,6 +30375,7 @@ tr425:
_test_eof452: ( m.cs) = 452; goto _test_eof
_test_eof453: ( m.cs) = 453; goto _test_eof
_test_eof454: ( m.cs) = 454; goto _test_eof
+ _test_eof105: ( m.cs) = 105; goto _test_eof
_test_eof455: ( m.cs) = 455; goto _test_eof
_test_eof456: ( m.cs) = 456; goto _test_eof
_test_eof457: ( m.cs) = 457; goto _test_eof
@@ -29683,16 +30388,9 @@ tr425:
_test_eof464: ( m.cs) = 464; goto _test_eof
_test_eof465: ( m.cs) = 465; goto _test_eof
_test_eof466: ( m.cs) = 466; goto _test_eof
- _test_eof108: ( m.cs) = 108; goto _test_eof
- _test_eof109: ( m.cs) = 109; goto _test_eof
- _test_eof110: ( m.cs) = 110; goto _test_eof
- _test_eof111: ( m.cs) = 111; goto _test_eof
- _test_eof112: ( m.cs) = 112; goto _test_eof
_test_eof467: ( m.cs) = 467; goto _test_eof
- _test_eof113: ( m.cs) = 113; goto _test_eof
_test_eof468: ( m.cs) = 468; goto _test_eof
_test_eof469: ( m.cs) = 469; goto _test_eof
- _test_eof114: ( m.cs) = 114; goto _test_eof
_test_eof470: ( m.cs) = 470; goto _test_eof
_test_eof471: ( m.cs) = 471; goto _test_eof
_test_eof472: ( m.cs) = 472; goto _test_eof
@@ -29700,38 +30398,45 @@ tr425:
_test_eof474: ( m.cs) = 474; goto _test_eof
_test_eof475: ( m.cs) = 475; goto _test_eof
_test_eof476: ( m.cs) = 476; goto _test_eof
+ _test_eof106: ( m.cs) = 106; goto _test_eof
+ _test_eof107: ( m.cs) = 107; goto _test_eof
+ _test_eof108: ( m.cs) = 108; goto _test_eof
+ _test_eof109: ( m.cs) = 109; goto _test_eof
+ _test_eof110: ( m.cs) = 110; goto _test_eof
_test_eof477: ( m.cs) = 477; goto _test_eof
+ _test_eof111: ( m.cs) = 111; goto _test_eof
_test_eof478: ( m.cs) = 478; goto _test_eof
- _test_eof115: ( m.cs) = 115; goto _test_eof
- _test_eof116: ( m.cs) = 116; goto _test_eof
- _test_eof117: ( m.cs) = 117; goto _test_eof
_test_eof479: ( m.cs) = 479; goto _test_eof
- _test_eof118: ( m.cs) = 118; goto _test_eof
- _test_eof119: ( m.cs) = 119; goto _test_eof
- _test_eof120: ( m.cs) = 120; goto _test_eof
+ _test_eof112: ( m.cs) = 112; goto _test_eof
_test_eof480: ( m.cs) = 480; goto _test_eof
- _test_eof121: ( m.cs) = 121; goto _test_eof
- _test_eof122: ( m.cs) = 122; goto _test_eof
_test_eof481: ( m.cs) = 481; goto _test_eof
_test_eof482: ( m.cs) = 482; goto _test_eof
- _test_eof123: ( m.cs) = 123; goto _test_eof
- _test_eof124: ( m.cs) = 124; goto _test_eof
- _test_eof125: ( m.cs) = 125; goto _test_eof
- _test_eof126: ( m.cs) = 126; goto _test_eof
_test_eof483: ( m.cs) = 483; goto _test_eof
_test_eof484: ( m.cs) = 484; goto _test_eof
_test_eof485: ( m.cs) = 485; goto _test_eof
- _test_eof127: ( m.cs) = 127; goto _test_eof
_test_eof486: ( m.cs) = 486; goto _test_eof
_test_eof487: ( m.cs) = 487; goto _test_eof
_test_eof488: ( m.cs) = 488; goto _test_eof
+ _test_eof113: ( m.cs) = 113; goto _test_eof
+ _test_eof114: ( m.cs) = 114; goto _test_eof
+ _test_eof115: ( m.cs) = 115; goto _test_eof
_test_eof489: ( m.cs) = 489; goto _test_eof
+ _test_eof116: ( m.cs) = 116; goto _test_eof
+ _test_eof117: ( m.cs) = 117; goto _test_eof
+ _test_eof118: ( m.cs) = 118; goto _test_eof
_test_eof490: ( m.cs) = 490; goto _test_eof
+ _test_eof119: ( m.cs) = 119; goto _test_eof
+ _test_eof120: ( m.cs) = 120; goto _test_eof
_test_eof491: ( m.cs) = 491; goto _test_eof
_test_eof492: ( m.cs) = 492; goto _test_eof
+ _test_eof121: ( m.cs) = 121; goto _test_eof
+ _test_eof122: ( m.cs) = 122; goto _test_eof
+ _test_eof123: ( m.cs) = 123; goto _test_eof
+ _test_eof124: ( m.cs) = 124; goto _test_eof
_test_eof493: ( m.cs) = 493; goto _test_eof
_test_eof494: ( m.cs) = 494; goto _test_eof
_test_eof495: ( m.cs) = 495; goto _test_eof
+ _test_eof125: ( m.cs) = 125; goto _test_eof
_test_eof496: ( m.cs) = 496; goto _test_eof
_test_eof497: ( m.cs) = 497; goto _test_eof
_test_eof498: ( m.cs) = 498; goto _test_eof
@@ -29742,8 +30447,6 @@ tr425:
_test_eof503: ( m.cs) = 503; goto _test_eof
_test_eof504: ( m.cs) = 504; goto _test_eof
_test_eof505: ( m.cs) = 505; goto _test_eof
- _test_eof128: ( m.cs) = 128; goto _test_eof
- _test_eof129: ( m.cs) = 129; goto _test_eof
_test_eof506: ( m.cs) = 506; goto _test_eof
_test_eof507: ( m.cs) = 507; goto _test_eof
_test_eof508: ( m.cs) = 508; goto _test_eof
@@ -29753,44 +30456,38 @@ tr425:
_test_eof512: ( m.cs) = 512; goto _test_eof
_test_eof513: ( m.cs) = 513; goto _test_eof
_test_eof514: ( m.cs) = 514; goto _test_eof
- _test_eof130: ( m.cs) = 130; goto _test_eof
- _test_eof131: ( m.cs) = 131; goto _test_eof
- _test_eof132: ( m.cs) = 132; goto _test_eof
_test_eof515: ( m.cs) = 515; goto _test_eof
- _test_eof133: ( m.cs) = 133; goto _test_eof
- _test_eof134: ( m.cs) = 134; goto _test_eof
- _test_eof135: ( m.cs) = 135; goto _test_eof
+ _test_eof126: ( m.cs) = 126; goto _test_eof
+ _test_eof127: ( m.cs) = 127; goto _test_eof
_test_eof516: ( m.cs) = 516; goto _test_eof
- _test_eof136: ( m.cs) = 136; goto _test_eof
- _test_eof137: ( m.cs) = 137; goto _test_eof
_test_eof517: ( m.cs) = 517; goto _test_eof
_test_eof518: ( m.cs) = 518; goto _test_eof
- _test_eof138: ( m.cs) = 138; goto _test_eof
- _test_eof139: ( m.cs) = 139; goto _test_eof
- _test_eof140: ( m.cs) = 140; goto _test_eof
_test_eof519: ( m.cs) = 519; goto _test_eof
_test_eof520: ( m.cs) = 520; goto _test_eof
- _test_eof141: ( m.cs) = 141; goto _test_eof
_test_eof521: ( m.cs) = 521; goto _test_eof
- _test_eof142: ( m.cs) = 142; goto _test_eof
_test_eof522: ( m.cs) = 522; goto _test_eof
_test_eof523: ( m.cs) = 523; goto _test_eof
_test_eof524: ( m.cs) = 524; goto _test_eof
+ _test_eof128: ( m.cs) = 128; goto _test_eof
+ _test_eof129: ( m.cs) = 129; goto _test_eof
+ _test_eof130: ( m.cs) = 130; goto _test_eof
_test_eof525: ( m.cs) = 525; goto _test_eof
+ _test_eof131: ( m.cs) = 131; goto _test_eof
+ _test_eof132: ( m.cs) = 132; goto _test_eof
+ _test_eof133: ( m.cs) = 133; goto _test_eof
_test_eof526: ( m.cs) = 526; goto _test_eof
+ _test_eof134: ( m.cs) = 134; goto _test_eof
+ _test_eof135: ( m.cs) = 135; goto _test_eof
_test_eof527: ( m.cs) = 527; goto _test_eof
_test_eof528: ( m.cs) = 528; goto _test_eof
+ _test_eof136: ( m.cs) = 136; goto _test_eof
+ _test_eof137: ( m.cs) = 137; goto _test_eof
+ _test_eof138: ( m.cs) = 138; goto _test_eof
_test_eof529: ( m.cs) = 529; goto _test_eof
- _test_eof143: ( m.cs) = 143; goto _test_eof
- _test_eof144: ( m.cs) = 144; goto _test_eof
- _test_eof145: ( m.cs) = 145; goto _test_eof
_test_eof530: ( m.cs) = 530; goto _test_eof
- _test_eof146: ( m.cs) = 146; goto _test_eof
- _test_eof147: ( m.cs) = 147; goto _test_eof
- _test_eof148: ( m.cs) = 148; goto _test_eof
+ _test_eof139: ( m.cs) = 139; goto _test_eof
_test_eof531: ( m.cs) = 531; goto _test_eof
- _test_eof149: ( m.cs) = 149; goto _test_eof
- _test_eof150: ( m.cs) = 150; goto _test_eof
+ _test_eof140: ( m.cs) = 140; goto _test_eof
_test_eof532: ( m.cs) = 532; goto _test_eof
_test_eof533: ( m.cs) = 533; goto _test_eof
_test_eof534: ( m.cs) = 534; goto _test_eof
@@ -29799,8 +30496,16 @@ tr425:
_test_eof537: ( m.cs) = 537; goto _test_eof
_test_eof538: ( m.cs) = 538; goto _test_eof
_test_eof539: ( m.cs) = 539; goto _test_eof
+ _test_eof141: ( m.cs) = 141; goto _test_eof
+ _test_eof142: ( m.cs) = 142; goto _test_eof
+ _test_eof143: ( m.cs) = 143; goto _test_eof
_test_eof540: ( m.cs) = 540; goto _test_eof
+ _test_eof144: ( m.cs) = 144; goto _test_eof
+ _test_eof145: ( m.cs) = 145; goto _test_eof
+ _test_eof146: ( m.cs) = 146; goto _test_eof
_test_eof541: ( m.cs) = 541; goto _test_eof
+ _test_eof147: ( m.cs) = 147; goto _test_eof
+ _test_eof148: ( m.cs) = 148; goto _test_eof
_test_eof542: ( m.cs) = 542; goto _test_eof
_test_eof543: ( m.cs) = 543; goto _test_eof
_test_eof544: ( m.cs) = 544; goto _test_eof
@@ -29811,25 +30516,25 @@ tr425:
_test_eof549: ( m.cs) = 549; goto _test_eof
_test_eof550: ( m.cs) = 550; goto _test_eof
_test_eof551: ( m.cs) = 551; goto _test_eof
- _test_eof151: ( m.cs) = 151; goto _test_eof
- _test_eof152: ( m.cs) = 152; goto _test_eof
_test_eof552: ( m.cs) = 552; goto _test_eof
_test_eof553: ( m.cs) = 553; goto _test_eof
_test_eof554: ( m.cs) = 554; goto _test_eof
- _test_eof153: ( m.cs) = 153; goto _test_eof
_test_eof555: ( m.cs) = 555; goto _test_eof
_test_eof556: ( m.cs) = 556; goto _test_eof
- _test_eof154: ( m.cs) = 154; goto _test_eof
_test_eof557: ( m.cs) = 557; goto _test_eof
_test_eof558: ( m.cs) = 558; goto _test_eof
_test_eof559: ( m.cs) = 559; goto _test_eof
_test_eof560: ( m.cs) = 560; goto _test_eof
_test_eof561: ( m.cs) = 561; goto _test_eof
+ _test_eof149: ( m.cs) = 149; goto _test_eof
+ _test_eof150: ( m.cs) = 150; goto _test_eof
_test_eof562: ( m.cs) = 562; goto _test_eof
_test_eof563: ( m.cs) = 563; goto _test_eof
_test_eof564: ( m.cs) = 564; goto _test_eof
+ _test_eof151: ( m.cs) = 151; goto _test_eof
_test_eof565: ( m.cs) = 565; goto _test_eof
_test_eof566: ( m.cs) = 566; goto _test_eof
+ _test_eof152: ( m.cs) = 152; goto _test_eof
_test_eof567: ( m.cs) = 567; goto _test_eof
_test_eof568: ( m.cs) = 568; goto _test_eof
_test_eof569: ( m.cs) = 569; goto _test_eof
@@ -29838,10 +30543,7 @@ tr425:
_test_eof572: ( m.cs) = 572; goto _test_eof
_test_eof573: ( m.cs) = 573; goto _test_eof
_test_eof574: ( m.cs) = 574; goto _test_eof
- _test_eof155: ( m.cs) = 155; goto _test_eof
- _test_eof156: ( m.cs) = 156; goto _test_eof
_test_eof575: ( m.cs) = 575; goto _test_eof
- _test_eof157: ( m.cs) = 157; goto _test_eof
_test_eof576: ( m.cs) = 576; goto _test_eof
_test_eof577: ( m.cs) = 577; goto _test_eof
_test_eof578: ( m.cs) = 578; goto _test_eof
@@ -29850,34 +30552,37 @@ tr425:
_test_eof581: ( m.cs) = 581; goto _test_eof
_test_eof582: ( m.cs) = 582; goto _test_eof
_test_eof583: ( m.cs) = 583; goto _test_eof
- _test_eof158: ( m.cs) = 158; goto _test_eof
- _test_eof159: ( m.cs) = 159; goto _test_eof
- _test_eof160: ( m.cs) = 160; goto _test_eof
_test_eof584: ( m.cs) = 584; goto _test_eof
- _test_eof161: ( m.cs) = 161; goto _test_eof
- _test_eof162: ( m.cs) = 162; goto _test_eof
- _test_eof163: ( m.cs) = 163; goto _test_eof
+ _test_eof153: ( m.cs) = 153; goto _test_eof
+ _test_eof154: ( m.cs) = 154; goto _test_eof
_test_eof585: ( m.cs) = 585; goto _test_eof
- _test_eof164: ( m.cs) = 164; goto _test_eof
- _test_eof165: ( m.cs) = 165; goto _test_eof
+ _test_eof155: ( m.cs) = 155; goto _test_eof
_test_eof586: ( m.cs) = 586; goto _test_eof
_test_eof587: ( m.cs) = 587; goto _test_eof
- _test_eof166: ( m.cs) = 166; goto _test_eof
- _test_eof167: ( m.cs) = 167; goto _test_eof
- _test_eof168: ( m.cs) = 168; goto _test_eof
- _test_eof169: ( m.cs) = 169; goto _test_eof
- _test_eof170: ( m.cs) = 170; goto _test_eof
- _test_eof171: ( m.cs) = 171; goto _test_eof
_test_eof588: ( m.cs) = 588; goto _test_eof
_test_eof589: ( m.cs) = 589; goto _test_eof
_test_eof590: ( m.cs) = 590; goto _test_eof
_test_eof591: ( m.cs) = 591; goto _test_eof
_test_eof592: ( m.cs) = 592; goto _test_eof
_test_eof593: ( m.cs) = 593; goto _test_eof
+ _test_eof156: ( m.cs) = 156; goto _test_eof
+ _test_eof157: ( m.cs) = 157; goto _test_eof
+ _test_eof158: ( m.cs) = 158; goto _test_eof
_test_eof594: ( m.cs) = 594; goto _test_eof
+ _test_eof159: ( m.cs) = 159; goto _test_eof
+ _test_eof160: ( m.cs) = 160; goto _test_eof
+ _test_eof161: ( m.cs) = 161; goto _test_eof
_test_eof595: ( m.cs) = 595; goto _test_eof
+ _test_eof162: ( m.cs) = 162; goto _test_eof
+ _test_eof163: ( m.cs) = 163; goto _test_eof
_test_eof596: ( m.cs) = 596; goto _test_eof
_test_eof597: ( m.cs) = 597; goto _test_eof
+ _test_eof164: ( m.cs) = 164; goto _test_eof
+ _test_eof165: ( m.cs) = 165; goto _test_eof
+ _test_eof166: ( m.cs) = 166; goto _test_eof
+ _test_eof167: ( m.cs) = 167; goto _test_eof
+ _test_eof168: ( m.cs) = 168; goto _test_eof
+ _test_eof169: ( m.cs) = 169; goto _test_eof
_test_eof598: ( m.cs) = 598; goto _test_eof
_test_eof599: ( m.cs) = 599; goto _test_eof
_test_eof600: ( m.cs) = 600; goto _test_eof
@@ -29887,68 +30592,65 @@ tr425:
_test_eof604: ( m.cs) = 604; goto _test_eof
_test_eof605: ( m.cs) = 605; goto _test_eof
_test_eof606: ( m.cs) = 606; goto _test_eof
- _test_eof172: ( m.cs) = 172; goto _test_eof
- _test_eof173: ( m.cs) = 173; goto _test_eof
- _test_eof174: ( m.cs) = 174; goto _test_eof
_test_eof607: ( m.cs) = 607; goto _test_eof
_test_eof608: ( m.cs) = 608; goto _test_eof
_test_eof609: ( m.cs) = 609; goto _test_eof
- _test_eof175: ( m.cs) = 175; goto _test_eof
_test_eof610: ( m.cs) = 610; goto _test_eof
_test_eof611: ( m.cs) = 611; goto _test_eof
- _test_eof176: ( m.cs) = 176; goto _test_eof
_test_eof612: ( m.cs) = 612; goto _test_eof
_test_eof613: ( m.cs) = 613; goto _test_eof
_test_eof614: ( m.cs) = 614; goto _test_eof
_test_eof615: ( m.cs) = 615; goto _test_eof
_test_eof616: ( m.cs) = 616; goto _test_eof
- _test_eof177: ( m.cs) = 177; goto _test_eof
- _test_eof178: ( m.cs) = 178; goto _test_eof
- _test_eof179: ( m.cs) = 179; goto _test_eof
+ _test_eof170: ( m.cs) = 170; goto _test_eof
+ _test_eof171: ( m.cs) = 171; goto _test_eof
+ _test_eof172: ( m.cs) = 172; goto _test_eof
_test_eof617: ( m.cs) = 617; goto _test_eof
- _test_eof180: ( m.cs) = 180; goto _test_eof
- _test_eof181: ( m.cs) = 181; goto _test_eof
- _test_eof182: ( m.cs) = 182; goto _test_eof
_test_eof618: ( m.cs) = 618; goto _test_eof
- _test_eof183: ( m.cs) = 183; goto _test_eof
- _test_eof184: ( m.cs) = 184; goto _test_eof
_test_eof619: ( m.cs) = 619; goto _test_eof
+ _test_eof173: ( m.cs) = 173; goto _test_eof
_test_eof620: ( m.cs) = 620; goto _test_eof
- _test_eof185: ( m.cs) = 185; goto _test_eof
_test_eof621: ( m.cs) = 621; goto _test_eof
+ _test_eof174: ( m.cs) = 174; goto _test_eof
_test_eof622: ( m.cs) = 622; goto _test_eof
- _test_eof186: ( m.cs) = 186; goto _test_eof
- _test_eof187: ( m.cs) = 187; goto _test_eof
- _test_eof188: ( m.cs) = 188; goto _test_eof
_test_eof623: ( m.cs) = 623; goto _test_eof
- _test_eof189: ( m.cs) = 189; goto _test_eof
- _test_eof190: ( m.cs) = 190; goto _test_eof
_test_eof624: ( m.cs) = 624; goto _test_eof
_test_eof625: ( m.cs) = 625; goto _test_eof
_test_eof626: ( m.cs) = 626; goto _test_eof
+ _test_eof175: ( m.cs) = 175; goto _test_eof
+ _test_eof176: ( m.cs) = 176; goto _test_eof
+ _test_eof177: ( m.cs) = 177; goto _test_eof
_test_eof627: ( m.cs) = 627; goto _test_eof
+ _test_eof178: ( m.cs) = 178; goto _test_eof
+ _test_eof179: ( m.cs) = 179; goto _test_eof
+ _test_eof180: ( m.cs) = 180; goto _test_eof
_test_eof628: ( m.cs) = 628; goto _test_eof
+ _test_eof181: ( m.cs) = 181; goto _test_eof
+ _test_eof182: ( m.cs) = 182; goto _test_eof
_test_eof629: ( m.cs) = 629; goto _test_eof
_test_eof630: ( m.cs) = 630; goto _test_eof
+ _test_eof183: ( m.cs) = 183; goto _test_eof
_test_eof631: ( m.cs) = 631; goto _test_eof
- _test_eof191: ( m.cs) = 191; goto _test_eof
- _test_eof192: ( m.cs) = 192; goto _test_eof
- _test_eof193: ( m.cs) = 193; goto _test_eof
_test_eof632: ( m.cs) = 632; goto _test_eof
- _test_eof194: ( m.cs) = 194; goto _test_eof
- _test_eof195: ( m.cs) = 195; goto _test_eof
- _test_eof196: ( m.cs) = 196; goto _test_eof
_test_eof633: ( m.cs) = 633; goto _test_eof
- _test_eof197: ( m.cs) = 197; goto _test_eof
- _test_eof198: ( m.cs) = 198; goto _test_eof
+ _test_eof184: ( m.cs) = 184; goto _test_eof
+ _test_eof185: ( m.cs) = 185; goto _test_eof
+ _test_eof186: ( m.cs) = 186; goto _test_eof
_test_eof634: ( m.cs) = 634; goto _test_eof
+ _test_eof187: ( m.cs) = 187; goto _test_eof
+ _test_eof188: ( m.cs) = 188; goto _test_eof
+ _test_eof189: ( m.cs) = 189; goto _test_eof
_test_eof635: ( m.cs) = 635; goto _test_eof
- _test_eof199: ( m.cs) = 199; goto _test_eof
- _test_eof200: ( m.cs) = 200; goto _test_eof
- _test_eof201: ( m.cs) = 201; goto _test_eof
+ _test_eof190: ( m.cs) = 190; goto _test_eof
+ _test_eof191: ( m.cs) = 191; goto _test_eof
_test_eof636: ( m.cs) = 636; goto _test_eof
_test_eof637: ( m.cs) = 637; goto _test_eof
+ _test_eof192: ( m.cs) = 192; goto _test_eof
+ _test_eof193: ( m.cs) = 193; goto _test_eof
+ _test_eof194: ( m.cs) = 194; goto _test_eof
_test_eof638: ( m.cs) = 638; goto _test_eof
+ _test_eof195: ( m.cs) = 195; goto _test_eof
+ _test_eof196: ( m.cs) = 196; goto _test_eof
_test_eof639: ( m.cs) = 639; goto _test_eof
_test_eof640: ( m.cs) = 640; goto _test_eof
_test_eof641: ( m.cs) = 641; goto _test_eof
@@ -29957,22 +30659,26 @@ tr425:
_test_eof644: ( m.cs) = 644; goto _test_eof
_test_eof645: ( m.cs) = 645; goto _test_eof
_test_eof646: ( m.cs) = 646; goto _test_eof
+ _test_eof197: ( m.cs) = 197; goto _test_eof
+ _test_eof198: ( m.cs) = 198; goto _test_eof
+ _test_eof199: ( m.cs) = 199; goto _test_eof
_test_eof647: ( m.cs) = 647; goto _test_eof
+ _test_eof200: ( m.cs) = 200; goto _test_eof
+ _test_eof201: ( m.cs) = 201; goto _test_eof
+ _test_eof202: ( m.cs) = 202; goto _test_eof
_test_eof648: ( m.cs) = 648; goto _test_eof
+ _test_eof203: ( m.cs) = 203; goto _test_eof
+ _test_eof204: ( m.cs) = 204; goto _test_eof
_test_eof649: ( m.cs) = 649; goto _test_eof
_test_eof650: ( m.cs) = 650; goto _test_eof
+ _test_eof205: ( m.cs) = 205; goto _test_eof
+ _test_eof206: ( m.cs) = 206; goto _test_eof
+ _test_eof207: ( m.cs) = 207; goto _test_eof
_test_eof651: ( m.cs) = 651; goto _test_eof
_test_eof652: ( m.cs) = 652; goto _test_eof
_test_eof653: ( m.cs) = 653; goto _test_eof
_test_eof654: ( m.cs) = 654; goto _test_eof
- _test_eof202: ( m.cs) = 202; goto _test_eof
- _test_eof203: ( m.cs) = 203; goto _test_eof
- _test_eof204: ( m.cs) = 204; goto _test_eof
- _test_eof205: ( m.cs) = 205; goto _test_eof
- _test_eof206: ( m.cs) = 206; goto _test_eof
_test_eof655: ( m.cs) = 655; goto _test_eof
- _test_eof207: ( m.cs) = 207; goto _test_eof
- _test_eof208: ( m.cs) = 208; goto _test_eof
_test_eof656: ( m.cs) = 656; goto _test_eof
_test_eof657: ( m.cs) = 657; goto _test_eof
_test_eof658: ( m.cs) = 658; goto _test_eof
@@ -29982,26 +30688,19 @@ tr425:
_test_eof662: ( m.cs) = 662; goto _test_eof
_test_eof663: ( m.cs) = 663; goto _test_eof
_test_eof664: ( m.cs) = 664; goto _test_eof
- _test_eof209: ( m.cs) = 209; goto _test_eof
- _test_eof210: ( m.cs) = 210; goto _test_eof
- _test_eof211: ( m.cs) = 211; goto _test_eof
_test_eof665: ( m.cs) = 665; goto _test_eof
- _test_eof212: ( m.cs) = 212; goto _test_eof
- _test_eof213: ( m.cs) = 213; goto _test_eof
- _test_eof214: ( m.cs) = 214; goto _test_eof
_test_eof666: ( m.cs) = 666; goto _test_eof
- _test_eof215: ( m.cs) = 215; goto _test_eof
- _test_eof216: ( m.cs) = 216; goto _test_eof
_test_eof667: ( m.cs) = 667; goto _test_eof
_test_eof668: ( m.cs) = 668; goto _test_eof
- _test_eof217: ( m.cs) = 217; goto _test_eof
- _test_eof218: ( m.cs) = 218; goto _test_eof
- _test_eof219: ( m.cs) = 219; goto _test_eof
- _test_eof220: ( m.cs) = 220; goto _test_eof
_test_eof669: ( m.cs) = 669; goto _test_eof
- _test_eof221: ( m.cs) = 221; goto _test_eof
- _test_eof222: ( m.cs) = 222; goto _test_eof
+ _test_eof208: ( m.cs) = 208; goto _test_eof
+ _test_eof209: ( m.cs) = 209; goto _test_eof
+ _test_eof210: ( m.cs) = 210; goto _test_eof
+ _test_eof211: ( m.cs) = 211; goto _test_eof
+ _test_eof212: ( m.cs) = 212; goto _test_eof
_test_eof670: ( m.cs) = 670; goto _test_eof
+ _test_eof213: ( m.cs) = 213; goto _test_eof
+ _test_eof214: ( m.cs) = 214; goto _test_eof
_test_eof671: ( m.cs) = 671; goto _test_eof
_test_eof672: ( m.cs) = 672; goto _test_eof
_test_eof673: ( m.cs) = 673; goto _test_eof
@@ -30009,24 +30708,26 @@ tr425:
_test_eof675: ( m.cs) = 675; goto _test_eof
_test_eof676: ( m.cs) = 676; goto _test_eof
_test_eof677: ( m.cs) = 677; goto _test_eof
- _test_eof223: ( m.cs) = 223; goto _test_eof
- _test_eof224: ( m.cs) = 224; goto _test_eof
- _test_eof225: ( m.cs) = 225; goto _test_eof
_test_eof678: ( m.cs) = 678; goto _test_eof
- _test_eof226: ( m.cs) = 226; goto _test_eof
- _test_eof227: ( m.cs) = 227; goto _test_eof
- _test_eof228: ( m.cs) = 228; goto _test_eof
_test_eof679: ( m.cs) = 679; goto _test_eof
- _test_eof229: ( m.cs) = 229; goto _test_eof
- _test_eof230: ( m.cs) = 230; goto _test_eof
+ _test_eof215: ( m.cs) = 215; goto _test_eof
+ _test_eof216: ( m.cs) = 216; goto _test_eof
+ _test_eof217: ( m.cs) = 217; goto _test_eof
_test_eof680: ( m.cs) = 680; goto _test_eof
+ _test_eof218: ( m.cs) = 218; goto _test_eof
+ _test_eof219: ( m.cs) = 219; goto _test_eof
+ _test_eof220: ( m.cs) = 220; goto _test_eof
_test_eof681: ( m.cs) = 681; goto _test_eof
- _test_eof231: ( m.cs) = 231; goto _test_eof
- _test_eof232: ( m.cs) = 232; goto _test_eof
- _test_eof233: ( m.cs) = 233; goto _test_eof
+ _test_eof221: ( m.cs) = 221; goto _test_eof
+ _test_eof222: ( m.cs) = 222; goto _test_eof
_test_eof682: ( m.cs) = 682; goto _test_eof
_test_eof683: ( m.cs) = 683; goto _test_eof
+ _test_eof223: ( m.cs) = 223; goto _test_eof
+ _test_eof224: ( m.cs) = 224; goto _test_eof
+ _test_eof225: ( m.cs) = 225; goto _test_eof
_test_eof684: ( m.cs) = 684; goto _test_eof
+ _test_eof226: ( m.cs) = 226; goto _test_eof
+ _test_eof227: ( m.cs) = 227; goto _test_eof
_test_eof685: ( m.cs) = 685; goto _test_eof
_test_eof686: ( m.cs) = 686; goto _test_eof
_test_eof687: ( m.cs) = 687; goto _test_eof
@@ -30035,7 +30736,12 @@ tr425:
_test_eof690: ( m.cs) = 690; goto _test_eof
_test_eof691: ( m.cs) = 691; goto _test_eof
_test_eof692: ( m.cs) = 692; goto _test_eof
+ _test_eof228: ( m.cs) = 228; goto _test_eof
+ _test_eof229: ( m.cs) = 229; goto _test_eof
+ _test_eof230: ( m.cs) = 230; goto _test_eof
_test_eof693: ( m.cs) = 693; goto _test_eof
+ _test_eof231: ( m.cs) = 231; goto _test_eof
+ _test_eof232: ( m.cs) = 232; goto _test_eof
_test_eof694: ( m.cs) = 694; goto _test_eof
_test_eof695: ( m.cs) = 695; goto _test_eof
_test_eof696: ( m.cs) = 696; goto _test_eof
@@ -30043,708 +30749,841 @@ tr425:
_test_eof698: ( m.cs) = 698; goto _test_eof
_test_eof699: ( m.cs) = 699; goto _test_eof
_test_eof700: ( m.cs) = 700; goto _test_eof
+ _test_eof701: ( m.cs) = 701; goto _test_eof
+ _test_eof233: ( m.cs) = 233; goto _test_eof
_test_eof234: ( m.cs) = 234; goto _test_eof
_test_eof235: ( m.cs) = 235; goto _test_eof
- _test_eof701: ( m.cs) = 701; goto _test_eof
+ _test_eof702: ( m.cs) = 702; goto _test_eof
_test_eof236: ( m.cs) = 236; goto _test_eof
_test_eof237: ( m.cs) = 237; goto _test_eof
- _test_eof702: ( m.cs) = 702; goto _test_eof
+ _test_eof238: ( m.cs) = 238; goto _test_eof
_test_eof703: ( m.cs) = 703; goto _test_eof
+ _test_eof239: ( m.cs) = 239; goto _test_eof
+ _test_eof240: ( m.cs) = 240; goto _test_eof
_test_eof704: ( m.cs) = 704; goto _test_eof
_test_eof705: ( m.cs) = 705; goto _test_eof
+ _test_eof241: ( m.cs) = 241; goto _test_eof
+ _test_eof242: ( m.cs) = 242; goto _test_eof
+ _test_eof243: ( m.cs) = 243; goto _test_eof
_test_eof706: ( m.cs) = 706; goto _test_eof
_test_eof707: ( m.cs) = 707; goto _test_eof
_test_eof708: ( m.cs) = 708; goto _test_eof
_test_eof709: ( m.cs) = 709; goto _test_eof
- _test_eof238: ( m.cs) = 238; goto _test_eof
- _test_eof239: ( m.cs) = 239; goto _test_eof
- _test_eof240: ( m.cs) = 240; goto _test_eof
_test_eof710: ( m.cs) = 710; goto _test_eof
- _test_eof241: ( m.cs) = 241; goto _test_eof
- _test_eof242: ( m.cs) = 242; goto _test_eof
- _test_eof243: ( m.cs) = 243; goto _test_eof
_test_eof711: ( m.cs) = 711; goto _test_eof
- _test_eof244: ( m.cs) = 244; goto _test_eof
- _test_eof245: ( m.cs) = 245; goto _test_eof
_test_eof712: ( m.cs) = 712; goto _test_eof
_test_eof713: ( m.cs) = 713; goto _test_eof
- _test_eof246: ( m.cs) = 246; goto _test_eof
- _test_eof247: ( m.cs) = 247; goto _test_eof
_test_eof714: ( m.cs) = 714; goto _test_eof
- _test_eof250: ( m.cs) = 250; goto _test_eof
+ _test_eof715: ( m.cs) = 715; goto _test_eof
+ _test_eof716: ( m.cs) = 716; goto _test_eof
_test_eof717: ( m.cs) = 717; goto _test_eof
_test_eof718: ( m.cs) = 718; goto _test_eof
+ _test_eof719: ( m.cs) = 719; goto _test_eof
+ _test_eof720: ( m.cs) = 720; goto _test_eof
+ _test_eof721: ( m.cs) = 721; goto _test_eof
+ _test_eof722: ( m.cs) = 722; goto _test_eof
+ _test_eof723: ( m.cs) = 723; goto _test_eof
+ _test_eof724: ( m.cs) = 724; goto _test_eof
+ _test_eof244: ( m.cs) = 244; goto _test_eof
+ _test_eof245: ( m.cs) = 245; goto _test_eof
+ _test_eof725: ( m.cs) = 725; goto _test_eof
+ _test_eof246: ( m.cs) = 246; goto _test_eof
+ _test_eof247: ( m.cs) = 247; goto _test_eof
+ _test_eof726: ( m.cs) = 726; goto _test_eof
+ _test_eof727: ( m.cs) = 727; goto _test_eof
+ _test_eof728: ( m.cs) = 728; goto _test_eof
+ _test_eof729: ( m.cs) = 729; goto _test_eof
+ _test_eof730: ( m.cs) = 730; goto _test_eof
+ _test_eof731: ( m.cs) = 731; goto _test_eof
+ _test_eof732: ( m.cs) = 732; goto _test_eof
+ _test_eof733: ( m.cs) = 733; goto _test_eof
+ _test_eof248: ( m.cs) = 248; goto _test_eof
+ _test_eof249: ( m.cs) = 249; goto _test_eof
+ _test_eof250: ( m.cs) = 250; goto _test_eof
+ _test_eof734: ( m.cs) = 734; goto _test_eof
_test_eof251: ( m.cs) = 251; goto _test_eof
_test_eof252: ( m.cs) = 252; goto _test_eof
_test_eof253: ( m.cs) = 253; goto _test_eof
+ _test_eof735: ( m.cs) = 735; goto _test_eof
_test_eof254: ( m.cs) = 254; goto _test_eof
- _test_eof719: ( m.cs) = 719; goto _test_eof
_test_eof255: ( m.cs) = 255; goto _test_eof
- _test_eof720: ( m.cs) = 720; goto _test_eof
+ _test_eof736: ( m.cs) = 736; goto _test_eof
+ _test_eof737: ( m.cs) = 737; goto _test_eof
_test_eof256: ( m.cs) = 256; goto _test_eof
_test_eof257: ( m.cs) = 257; goto _test_eof
+ _test_eof738: ( m.cs) = 738; goto _test_eof
+ _test_eof260: ( m.cs) = 260; goto _test_eof
+ _test_eof740: ( m.cs) = 740; goto _test_eof
+ _test_eof741: ( m.cs) = 741; goto _test_eof
+ _test_eof261: ( m.cs) = 261; goto _test_eof
+ _test_eof262: ( m.cs) = 262; goto _test_eof
+ _test_eof263: ( m.cs) = 263; goto _test_eof
+ _test_eof264: ( m.cs) = 264; goto _test_eof
+ _test_eof742: ( m.cs) = 742; goto _test_eof
+ _test_eof265: ( m.cs) = 265; goto _test_eof
+ _test_eof743: ( m.cs) = 743; goto _test_eof
+ _test_eof266: ( m.cs) = 266; goto _test_eof
+ _test_eof267: ( m.cs) = 267; goto _test_eof
+ _test_eof268: ( m.cs) = 268; goto _test_eof
+ _test_eof739: ( m.cs) = 739; goto _test_eof
_test_eof258: ( m.cs) = 258; goto _test_eof
- _test_eof715: ( m.cs) = 715; goto _test_eof
- _test_eof716: ( m.cs) = 716; goto _test_eof
- _test_eof248: ( m.cs) = 248; goto _test_eof
- _test_eof249: ( m.cs) = 249; goto _test_eof
+ _test_eof259: ( m.cs) = 259; goto _test_eof
_test_eof: {}
if ( m.p) == ( m.eof) {
switch ( m.cs) {
- case 9, 250:
-//line plugins/parsers/influx/machine.go.rl:23
+ case 7, 260:
+//line plugins/parsers/influx/machine.go.rl:32
+
+ err = ErrNameParse
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+ case 2, 3, 4, 5, 6, 27, 30, 31, 34, 35, 36, 48, 49, 50, 51, 52, 72, 73, 75, 92, 102, 104, 140, 152, 155, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256:
+//line plugins/parsers/influx/machine.go.rl:39
+
+ err = ErrFieldParse
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+ case 12, 13, 14, 21, 23, 24, 262, 263, 264, 265, 266, 267:
+//line plugins/parsers/influx/machine.go.rl:46
+
+ err = ErrTagParse
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+ case 243:
+//line plugins/parsers/influx/machine.go.rl:53
+
+ err = ErrTimestampParse
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+ case 740:
+//line plugins/parsers/influx/machine.go.rl:86
+
+ err = m.handler.SetMeasurement(m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+ }
+
+ case 742, 743:
+//line plugins/parsers/influx/machine.go.rl:99
+
+ err = m.handler.AddTag(m.key, m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+ }
+
+ case 270, 271, 272, 273, 274, 276, 277, 296, 297, 298, 300, 301, 304, 305, 326, 327, 328, 329, 331, 375, 376, 378, 379, 401, 402, 407, 408, 410, 430, 431, 433, 434, 456, 457, 617, 620:
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 9, 37, 39, 164, 166:
+//line plugins/parsers/influx/machine.go.rl:32
err = ErrNameParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+//line plugins/parsers/influx/machine.go.rl:39
+
+ err = ErrFieldParse
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+ case 33, 74, 103, 169, 207:
+//line plugins/parsers/influx/machine.go.rl:39
+
+ err = ErrFieldParse
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+//line plugins/parsers/influx/machine.go.rl:53
+
+ err = ErrTimestampParse
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+ case 19, 43, 44, 45, 57, 58, 60, 62, 67, 69, 70, 76, 77, 78, 83, 85, 87, 88, 96, 97, 99, 100, 101, 106, 107, 108, 121, 122, 136, 137:
+//line plugins/parsers/influx/machine.go.rl:46
+
+ err = ErrTagParse
+ ( m.p)--
+
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 2, 3, 4, 5, 6, 7, 8, 29, 32, 33, 36, 37, 38, 50, 51, 52, 53, 54, 74, 76, 77, 94, 104, 106, 142, 154, 157, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246:
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 14, 15, 16, 23, 25, 26, 252, 253, 254, 255, 256, 257:
-//line plugins/parsers/influx/machine.go.rl:37
+ case 59:
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 233:
-//line plugins/parsers/influx/machine.go.rl:44
+//line plugins/parsers/influx/machine.go.rl:53
err = ErrTimestampParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 259:
-//line plugins/parsers/influx/machine.go.rl:73
+ case 269:
+//line plugins/parsers/influx/machine.go.rl:82
+
+ m.beginMetric = true
+
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 1:
+//line plugins/parsers/influx/machine.go.rl:86
- foundMetric = true
+ err = m.handler.SetMeasurement(m.text())
+ if err != nil {
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+ }
+
+//line plugins/parsers/influx/machine.go.rl:46
+
+ err = ErrTagParse
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
- case 289, 292, 296, 364, 388, 389, 393, 394, 395, 519, 553, 554, 556, 717:
-//line plugins/parsers/influx/machine.go.rl:77
+ case 299, 302, 306, 374, 398, 399, 403, 404, 405, 529, 563, 564, 566:
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 340, 341, 342, 344, 363, 419, 443, 444, 448, 468, 484, 485, 487, 719, 720:
-//line plugins/parsers/influx/machine.go.rl:90
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 15, 22:
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 613, 659, 704:
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:46
- err = m.handler.AddInt(key, m.text())
+ err = ErrTagParse
+ ( m.p)--
+
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+ case 350, 351, 352, 354, 373, 429, 453, 454, 458, 478, 494, 495, 497:
+//line plugins/parsers/influx/machine.go.rl:99
+
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 614, 662, 707:
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 623, 674, 688, 728:
//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 315, 608, 609, 611, 612, 615, 621, 622, 655, 656, 657, 658, 660, 661, 663, 701, 702, 703, 705, 706, 708:
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 624, 677, 691, 731:
//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 616, 617, 618, 619, 620, 664, 665, 666, 667, 668, 709, 710, 711, 712, 713:
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 325, 618, 619, 621, 622, 625, 631, 632, 670, 671, 672, 673, 675, 676, 678, 684, 685, 686, 687, 689, 690, 692, 725, 726, 727, 729, 730, 732:
//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 265, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 320, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 367, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 399, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 422, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700:
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:178
- err = m.handler.SetTimestamp(m.text())
+ m.finishMetric = true
+
+ case 626, 627, 628, 629, 630, 633, 634, 635, 636, 637, 679, 680, 681, 682, 683, 733, 734, 735, 736, 737:
+//line plugins/parsers/influx/machine.go.rl:139
+
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 11, 39, 41, 166, 168:
-//line plugins/parsers/influx/machine.go.rl:23
+//line plugins/parsers/influx/machine.go.rl:178
- err = ErrNameParse
- ( m.p)--
+ m.finishMetric = true
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
+ case 275, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 330, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 377, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 409, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 432, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724:
+//line plugins/parsers/influx/machine.go.rl:157
+
+ err = m.handler.SetTimestamp(m.text())
+ if err != nil {
+ ( m.p)--
-//line plugins/parsers/influx/machine.go.rl:30
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+ }
- err = ErrFieldParse
- ( m.p)--
+//line plugins/parsers/influx/machine.go.rl:178
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
+ m.finishMetric = true
- case 35, 75, 105, 171, 201:
-//line plugins/parsers/influx/machine.go.rl:30
+ case 8:
+//line plugins/parsers/influx/machine.go.rl:32
- err = ErrFieldParse
+ err = ErrNameParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:44
+//line plugins/parsers/influx/machine.go.rl:86
- err = ErrTimestampParse
- ( m.p)--
+ err = m.handler.SetMeasurement(m.text())
+ if err != nil {
+ ( m.p)--
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+ }
- case 21, 45, 46, 47, 59, 60, 62, 64, 69, 71, 72, 78, 79, 80, 85, 87, 89, 90, 98, 99, 101, 102, 103, 108, 109, 110, 123, 124, 138, 139:
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:30
+ case 98:
+//line plugins/parsers/influx/machine.go.rl:46
- err = ErrFieldParse
+ err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 61:
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:39
- err = ErrTagParse
+ err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:44
+//line plugins/parsers/influx/machine.go.rl:53
err = ErrTimestampParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 1:
-//line plugins/parsers/influx/machine.go.rl:77
+ case 10, 11, 25, 26, 28, 29, 40, 41, 53, 54, 55, 56, 71, 90, 91, 93, 95, 138, 139, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 153, 154, 156, 157, 158, 159, 160, 161, 162, 163, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240:
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
+
+//line plugins/parsers/influx/machine.go.rl:39
+
+ err = ErrFieldParse
+ ( m.p)--
+
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 524, 578, 672:
-//line plugins/parsers/influx/machine.go.rl:77
+ case 534, 588, 696:
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:103
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 527, 581, 675:
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 537, 591, 699:
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
+//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 396, 520, 521, 522, 523, 525, 526, 528, 552, 575, 576, 577, 579, 580, 582, 669, 670, 671, 673, 674, 676:
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 406, 530, 531, 532, 533, 535, 536, 538, 562, 585, 586, 587, 589, 590, 592, 693, 694, 695, 697, 698, 700:
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:121
+//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 529, 530, 531, 532, 533, 583, 584, 585, 586, 587, 677, 678, 679, 680, 681:
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 539, 540, 541, 542, 543, 593, 594, 595, 596, 597, 701, 702, 703, 704, 705:
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:130
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 293, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 390, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 555, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574:
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 303, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 400, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 565, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584:
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:148
+//line plugins/parsers/influx/machine.go.rl:157
err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 17, 24:
-//line plugins/parsers/influx/machine.go.rl:90
+//line plugins/parsers/influx/machine.go.rl:178
+
+ m.finishMetric = true
+
+ case 16, 17, 18, 20, 46, 47, 63, 64, 65, 66, 68, 79, 80, 81, 82, 84, 86, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 123, 124, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204:
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 473, 509, 626:
-//line plugins/parsers/influx/machine.go.rl:90
+//line plugins/parsers/influx/machine.go.rl:39
- err = m.handler.AddTag(key, m.text())
- if err != nil {
- ( m.p)--
+ err = ErrFieldParse
+ ( m.p)--
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
- }
+ ( m.cs) = 257;
+ {( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:103
+ case 483, 519, 641:
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 476, 512, 629:
-//line plugins/parsers/influx/machine.go.rl:90
+//line plugins/parsers/influx/machine.go.rl:112
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:112
-
- err = m.handler.AddUint(key, m.text())
- if err != nil {
- ( m.p)--
+//line plugins/parsers/influx/machine.go.rl:178
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
- }
+ m.finishMetric = true
- case 467, 469, 470, 471, 472, 474, 475, 477, 483, 506, 507, 508, 510, 511, 513, 623, 624, 625, 627, 628, 630:
-//line plugins/parsers/influx/machine.go.rl:90
+ case 486, 522, 644:
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
//line plugins/parsers/influx/machine.go.rl:121
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 478, 479, 480, 481, 482, 514, 515, 516, 517, 518, 631, 632, 633, 634, 635:
-//line plugins/parsers/influx/machine.go.rl:90
+//line plugins/parsers/influx/machine.go.rl:178
- err = m.handler.AddTag(key, m.text())
+ m.finishMetric = true
+
+ case 477, 479, 480, 481, 482, 484, 485, 487, 493, 516, 517, 518, 520, 521, 523, 638, 639, 640, 642, 643, 645:
+//line plugins/parsers/influx/machine.go.rl:99
+
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
//line plugins/parsers/influx/machine.go.rl:130
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 343, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 445, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 486, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505:
-//line plugins/parsers/influx/machine.go.rl:90
-
- err = m.handler.AddTag(key, m.text())
- if err != nil {
- ( m.p)--
+//line plugins/parsers/influx/machine.go.rl:178
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
- }
+ m.finishMetric = true
-//line plugins/parsers/influx/machine.go.rl:148
+ case 488, 489, 490, 491, 492, 524, 525, 526, 527, 528, 646, 647, 648, 649, 650:
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.SetTimestamp(m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
- case 10:
-//line plugins/parsers/influx/machine.go.rl:23
-
- err = ErrNameParse
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
-
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:139
- err = m.handler.SetMeasurement(m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:37
-
- err = ErrTagParse
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
-
- case 100:
-//line plugins/parsers/influx/machine.go.rl:37
-
- err = ErrTagParse
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
-
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:178
- err = ErrFieldParse
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
-
-//line plugins/parsers/influx/machine.go.rl:44
-
- err = ErrTimestampParse
- ( m.p)--
+ m.finishMetric = true
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
-
- case 12, 13, 27, 28, 30, 31, 42, 43, 55, 56, 57, 58, 73, 92, 93, 95, 97, 140, 141, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 155, 156, 158, 159, 160, 161, 162, 163, 164, 165, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230:
-//line plugins/parsers/influx/machine.go.rl:77
+ case 353, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 455, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 496, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515:
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.SetMeasurement(m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:37
-
- err = ErrTagParse
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
-
-//line plugins/parsers/influx/machine.go.rl:30
-
- err = ErrFieldParse
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
-
- case 18, 19, 20, 22, 48, 49, 65, 66, 67, 68, 70, 81, 82, 83, 84, 86, 88, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 125, 126, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198:
-//line plugins/parsers/influx/machine.go.rl:90
+//line plugins/parsers/influx/machine.go.rl:157
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.SetTimestamp(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:37
-
- err = ErrTagParse
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
-
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:178
- err = ErrFieldParse
- ( m.p)--
-
- ( m.cs) = 247;
- {( m.p)++; ( m.cs) = 0; goto _out }
+ m.finishMetric = true
- case 40, 167, 169, 170, 199, 200, 231, 232:
-//line plugins/parsers/influx/machine.go.rl:23
+ case 38, 165, 167, 168, 205, 206, 241, 242:
+//line plugins/parsers/influx/machine.go.rl:32
err = ErrNameParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:77
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 44, 91, 153:
-//line plugins/parsers/influx/machine.go.rl:77
+ case 42, 89, 151:
+//line plugins/parsers/influx/machine.go.rl:86
err = m.handler.SetMeasurement(m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:44
+//line plugins/parsers/influx/machine.go.rl:53
err = ErrTimestampParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
- case 63, 107, 127:
-//line plugins/parsers/influx/machine.go.rl:90
+ case 61, 105, 125:
+//line plugins/parsers/influx/machine.go.rl:99
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
}
-//line plugins/parsers/influx/machine.go.rl:37
+//line plugins/parsers/influx/machine.go.rl:46
err = ErrTagParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:30
+//line plugins/parsers/influx/machine.go.rl:39
err = ErrFieldParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go.rl:44
+//line plugins/parsers/influx/machine.go.rl:53
err = ErrTimestampParse
( m.p)--
- ( m.cs) = 247;
+ ( m.cs) = 257;
{( m.p)++; ( m.cs) = 0; goto _out }
-//line plugins/parsers/influx/machine.go:30741
+//line plugins/parsers/influx/machine.go:31580
}
}
_out: {}
}
-//line plugins/parsers/influx/machine.go.rl:390
+//line plugins/parsers/influx/machine.go.rl:415
if err != nil {
return err
@@ -30763,7 +31602,7 @@ tr425:
//
// Otherwise we have successfully parsed a metric line, so if we are at
// the EOF we will report it the next call.
- if !foundMetric && m.p == m.pe && m.pe == m.eof {
+ if !m.beginMetric && m.p == m.pe && m.pe == m.eof {
return EOF
}
@@ -30795,3 +31634,101 @@ func (m *machine) Column() int {
func (m *machine) text() []byte {
return m.data[m.pb:m.p]
}
+
+type streamMachine struct {
+ machine *machine
+ reader io.Reader
+}
+
+func NewStreamMachine(r io.Reader, handler Handler) *streamMachine {
+ m := &streamMachine{
+ machine: NewMachine(handler),
+ reader: r,
+ }
+
+ m.machine.SetData(make([]byte, 1024))
+ m.machine.pe = 0
+ m.machine.eof = -1
+ return m
+}
+
+func (m *streamMachine) Next() error {
+ // Check if we are already at EOF, this should only happen if called again
+ // after already returning EOF.
+ if m.machine.p == m.machine.pe && m.machine.pe == m.machine.eof {
+ return EOF
+ }
+
+ copy(m.machine.data, m.machine.data[m.machine.p:])
+ m.machine.pe = m.machine.pe - m.machine.p
+ m.machine.sol = m.machine.sol - m.machine.p
+ m.machine.pb = 0
+ m.machine.p = 0
+ m.machine.eof = -1
+
+ m.machine.key = nil
+ m.machine.beginMetric = false
+ m.machine.finishMetric = false
+
+ for {
+ // Expand the buffer if it is full
+ if m.machine.pe == len(m.machine.data) {
+ expanded := make([]byte, 2 * len(m.machine.data))
+ copy(expanded, m.machine.data)
+ m.machine.data = expanded
+ }
+
+ err := m.machine.exec()
+ if err != nil {
+ return err
+ }
+
+ // If we have successfully parsed a full metric line break out
+ if m.machine.finishMetric {
+ break
+ }
+
+ n, err := m.reader.Read(m.machine.data[m.machine.pe:])
+ if n == 0 && err == io.EOF {
+ m.machine.eof = m.machine.pe
+ } else if err != nil && err != io.EOF {
+ // After the reader returns an error this function shouldn't be
+ // called again. This will cause the machine to return EOF this
+ // is done.
+ m.machine.p = m.machine.pe
+ m.machine.eof = m.machine.pe
+ return &readErr{Err: err}
+ }
+
+ m.machine.pe += n
+
+ }
+
+ return nil
+}
+
+// Position returns the current byte offset into the data.
+func (m *streamMachine) Position() int {
+ return m.machine.Position()
+}
+
+// LineOffset returns the byte offset of the current line.
+func (m *streamMachine) LineOffset() int {
+ return m.machine.LineOffset()
+}
+
+// LineNumber returns the current line number. Lines are counted based on the
+// regular expression `\r?\n`.
+func (m *streamMachine) LineNumber() int {
+ return m.machine.LineNumber()
+}
+
+// Column returns the current column.
+func (m *streamMachine) Column() int {
+ return m.machine.Column()
+}
+
+// LineText returns the text of the current line that has been parsed so far.
+func (m *streamMachine) LineText() string {
+ return string(m.machine.data[0:m.machine.p])
+}
diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl
index 52b32b2b836af..f8f40cd7c1dc0 100644
--- a/plugins/parsers/influx/machine.go.rl
+++ b/plugins/parsers/influx/machine.go.rl
@@ -2,8 +2,17 @@ package influx
import (
"errors"
+ "io"
)
+type readErr struct {
+ Err error
+}
+
+func (e *readErr) Error() string {
+ return e.Err.Error()
+}
+
var (
ErrNameParse = errors.New("expected measurement name")
ErrFieldParse = errors.New("expected field")
@@ -70,8 +79,8 @@ action goto_align {
fgoto align;
}
-action found_metric {
- foundMetric = true
+action begin_metric {
+ m.beginMetric = true
}
action name {
@@ -84,11 +93,11 @@ action name {
}
action tagkey {
- key = m.text()
+ m.key = m.text()
}
action tagvalue {
- err = m.handler.AddTag(key, m.text())
+ err = m.handler.AddTag(m.key, m.text())
if err != nil {
fhold;
fnext discard_line;
@@ -97,11 +106,11 @@ action tagvalue {
}
action fieldkey {
- key = m.text()
+ m.key = m.text()
}
action integer {
- err = m.handler.AddInt(key, m.text())
+ err = m.handler.AddInt(m.key, m.text())
if err != nil {
fhold;
fnext discard_line;
@@ -110,7 +119,7 @@ action integer {
}
action unsigned {
- err = m.handler.AddUint(key, m.text())
+ err = m.handler.AddUint(m.key, m.text())
if err != nil {
fhold;
fnext discard_line;
@@ -119,7 +128,7 @@ action unsigned {
}
action float {
- err = m.handler.AddFloat(key, m.text())
+ err = m.handler.AddFloat(m.key, m.text())
if err != nil {
fhold;
fnext discard_line;
@@ -128,7 +137,7 @@ action float {
}
action bool {
- err = m.handler.AddBool(key, m.text())
+ err = m.handler.AddBool(m.key, m.text())
if err != nil {
fhold;
fnext discard_line;
@@ -137,7 +146,7 @@ action bool {
}
action string {
- err = m.handler.AddString(key, m.text())
+ err = m.handler.AddString(m.key, m.text())
if err != nil {
fhold;
fnext discard_line;
@@ -161,15 +170,20 @@ action incr_newline {
}
action eol {
+ m.finishMetric = true
fnext align;
fbreak;
}
+action finish_metric {
+ m.finishMetric = true
+}
+
ws =
[\t\v\f ];
newline =
- '\r'? '\n' %to(incr_newline);
+ '\r'? '\n' >incr_newline;
non_zero_digit =
[1-9];
@@ -214,7 +228,7 @@ fieldbool =
(true | false) >begin %bool;
fieldstringchar =
- [^\f\r\n\\"] | '\\' [\\"] | newline;
+ [^\n\\"] | '\\' [\\"] | newline;
fieldstring =
fieldstringchar* >begin %string;
@@ -273,7 +287,7 @@ line_without_term =
main :=
(line_with_term*
(line_with_term | line_without_term?)
- ) >found_metric
+ ) >begin_metric %eof(finish_metric)
;
# The discard_line machine discards the current line. Useful for recovering
@@ -299,7 +313,7 @@ align :=
# Series is a machine for matching measurement+tagset
series :=
(measurement >err(name_error) tagset eol_break?)
- >found_metric
+ >begin_metric
;
}%%
@@ -317,14 +331,17 @@ type Handler interface {
}
type machine struct {
- data []byte
- cs int
- p, pe, eof int
- pb int
- lineno int
- sol int
- handler Handler
- initState int
+ data []byte
+ cs int
+ p, pe, eof int
+ pb int
+ lineno int
+ sol int
+ handler Handler
+ initState int
+ key []byte
+ beginMetric bool
+ finishMetric bool
}
func NewMachine(handler Handler) *machine {
@@ -368,6 +385,9 @@ func (m *machine) SetData(data []byte) {
m.sol = 0
m.pe = len(data)
m.eof = len(data)
+ m.key = nil
+ m.beginMetric = false
+ m.finishMetric = false
%% write init;
m.cs = m.initState
@@ -382,10 +402,15 @@ func (m *machine) Next() error {
return EOF
}
- var err error
- var key []byte
- foundMetric := false
+ m.key = nil
+ m.beginMetric = false
+ m.finishMetric = false
+
+ return m.exec()
+}
+func (m *machine) exec() error {
+ var err error
%% write exec;
if err != nil {
@@ -405,7 +430,7 @@ func (m *machine) Next() error {
//
// Otherwise we have successfully parsed a metric line, so if we are at
// the EOF we will report it the next call.
- if !foundMetric && m.p == m.pe && m.pe == m.eof {
+ if !m.beginMetric && m.p == m.pe && m.pe == m.eof {
return EOF
}
@@ -437,3 +462,101 @@ func (m *machine) Column() int {
func (m *machine) text() []byte {
return m.data[m.pb:m.p]
}
+
+type streamMachine struct {
+ machine *machine
+ reader io.Reader
+}
+
+func NewStreamMachine(r io.Reader, handler Handler) *streamMachine {
+ m := &streamMachine{
+ machine: NewMachine(handler),
+ reader: r,
+ }
+
+ m.machine.SetData(make([]byte, 1024))
+ m.machine.pe = 0
+ m.machine.eof = -1
+ return m
+}
+
+func (m *streamMachine) Next() error {
+ // Check if we are already at EOF, this should only happen if called again
+ // after already returning EOF.
+ if m.machine.p == m.machine.pe && m.machine.pe == m.machine.eof {
+ return EOF
+ }
+
+ copy(m.machine.data, m.machine.data[m.machine.p:])
+ m.machine.pe = m.machine.pe - m.machine.p
+ m.machine.sol = m.machine.sol - m.machine.p
+ m.machine.pb = 0
+ m.machine.p = 0
+ m.machine.eof = -1
+
+ m.machine.key = nil
+ m.machine.beginMetric = false
+ m.machine.finishMetric = false
+
+ for {
+ // Expand the buffer if it is full
+ if m.machine.pe == len(m.machine.data) {
+ expanded := make([]byte, 2 * len(m.machine.data))
+ copy(expanded, m.machine.data)
+ m.machine.data = expanded
+ }
+
+ err := m.machine.exec()
+ if err != nil {
+ return err
+ }
+
+ // If we have successfully parsed a full metric line break out
+ if m.machine.finishMetric {
+ break
+ }
+
+ n, err := m.reader.Read(m.machine.data[m.machine.pe:])
+ if n == 0 && err == io.EOF {
+ m.machine.eof = m.machine.pe
+ } else if err != nil && err != io.EOF {
+ // After the reader returns an error this function shouldn't be
+ // called again. This will cause the machine to return EOF this
+ // is done.
+ m.machine.p = m.machine.pe
+ m.machine.eof = m.machine.pe
+ return &readErr{Err: err}
+ }
+
+ m.machine.pe += n
+
+ }
+
+ return nil
+}
+
+// Position returns the current byte offset into the data.
+func (m *streamMachine) Position() int {
+ return m.machine.Position()
+}
+
+// LineOffset returns the byte offset of the current line.
+func (m *streamMachine) LineOffset() int {
+ return m.machine.LineOffset()
+}
+
+// LineNumber returns the current line number. Lines are counted based on the
+// regular expression `\r?\n`.
+func (m *streamMachine) LineNumber() int {
+ return m.machine.LineNumber()
+}
+
+// Column returns the current column.
+func (m *streamMachine) Column() int {
+ return m.machine.Column()
+}
+
+// LineText returns the text of the current line that has been parsed so far.
+func (m *streamMachine) LineText() string {
+ return string(m.machine.data[0:m.machine.p])
+}
diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go
index a1c921ef12d0e..de5353da0c446 100644
--- a/plugins/parsers/influx/machine_test.go
+++ b/plugins/parsers/influx/machine_test.go
@@ -1,8 +1,10 @@
package influx_test
import (
+ "bytes"
"errors"
"fmt"
+ "io"
"testing"
"github.com/influxdata/telegraf/plugins/parsers/influx"
@@ -14,41 +16,59 @@ type TestingHandler struct {
}
func (h *TestingHandler) SetMeasurement(name []byte) error {
+ n := make([]byte, len(name))
+ copy(n, name)
+
mname := Result{
Name: Measurement,
- Value: name,
+ Value: n,
}
h.results = append(h.results, mname)
return nil
}
func (h *TestingHandler) AddTag(key []byte, value []byte) error {
+ k := make([]byte, len(key))
+ copy(k, key)
+ v := make([]byte, len(value))
+ copy(v, value)
+
tagkey := Result{
Name: TagKey,
- Value: key,
+ Value: k,
}
tagvalue := Result{
Name: TagValue,
- Value: value,
+ Value: v,
}
h.results = append(h.results, tagkey, tagvalue)
return nil
}
func (h *TestingHandler) AddInt(key []byte, value []byte) error {
+ k := make([]byte, len(key))
+ copy(k, key)
+ v := make([]byte, len(value))
+ copy(v, value)
+
fieldkey := Result{
Name: FieldKey,
- Value: key,
+ Value: k,
}
fieldvalue := Result{
Name: FieldInt,
- Value: value,
+ Value: v,
}
h.results = append(h.results, fieldkey, fieldvalue)
return nil
}
func (h *TestingHandler) AddUint(key []byte, value []byte) error {
+ k := make([]byte, len(key))
+ copy(k, key)
+ v := make([]byte, len(value))
+ copy(v, value)
+
fieldkey := Result{
Name: FieldKey,
Value: key,
@@ -62,48 +82,66 @@ func (h *TestingHandler) AddUint(key []byte, value []byte) error {
}
func (h *TestingHandler) AddFloat(key []byte, value []byte) error {
+ k := make([]byte, len(key))
+ copy(k, key)
+ v := make([]byte, len(value))
+ copy(v, value)
+
fieldkey := Result{
Name: FieldKey,
- Value: key,
+ Value: k,
}
fieldvalue := Result{
Name: FieldFloat,
- Value: value,
+ Value: v,
}
h.results = append(h.results, fieldkey, fieldvalue)
return nil
}
func (h *TestingHandler) AddString(key []byte, value []byte) error {
+ k := make([]byte, len(key))
+ copy(k, key)
+ v := make([]byte, len(value))
+ copy(v, value)
+
fieldkey := Result{
Name: FieldKey,
- Value: key,
+ Value: k,
}
fieldvalue := Result{
Name: FieldString,
- Value: value,
+ Value: v,
}
h.results = append(h.results, fieldkey, fieldvalue)
return nil
}
func (h *TestingHandler) AddBool(key []byte, value []byte) error {
+ k := make([]byte, len(key))
+ copy(k, key)
+ v := make([]byte, len(value))
+ copy(v, value)
+
fieldkey := Result{
Name: FieldKey,
- Value: key,
+ Value: k,
}
fieldvalue := Result{
Name: FieldBool,
- Value: value,
+ Value: v,
}
h.results = append(h.results, fieldkey, fieldvalue)
return nil
}
func (h *TestingHandler) SetTimestamp(tm []byte) error {
+ t := make([]byte, len(tm))
+ copy(t, tm)
+
timestamp := Result{
Name: Timestamp,
- Value: tm,
+ Value: t,
}
h.results = append(h.results, timestamp)
return nil
@@ -835,6 +873,27 @@ var tests = []struct {
},
},
},
+ {
+ name: "cr in string field",
+ input: []byte("cpu value=\"4\r2\""),
+ results: []Result{
+ {
+ Name: Measurement,
+ Value: []byte("cpu"),
+ },
+ {
+ Name: FieldKey,
+ Value: []byte("value"),
+ },
+ {
+ Name: FieldString,
+ Value: []byte("4\r2"),
+ },
+ {
+ Name: Success,
+ },
+ },
+ },
{
name: "bool field",
input: []byte("cpu value=true"),
@@ -1676,63 +1735,64 @@ func TestMachine(t *testing.T) {
}
}
+var positionTests = []struct {
+ name string
+ input []byte
+ lineno int
+ column int
+}{
+ {
+ name: "empty string",
+ input: []byte(""),
+ lineno: 1,
+ column: 1,
+ },
+ {
+ name: "minimal",
+ input: []byte("cpu value=42"),
+ lineno: 1,
+ column: 13,
+ },
+ {
+ name: "one newline",
+ input: []byte("cpu value=42\ncpu value=42"),
+ lineno: 2,
+ column: 13,
+ },
+ {
+ name: "several newlines",
+ input: []byte("cpu value=42\n\n\n"),
+ lineno: 4,
+ column: 1,
+ },
+ {
+ name: "error on second line",
+ input: []byte("cpu value=42\ncpu value=invalid"),
+ lineno: 2,
+ column: 11,
+ },
+ {
+ name: "error after comment line",
+ input: []byte("cpu value=42\n# comment\ncpu value=invalid"),
+ lineno: 3,
+ column: 11,
+ },
+ {
+ name: "dos line endings",
+ input: []byte("cpu value=42\r\ncpu value=invalid"),
+ lineno: 2,
+ column: 11,
+ },
+ {
+ name: "mac line endings not supported",
+ input: []byte("cpu value=42\rcpu value=invalid"),
+ lineno: 1,
+ column: 14,
+ },
+}
+
func TestMachinePosition(t *testing.T) {
- var tests = []struct {
- name string
- input []byte
- lineno int
- column int
- }{
- {
- name: "empty string",
- input: []byte(""),
- lineno: 1,
- column: 1,
- },
- {
- name: "minimal",
- input: []byte("cpu value=42"),
- lineno: 1,
- column: 13,
- },
- {
- name: "one newline",
- input: []byte("cpu value=42\ncpu value=42"),
- lineno: 2,
- column: 13,
- },
- {
- name: "several newlines",
- input: []byte("cpu value=42\n\n\n"),
- lineno: 4,
- column: 1,
- },
- {
- name: "error on second line",
- input: []byte("cpu value=42\ncpu value=invalid"),
- lineno: 2,
- column: 11,
- },
- {
- name: "error after comment line",
- input: []byte("cpu value=42\n# comment\ncpu value=invalid"),
- lineno: 3,
- column: 11,
- },
- {
- name: "dos line endings",
- input: []byte("cpu value=42\r\ncpu value=invalid"),
- lineno: 2,
- column: 11,
- },
- {
- name: "mac line endings not supported",
- input: []byte("cpu value=42\rcpu value=invalid"),
- lineno: 1,
- column: 14,
- },
- }
- for _, tt := range tests {
+ for _, tt := range positionTests {
t.Run(tt.name, func(t *testing.T) {
handler := &TestingHandler{}
fsm := influx.NewMachine(handler)
@@ -1773,7 +1833,7 @@ func BenchmarkMachine(b *testing.B) {
}
func TestMachineProcstat(t *testing.T) {
- input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000")
+ input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000")
handler := &TestingHandler{}
fsm := influx.NewMachine(handler)
fsm.SetData(input)
@@ -1786,7 +1846,7 @@ func TestMachineProcstat(t *testing.T) {
}
func BenchmarkMachineProcstat(b *testing.B) {
- input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000")
+ input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000")
handler := &BenchmarkingHandler{}
fsm := influx.NewMachine(handler)
for n := 0; n < b.N; n++ {
@@ -1932,135 +1992,136 @@ func (h *MockHandler) SetTimestamp(tm []byte) error {
return h.SetTimestampF(tm)
}
-func TestHandlerErrorRecovery(t *testing.T) {
- var tests = []struct {
- name string
- input []byte
- handler *MockHandler
- results []Result
- }{
- {
- name: "integer",
- input: []byte("cpu value=43i\ncpu value=42i"),
- handler: &MockHandler{
- SetMeasurementF: func(name []byte) error {
- return nil
- },
- AddIntF: func(name, value []byte) error {
- if string(value) != "42i" {
- return errors.New("handler error")
- }
- return nil
- },
+var errorRecoveryTests = []struct {
+ name string
+ input []byte
+ handler *MockHandler
+ results []Result
+}{
+ {
+ name: "integer",
+ input: []byte("cpu value=43i\ncpu value=42i"),
+ handler: &MockHandler{
+ SetMeasurementF: func(name []byte) error {
+ return nil
},
- results: []Result{
- {
- Name: Measurement,
- Value: []byte("cpu"),
- },
- {
- Name: Error,
- err: errors.New("handler error"),
- },
- {
- Name: Measurement,
- Value: []byte("cpu"),
- },
- {
- Name: FieldKey,
- Value: []byte("value"),
- },
- {
- Name: FieldInt,
- Value: []byte("42i"),
- },
- {
- Name: Success,
- },
+ AddIntF: func(name, value []byte) error {
+ if string(value) != "42i" {
+ return errors.New("handler error")
+ }
+ return nil
},
},
- {
- name: "integer with timestamp",
- input: []byte("cpu value=43i 1516241192000000000\ncpu value=42i"),
- handler: &MockHandler{
- SetMeasurementF: func(name []byte) error {
- return nil
- },
- AddIntF: func(name, value []byte) error {
- if string(value) != "42i" {
- return errors.New("handler error")
- }
- return nil
- },
+ results: []Result{
+ {
+ Name: Measurement,
+ Value: []byte("cpu"),
},
- results: []Result{
- {
- Name: Measurement,
- Value: []byte("cpu"),
- },
- {
- Name: Error,
- err: errors.New("handler error"),
- },
- {
- Name: Measurement,
- Value: []byte("cpu"),
- },
- {
- Name: FieldKey,
- Value: []byte("value"),
- },
- {
- Name: FieldInt,
- Value: []byte("42i"),
- },
- {
- Name: Success,
- },
+ {
+ Name: Error,
+ err: errors.New("handler error"),
+ },
+ {
+ Name: Measurement,
+ Value: []byte("cpu"),
+ },
+ {
+ Name: FieldKey,
+ Value: []byte("value"),
+ },
+ {
+ Name: FieldInt,
+ Value: []byte("42i"),
+ },
+ {
+ Name: Success,
},
},
- {
- name: "unsigned",
- input: []byte("cpu value=43u\ncpu value=42u"),
- handler: &MockHandler{
- SetMeasurementF: func(name []byte) error {
- return nil
- },
- AddUintF: func(name, value []byte) error {
- if string(value) != "42u" {
- return errors.New("handler error")
- }
- return nil
- },
+ },
+ {
+ name: "integer with timestamp",
+ input: []byte("cpu value=43i 1516241192000000000\ncpu value=42i"),
+ handler: &MockHandler{
+ SetMeasurementF: func(name []byte) error {
+ return nil
},
- results: []Result{
- {
- Name: Measurement,
- Value: []byte("cpu"),
- },
- {
- Name: Error,
- err: errors.New("handler error"),
- },
- {
- Name: Measurement,
- Value: []byte("cpu"),
- },
- {
- Name: FieldKey,
- Value: []byte("value"),
- },
- {
- Name: FieldUint,
- Value: []byte("42u"),
- },
- {
- Name: Success,
- },
+ AddIntF: func(name, value []byte) error {
+ if string(value) != "42i" {
+ return errors.New("handler error")
+ }
+ return nil
},
},
- }
- for _, tt := range tests {
+ results: []Result{
+ {
+ Name: Measurement,
+ Value: []byte("cpu"),
+ },
+ {
+ Name: Error,
+ err: errors.New("handler error"),
+ },
+ {
+ Name: Measurement,
+ Value: []byte("cpu"),
+ },
+ {
+ Name: FieldKey,
+ Value: []byte("value"),
+ },
+ {
+ Name: FieldInt,
+ Value: []byte("42i"),
+ },
+ {
+ Name: Success,
+ },
+ },
+ },
+ {
+ name: "unsigned",
+ input: []byte("cpu value=43u\ncpu value=42u"),
+ handler: &MockHandler{
+ SetMeasurementF: func(name []byte) error {
+ return nil
+ },
+ AddUintF: func(name, value []byte) error {
+ if string(value) != "42u" {
+ return errors.New("handler error")
+ }
+ return nil
+ },
+ },
+ results: []Result{
+ {
+ Name: Measurement,
+ Value: []byte("cpu"),
+ },
+ {
+ Name: Error,
+ err: errors.New("handler error"),
+ },
+ {
+ Name: Measurement,
+ Value: []byte("cpu"),
+ },
+ {
+ Name: FieldKey,
+ Value: []byte("value"),
+ },
+ {
+ Name: FieldUint,
+ Value: []byte("42u"),
+ },
+ {
+ Name: Success,
+ },
+ },
+ },
+}
+
+func TestHandlerErrorRecovery(t *testing.T) {
+ for _, tt := range errorRecoveryTests {
t.Run(tt.name, func(t *testing.T) {
fsm := influx.NewMachine(tt.handler)
fsm.SetData(tt.input)
@@ -2078,3 +2139,79 @@ func TestHandlerErrorRecovery(t *testing.T) {
})
}
}
+
+func TestStreamMachine(t *testing.T) {
+ type testcase struct {
+ name string
+ input io.Reader
+ results []Result
+ err error
+ }
+
+ var tc []testcase
+ for _, tt := range tests {
+ tc = append(tc, testcase{
+ name: tt.name,
+ input: bytes.NewBuffer([]byte(tt.input)),
+ results: tt.results,
+ err: tt.err,
+ })
+ }
+
+ for _, tt := range tc {
+ t.Run(tt.name, func(t *testing.T) {
+ handler := &TestingHandler{}
+ fsm := influx.NewStreamMachine(tt.input, handler)
+
+ // Parse only up to 20 metrics; to avoid any bugs where the parser
+ // isn't terminated.
+ for i := 0; i < 20; i++ {
+ err := fsm.Next()
+ if err != nil && err == influx.EOF {
+ break
+ }
+ handler.Result(err)
+ }
+
+ results := handler.Results()
+ require.Equal(t, tt.results, results)
+ })
+ }
+}
+
+func TestStreamMachinePosition(t *testing.T) {
+ type testcase struct {
+ name string
+ input io.Reader
+ lineno int
+ column int
+ }
+
+ var tc []testcase
+ for _, tt := range positionTests {
+ tc = append(tc, testcase{
+ name: tt.name,
+ input: bytes.NewBuffer([]byte(tt.input)),
+ lineno: tt.lineno,
+ column: tt.column,
+ })
+ }
+
+ for _, tt := range tc {
+ t.Run(tt.name, func(t *testing.T) {
+ handler := &TestingHandler{}
+ fsm := influx.NewStreamMachine(tt.input, handler)
+
+ // Parse until an error or eof
+ for i := 0; i < 20; i++ {
+ err := fsm.Next()
+ if err != nil {
+ break
+ }
+ }
+
+ require.Equal(t, tt.lineno, fsm.LineNumber(), "lineno")
+ require.Equal(t, tt.column, fsm.Column(), "column")
+ })
+ }
+}
diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go
index f1cd9a0325ecd..620104ac6b93b 100644
--- a/plugins/parsers/influx/parser.go
+++ b/plugins/parsers/influx/parser.go
@@ -3,8 +3,10 @@ package influx
import (
"errors"
"fmt"
+ "io"
"strings"
"sync"
+ "time"
"github.com/influxdata/telegraf"
)
@@ -17,6 +19,9 @@ var (
ErrNoMetric = errors.New("no metric in line")
)
+type TimeFunc func() time.Time
+
+// ParseError indicates a error in the parsing of the text.
type ParseError struct {
Offset int
LineOffset int
@@ -33,11 +38,26 @@ func (e *ParseError) Error() string {
buffer = buffer[:eol]
}
if len(buffer) > maxErrorBufferSize {
- buffer = buffer[:maxErrorBufferSize] + "..."
+ startEllipsis := true
+ offset := e.Offset - e.LineOffset
+ start := offset - maxErrorBufferSize
+ if start < 0 {
+ startEllipsis = false
+ start = 0
+ }
+ // if we trimmed it the column won't line up. it'll always be the last character,
+ // because the parser doesn't continue past it, but point it out anyway so
+ // it's obvious where the issue is.
+ buffer = buffer[start:offset] + "<-- here"
+ if startEllipsis {
+ buffer = "..." + buffer
+ }
}
return fmt.Sprintf("metric parse error: %s at %d:%d: %q", e.msg, e.LineNumber, e.Column, buffer)
}
+// Parser is an InfluxDB Line Protocol parser that implements the
+// parsers.Parser interface.
type Parser struct {
DefaultTags map[string]string
@@ -62,6 +82,10 @@ func NewSeriesParser(handler *MetricHandler) *Parser {
}
}
+func (h *Parser) SetTimeFunc(f TimeFunc) {
+ h.handler.SetTimeFunc(f)
+}
+
func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
p.Lock()
defer p.Unlock()
@@ -75,7 +99,6 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
}
if err != nil {
- p.handler.Reset()
return nil, &ParseError{
Offset: p.machine.Position(),
LineOffset: p.machine.LineOffset(),
@@ -88,7 +111,6 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
metric, err := p.handler.Metric()
if err != nil {
- p.handler.Reset()
return nil, err
}
@@ -126,10 +148,97 @@ func (p *Parser) applyDefaultTags(metrics []telegraf.Metric) {
}
for _, m := range metrics {
- for k, v := range p.DefaultTags {
- if !m.HasTag(k) {
- m.AddTag(k, v)
- }
+ p.applyDefaultTagsSingle(m)
+ }
+}
+
+func (p *Parser) applyDefaultTagsSingle(metric telegraf.Metric) {
+ for k, v := range p.DefaultTags {
+ if !metric.HasTag(k) {
+ metric.AddTag(k, v)
}
}
}
+
+// StreamParser is an InfluxDB Line Protocol parser. It is not safe for
+// concurrent use in multiple goroutines.
+type StreamParser struct {
+ machine *streamMachine
+ handler *MetricHandler
+}
+
+func NewStreamParser(r io.Reader) *StreamParser {
+ handler := NewMetricHandler()
+ return &StreamParser{
+ machine: NewStreamMachine(r, handler),
+ handler: handler,
+ }
+}
+
+// SetTimeFunc changes the function used to determine the time of metrics
+// without a timestamp. The default TimeFunc is time.Now. Useful mostly for
+// testing, or perhaps if you want all metrics to have the same timestamp.
+func (h *StreamParser) SetTimeFunc(f TimeFunc) {
+ h.handler.SetTimeFunc(f)
+}
+
+func (h *StreamParser) SetTimePrecision(u time.Duration) {
+ h.handler.SetTimePrecision(u)
+}
+
+// Next parses the next item from the stream. You can repeat calls to this
+// function if it returns ParseError to get the next metric or error.
+func (p *StreamParser) Next() (telegraf.Metric, error) {
+ err := p.machine.Next()
+ if err == EOF {
+ return nil, err
+ }
+
+ if e, ok := err.(*readErr); ok {
+ return nil, e.Err
+ }
+
+ if err != nil {
+ return nil, &ParseError{
+ Offset: p.machine.Position(),
+ LineOffset: p.machine.LineOffset(),
+ LineNumber: p.machine.LineNumber(),
+ Column: p.machine.Column(),
+ msg: err.Error(),
+ buf: p.machine.LineText(),
+ }
+ }
+
+ metric, err := p.handler.Metric()
+ if err != nil {
+ return nil, err
+ }
+
+ return metric, nil
+}
+
+// Position returns the current byte offset into the data.
+func (p *StreamParser) Position() int {
+ return p.machine.Position()
+}
+
+// LineOffset returns the byte offset of the current line.
+func (p *StreamParser) LineOffset() int {
+ return p.machine.LineOffset()
+}
+
+// LineNumber returns the current line number. Lines are counted based on the
+// regular expression `\r?\n`.
+func (p *StreamParser) LineNumber() int {
+ return p.machine.LineNumber()
+}
+
+// Column returns the current column.
+func (p *StreamParser) Column() int {
+ return p.machine.Column()
+}
+
+// LineText returns the text of the current line that has been parsed so far.
+func (p *StreamParser) LineText() string {
+ return p.machine.LineText()
+}
diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go
index 4d30eeb0b06f2..569eb3a22e7c0 100644
--- a/plugins/parsers/influx/parser_test.go
+++ b/plugins/parsers/influx/parser_test.go
@@ -1,6 +1,9 @@
package influx
import (
+ "bytes"
+ "errors"
+ "io"
"strconv"
"strings"
"testing"
@@ -8,6 +11,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@@ -23,12 +27,11 @@ var DefaultTime = func() time.Time {
}
var ptests = []struct {
- name string
- input []byte
- timeFunc func() time.Time
- precision time.Duration
- metrics []telegraf.Metric
- err error
+ name string
+ input []byte
+ timeFunc func() time.Time
+ metrics []telegraf.Metric
+ err error
}{
{
name: "minimal",
@@ -495,7 +498,7 @@ var ptests = []struct {
err: nil,
},
{
- name: "no timestamp full precision",
+ name: "no timestamp",
input: []byte("cpu value=42"),
timeFunc: func() time.Time {
return time.Unix(42, 123456789)
@@ -514,27 +517,6 @@ var ptests = []struct {
},
err: nil,
},
- {
- name: "no timestamp partial precision",
- input: []byte("cpu value=42"),
- timeFunc: func() time.Time {
- return time.Unix(42, 123456789)
- },
- precision: 1 * time.Millisecond,
- metrics: []telegraf.Metric{
- Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "value": 42.0,
- },
- time.Unix(42, 123000000),
- ),
- ),
- },
- err: nil,
- },
{
name: "multiple lines",
input: []byte("cpu value=42\ncpu value=42"),
@@ -576,7 +558,7 @@ var ptests = []struct {
},
{
name: "procstat",
- input: []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000"),
+ input: []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000"),
metrics: []telegraf.Metric{
Metric(
metric.New(
@@ -595,7 +577,6 @@ var ptests = []struct {
"cpu_time_nice": float64(0),
"cpu_time_soft_irq": float64(0),
"cpu_time_steal": float64(0),
- "cpu_time_stolen": float64(0),
"cpu_time_system": float64(0),
"cpu_time_user": float64(0.02),
"cpu_usage": float64(0),
@@ -652,14 +633,11 @@ func TestParser(t *testing.T) {
for _, tt := range ptests {
t.Run(tt.name, func(t *testing.T) {
handler := NewMetricHandler()
- handler.SetTimeFunc(DefaultTime)
+ parser := NewParser(handler)
+ parser.SetTimeFunc(DefaultTime)
if tt.timeFunc != nil {
- handler.SetTimeFunc(tt.timeFunc)
- }
- if tt.precision > 0 {
- handler.SetTimePrecision(tt.precision)
+ parser.SetTimeFunc(tt.timeFunc)
}
- parser := NewParser(handler)
metrics, err := parser.Parse(tt.input)
require.Equal(t, tt.err, err)
@@ -689,14 +667,41 @@ func BenchmarkParser(b *testing.B) {
}
}
+func TestStreamParser(t *testing.T) {
+ for _, tt := range ptests {
+ t.Run(tt.name, func(t *testing.T) {
+ r := bytes.NewBuffer(tt.input)
+ parser := NewStreamParser(r)
+ parser.SetTimeFunc(DefaultTime)
+ if tt.timeFunc != nil {
+ parser.SetTimeFunc(tt.timeFunc)
+ }
+
+ var i int
+ for {
+ m, err := parser.Next()
+ if err != nil {
+ if err == EOF {
+ break
+ }
+ require.Equal(t, tt.err, err)
+ break
+ }
+
+ testutil.RequireMetricEqual(t, tt.metrics[i], m)
+ i++
+ }
+ })
+ }
+}
+
func TestSeriesParser(t *testing.T) {
var tests = []struct {
- name string
- input []byte
- timeFunc func() time.Time
- precision time.Duration
- metrics []telegraf.Metric
- err error
+ name string
+ input []byte
+ timeFunc func() time.Time
+ metrics []telegraf.Metric
+ err error
}{
{
name: "empty",
@@ -750,14 +755,10 @@ func TestSeriesParser(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := NewMetricHandler()
- handler.SetTimeFunc(DefaultTime)
+ parser := NewSeriesParser(handler)
if tt.timeFunc != nil {
- handler.SetTimeFunc(tt.timeFunc)
- }
- if tt.precision > 0 {
- handler.SetTimePrecision(tt.precision)
+ parser.SetTimeFunc(tt.timeFunc)
}
- parser := NewSeriesParser(handler)
metrics, err := parser.Parse(tt.input)
require.Equal(t, tt.err, err)
@@ -790,7 +791,12 @@ func TestParserErrorString(t *testing.T) {
{
name: "buffer too long",
input: []byte("cpu " + strings.Repeat("ab", maxErrorBufferSize) + "=invalid\ncpu value=42"),
- errString: "metric parse error: expected field at 1:2054: \"cpu " + strings.Repeat("ab", maxErrorBufferSize)[:maxErrorBufferSize-4] + "...\"",
+ errString: "metric parse error: expected field at 1:2054: \"...b" + strings.Repeat("ab", maxErrorBufferSize/2-1) + "=<-- here\"",
+ },
+ {
+ name: "multiple line error",
+ input: []byte("cpu value=42\ncpu value=invalid\ncpu value=42\ncpu value=invalid"),
+ errString: `metric parse error: expected field at 2:11: "cpu value=invalid"`,
},
}
@@ -804,3 +810,105 @@ func TestParserErrorString(t *testing.T) {
})
}
}
+
+func TestStreamParserErrorString(t *testing.T) {
+ var ptests = []struct {
+ name string
+ input []byte
+ errs []string
+ }{
+ {
+ name: "multiple line error",
+ input: []byte("cpu value=42\ncpu value=invalid\ncpu value=42"),
+ errs: []string{
+ `metric parse error: expected field at 2:11: "cpu value="`,
+ },
+ },
+ {
+ name: "handler error",
+ input: []byte("cpu value=9223372036854775808i\ncpu value=42"),
+ errs: []string{
+ `metric parse error: value out of range at 1:31: "cpu value=9223372036854775808i"`,
+ },
+ },
+ {
+ name: "buffer too long",
+ input: []byte("cpu " + strings.Repeat("ab", maxErrorBufferSize) + "=invalid\ncpu value=42"),
+ errs: []string{
+ "metric parse error: expected field at 1:2054: \"...b" + strings.Repeat("ab", maxErrorBufferSize/2-1) + "=<-- here\"",
+ },
+ },
+ {
+ name: "multiple errors",
+ input: []byte("foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4.0"),
+ errs: []string{
+ `metric parse error: expected field at 1:12: "foo value=1"`,
+ `metric parse error: expected field at 3:12: "foo value=3"`,
+ },
+ },
+ }
+
+ for _, tt := range ptests {
+ t.Run(tt.name, func(t *testing.T) {
+ parser := NewStreamParser(bytes.NewBuffer(tt.input))
+
+ var errs []error
+ for i := 0; i < 20; i++ {
+ _, err := parser.Next()
+ if err == EOF {
+ break
+ }
+
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ require.Equal(t, len(tt.errs), len(errs))
+ for i, err := range errs {
+ require.Equal(t, tt.errs[i], err.Error())
+ }
+ })
+ }
+}
+
+type MockReader struct {
+ ReadF func(p []byte) (int, error)
+}
+
+func (r *MockReader) Read(p []byte) (int, error) {
+ return r.ReadF(p)
+}
+
+// Errors from the Reader are returned from the Parser
+func TestStreamParserReaderError(t *testing.T) {
+ readerErr := errors.New("error but not eof")
+
+ parser := NewStreamParser(&MockReader{
+ ReadF: func(p []byte) (int, error) {
+ return 0, readerErr
+ },
+ })
+ _, err := parser.Next()
+ require.Error(t, err)
+ require.Equal(t, err, readerErr)
+
+ _, err = parser.Next()
+ require.Equal(t, err, EOF)
+}
+
+func TestStreamParserProducesAllAvailableMetrics(t *testing.T) {
+ r, w := io.Pipe()
+
+ parser := NewStreamParser(r)
+ parser.SetTimeFunc(DefaultTime)
+
+ go w.Write([]byte("metric value=1\nmetric2 value=1\n"))
+
+ _, err := parser.Next()
+ require.NoError(t, err)
+
+ // should not block on second read
+ _, err = parser.Next()
+ require.NoError(t, err)
+}
diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md
index 60e1f3f9e61d2..d39a9d6bf77d9 100644
--- a/plugins/parsers/json/README.md
+++ b/plugins/parsers/json/README.md
@@ -18,20 +18,25 @@ ignored unless specified in the `tag_key` or `json_string_fields` options.
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
+ ## When strict is true and a JSON array is being parsed, all objects within the
+ ## array must be valid
+ json_strict = true
+
## Query is a GJSON path that specifies a specific chunk of JSON to be
## parsed, if not specified the whole document will be parsed.
##
## GJSON query paths are described here:
- ## https://github.com/tidwall/gjson#path-syntax
+ ## https://github.com/tidwall/gjson/tree/v1.3.0#path-syntax
json_query = ""
- ## Tag keys is an array of keys that should be added as tags.
+ ## Tag keys is an array of keys that should be added as tags. Matching keys
+ ## are no longer saved as fields.
tag_keys = [
"my_tag_1",
"my_tag_2"
]
- ## String fields is an array of keys that should be added as string fields.
+ ## Array of glob pattern strings keys that should be added as string fields.
json_string_fields = []
## Name key is the key to use as the measurement name.
@@ -41,7 +46,7 @@ ignored unless specified in the `tag_key` or `json_string_fields` options.
## metric.
json_time_key = ""
- ## Time format is the time layout that should be used to interprete the json_time_key.
+ ## Time format is the time layout that should be used to interpret the json_time_key.
## The time must be `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in the
## "reference time". To define a different format, arrange the values from
## the "reference time" in the example to match the format you will be
@@ -68,11 +73,15 @@ ignored unless specified in the `tag_key` or `json_string_fields` options.
#### json_query
-The `json_query` is a [GJSON][gjson] path that can be used to limit the
-portion of the overall JSON document that should be parsed. The result of the
-query should contain a JSON object or an array of objects.
+The `json_query` is a [GJSON][gjson] path that can be used to transform the
+JSON document before being parsed. The query is performed before any other
+options are applied and the new document produced will be parsed instead of the
+original document, as such, the result of the query should be a JSON object or
+an array of objects.
-Consult the GJSON [path syntax][gjson syntax] for details and examples.
+Consult the GJSON [path syntax][gjson syntax] for details and examples, and
+consider using the [GJSON playground][gjson playground] for developing and
+debugging your query.
#### json_time_key, json_time_format, json_timezone
@@ -232,5 +241,6 @@ file,first=Jane last="Murphy",age=47
[gjson]: https://github.com/tidwall/gjson
[gjson syntax]: https://github.com/tidwall/gjson#path-syntax
+[gjson playground]: https://gjson.dev/
[json]: https://www.json.org/
[time parse]: https://golang.org/pkg/time/#Parse
diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go
index ebe31fd23972f..bd9dee869170f 100644
--- a/plugins/parsers/json/parser.go
+++ b/plugins/parsers/json/parser.go
@@ -3,113 +3,154 @@ package json
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"log"
"strconv"
- "strings"
"time"
- "github.com/tidwall/gjson"
-
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/metric"
+ "github.com/tidwall/gjson"
)
var (
- utf8BOM = []byte("\xef\xbb\xbf")
+ utf8BOM = []byte("\xef\xbb\xbf")
+ ErrWrongType = errors.New("must be an object or an array of objects")
)
-type JSONParser struct {
- MetricName string
- TagKeys []string
- StringFields []string
- JSONNameKey string
- JSONQuery string
- JSONTimeKey string
- JSONTimeFormat string
- JSONTimezone string
- DefaultTags map[string]string
+type Config struct {
+ MetricName string
+ TagKeys []string
+ NameKey string
+ StringFields []string
+ Query string
+ TimeKey string
+ TimeFormat string
+ Timezone string
+ DefaultTags map[string]string
+ Strict bool
}
-func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) {
- metrics := make([]telegraf.Metric, 0)
+type Parser struct {
+ metricName string
+ tagKeys []string
+ stringFields filter.Filter
+ nameKey string
+ query string
+ timeKey string
+ timeFormat string
+ timezone string
+ defaultTags map[string]string
+ strict bool
+}
- var jsonOut []map[string]interface{}
- err := json.Unmarshal(buf, &jsonOut)
+func New(config *Config) (*Parser, error) {
+ stringFilter, err := filter.Compile(config.StringFields)
if err != nil {
- err = fmt.Errorf("unable to parse out as JSON Array, %s", err)
return nil, err
}
- for _, item := range jsonOut {
- metrics, err = p.parseObject(metrics, item)
- if err != nil {
- return nil, err
+
+ return &Parser{
+ metricName: config.MetricName,
+ tagKeys: config.TagKeys,
+ nameKey: config.NameKey,
+ stringFields: stringFilter,
+ query: config.Query,
+ timeKey: config.TimeKey,
+ timeFormat: config.TimeFormat,
+ timezone: config.Timezone,
+ defaultTags: config.DefaultTags,
+ strict: config.Strict,
+ }, nil
+}
+
+func (p *Parser) parseArray(data []interface{}, timestamp time.Time) ([]telegraf.Metric, error) {
+ results := make([]telegraf.Metric, 0)
+
+ for _, item := range data {
+ switch v := item.(type) {
+ case map[string]interface{}:
+ metrics, err := p.parseObject(v, timestamp)
+ if err != nil {
+ if p.strict {
+ return nil, err
+ }
+ continue
+ }
+ results = append(results, metrics...)
+ default:
+ return nil, ErrWrongType
+
}
}
- return metrics, nil
+
+ return results, nil
}
-func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]interface{}) ([]telegraf.Metric, error) {
+func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ([]telegraf.Metric, error) {
tags := make(map[string]string)
- for k, v := range p.DefaultTags {
+ for k, v := range p.defaultTags {
tags[k] = v
}
f := JSONFlattener{}
- err := f.FullFlattenJSON("", jsonOut, true, true)
+ err := f.FullFlattenJSON("", data, true, true)
if err != nil {
return nil, err
}
+ name := p.metricName
+
//checks if json_name_key is set
- if p.JSONNameKey != "" {
- switch field := f.Fields[p.JSONNameKey].(type) {
+ if p.nameKey != "" {
+ switch field := f.Fields[p.nameKey].(type) {
case string:
- p.MetricName = field
+ name = field
}
}
- //if time key is specified, set it to nTime
- nTime := time.Now().UTC()
- if p.JSONTimeKey != "" {
- if p.JSONTimeFormat == "" {
+ //if time key is specified, set timestamp to it
+ if p.timeKey != "" {
+ if p.timeFormat == "" {
err := fmt.Errorf("use of 'json_time_key' requires 'json_time_format'")
return nil, err
}
- if f.Fields[p.JSONTimeKey] == nil {
+ if f.Fields[p.timeKey] == nil {
err := fmt.Errorf("JSON time key could not be found")
return nil, err
}
- nTime, err = internal.ParseTimestampWithLocation(f.Fields[p.JSONTimeKey], p.JSONTimeFormat, p.JSONTimezone)
+ timestamp, err = internal.ParseTimestamp(p.timeFormat, f.Fields[p.timeKey], p.timezone)
if err != nil {
return nil, err
}
- delete(f.Fields, p.JSONTimeKey)
+ delete(f.Fields, p.timeKey)
//if the year is 0, set to current year
- if nTime.Year() == 0 {
- nTime = nTime.AddDate(time.Now().Year(), 0, 0)
+ if timestamp.Year() == 0 {
+ timestamp = timestamp.AddDate(time.Now().Year(), 0, 0)
}
}
tags, nFields := p.switchFieldToTag(tags, f.Fields)
- metric, err := metric.New(p.MetricName, tags, nFields, nTime)
+ metric, err := metric.New(name, tags, nFields, timestamp)
if err != nil {
return nil, err
}
- return append(metrics, metric), nil
+ return []telegraf.Metric{metric}, nil
}
//will take in field map with strings and bools,
//search for TagKeys that match fieldnames and add them to tags
//will delete any strings/bools that shouldn't be fields
//assumes that any non-numeric values in TagKeys should be displayed as tags
-func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) {
- for _, name := range p.TagKeys {
+func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) {
+ for _, name := range p.tagKeys {
//switch any fields in tagkeys into tags
if fields[name] == nil {
continue
@@ -130,31 +171,21 @@ func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string]
}
//remove any additional string/bool values from fields
- for k := range fields {
- //check if field is in StringFields
- sField := false
- for _, v := range p.StringFields {
- if v == k {
- sField = true
+ for fk := range fields {
+ switch fields[fk].(type) {
+ case string, bool:
+ if p.stringFields != nil && p.stringFields.Match(fk) {
+ continue
}
- }
- if sField {
- continue
- }
-
- switch fields[k].(type) {
- case string:
- delete(fields, k)
- case bool:
- delete(fields, k)
+ delete(fields, fk)
}
}
return tags, fields
}
-func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
- if p.JSONQuery != "" {
- result := gjson.GetBytes(buf, p.JSONQuery)
+func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+ if p.query != "" {
+ result := gjson.GetBytes(buf, p.query)
buf = []byte(result.Raw)
if !result.IsArray() && !result.IsObject() {
err := fmt.Errorf("E! Query path must lead to a JSON object or array of objects, but lead to: %v", result.Type)
@@ -168,20 +199,24 @@ func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
return make([]telegraf.Metric, 0), nil
}
- if !isarray(buf) {
- metrics := make([]telegraf.Metric, 0)
- var jsonOut map[string]interface{}
- err := json.Unmarshal(buf, &jsonOut)
- if err != nil {
- err = fmt.Errorf("unable to parse out as JSON, %s", err)
- return nil, err
- }
- return p.parseObject(metrics, jsonOut)
+ var data interface{}
+ err := json.Unmarshal(buf, &data)
+ if err != nil {
+ return nil, err
+ }
+
+ timestamp := time.Now().UTC()
+ switch v := data.(type) {
+ case map[string]interface{}:
+ return p.parseObject(v, timestamp)
+ case []interface{}:
+ return p.parseArray(v, timestamp)
+ default:
+ return nil, ErrWrongType
}
- return p.parseArray(buf)
}
-func (p *JSONParser) ParseLine(line string) (telegraf.Metric, error) {
+func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line + "\n"))
if err != nil {
@@ -195,8 +230,8 @@ func (p *JSONParser) ParseLine(line string) (telegraf.Metric, error) {
return metrics[0], nil
}
-func (p *JSONParser) SetDefaultTags(tags map[string]string) {
- p.DefaultTags = tags
+func (p *Parser) SetDefaultTags(tags map[string]string) {
+ p.defaultTags = tags
}
type JSONFlattener struct {
@@ -224,19 +259,27 @@ func (f *JSONFlattener) FullFlattenJSON(
if f.Fields == nil {
f.Fields = make(map[string]interface{})
}
- fieldname = strings.Trim(fieldname, "_")
+
switch t := v.(type) {
case map[string]interface{}:
for k, v := range t {
- err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool)
+ fieldkey := k
+ if fieldname != "" {
+ fieldkey = fieldname + "_" + fieldkey
+ }
+
+ err := f.FullFlattenJSON(fieldkey, v, convertString, convertBool)
if err != nil {
return err
}
}
case []interface{}:
for i, v := range t {
- k := strconv.Itoa(i)
- err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool)
+ fieldkey := strconv.Itoa(i)
+ if fieldname != "" {
+ fieldkey = fieldname + "_" + fieldkey
+ }
+ err := f.FullFlattenJSON(fieldkey, v, convertString, convertBool)
if err != nil {
return nil
}
@@ -263,13 +306,3 @@ func (f *JSONFlattener) FullFlattenJSON(
}
return nil
}
-
-func isarray(buf []byte) bool {
- ia := bytes.IndexByte(buf, '[')
- ib := bytes.IndexByte(buf, '{')
- if ia > -1 && ia < ib {
- return true
- } else {
- return false
- }
-}
diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go
index 2db9ad78f196f..31c507e7517f7 100644
--- a/plugins/parsers/json/parser_test.go
+++ b/plugins/parsers/json/parser_test.go
@@ -2,7 +2,6 @@ package json
import (
"fmt"
- "log"
"testing"
"time"
@@ -18,6 +17,7 @@ const (
validJSONArrayMultiple = "[{\"a\": 5, \"b\": {\"c\": 6}}, {\"a\": 7, \"b\": {\"c\": 8}}]"
invalidJSON = "I don't think this is JSON"
invalidJSON2 = "{\"a\": 5, \"b\": \"c\": 6}}"
+ mixedValidityJSON = "[{\"a\": 5, \"time\": \"2006-01-02T15:04:05\"}, {\"a\": 2}]"
)
const validJSONTags = `
@@ -53,9 +53,10 @@ const validJSONArrayTags = `
`
func TestParseValidJSON(t *testing.T) {
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
- }
+ })
+ require.NoError(t, err)
// Most basic vanilla test
metrics, err := parser.Parse([]byte(validJSON))
@@ -102,9 +103,10 @@ func TestParseValidJSON(t *testing.T) {
}
func TestParseLineValidJSON(t *testing.T) {
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
- }
+ })
+ require.NoError(t, err)
// Most basic vanilla test
metric, err := parser.ParseLine(validJSON)
@@ -138,11 +140,12 @@ func TestParseLineValidJSON(t *testing.T) {
}
func TestParseInvalidJSON(t *testing.T) {
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
- }
+ })
+ require.NoError(t, err)
- _, err := parser.Parse([]byte(invalidJSON))
+ _, err = parser.Parse([]byte(invalidJSON))
require.Error(t, err)
_, err = parser.Parse([]byte(invalidJSON2))
require.Error(t, err)
@@ -150,12 +153,49 @@ func TestParseInvalidJSON(t *testing.T) {
require.Error(t, err)
}
+func TestParseJSONImplicitStrictness(t *testing.T) {
+ parserImplicitNoStrict, err := New(&Config{
+ MetricName: "json_test",
+ TimeKey: "time",
+ })
+ require.NoError(t, err)
+
+ _, err = parserImplicitNoStrict.Parse([]byte(mixedValidityJSON))
+ require.NoError(t, err)
+}
+
+func TestParseJSONExplicitStrictnessFalse(t *testing.T) {
+ parserNoStrict, err := New(&Config{
+ MetricName: "json_test",
+ TimeKey: "time",
+ Strict: false,
+ })
+ require.NoError(t, err)
+
+ _, err = parserNoStrict.Parse([]byte(mixedValidityJSON))
+ require.NoError(t, err)
+}
+
+func TestParseJSONExplicitStrictnessTrue(t *testing.T) {
+ parserStrict, err := New(&Config{
+ MetricName: "json_test",
+ TimeKey: "time",
+ Strict: true,
+ })
+ require.NoError(t, err)
+
+ _, err = parserStrict.Parse([]byte(mixedValidityJSON))
+ require.Error(t, err)
+}
+
func TestParseWithTagKeys(t *testing.T) {
// Test that strings not matching tag keys are ignored
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
TagKeys: []string{"wrongtagkey"},
- }
+ })
+ require.NoError(t, err)
+
metrics, err := parser.Parse([]byte(validJSONTags))
require.NoError(t, err)
require.Len(t, metrics, 1)
@@ -167,10 +207,12 @@ func TestParseWithTagKeys(t *testing.T) {
require.Equal(t, map[string]string{}, metrics[0].Tags())
// Test that single tag key is found and applied
- parser = JSONParser{
+ parser, err = New(&Config{
MetricName: "json_test",
TagKeys: []string{"mytag"},
- }
+ })
+ require.NoError(t, err)
+
metrics, err = parser.Parse([]byte(validJSONTags))
require.NoError(t, err)
require.Len(t, metrics, 1)
@@ -184,10 +226,11 @@ func TestParseWithTagKeys(t *testing.T) {
}, metrics[0].Tags())
// Test that both tag keys are found and applied
- parser = JSONParser{
+ parser, err = New(&Config{
MetricName: "json_test",
TagKeys: []string{"mytag", "othertag"},
- }
+ })
+ require.NoError(t, err)
metrics, err = parser.Parse([]byte(validJSONTags))
require.NoError(t, err)
require.Len(t, metrics, 1)
@@ -204,10 +247,11 @@ func TestParseWithTagKeys(t *testing.T) {
func TestParseLineWithTagKeys(t *testing.T) {
// Test that strings not matching tag keys are ignored
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
TagKeys: []string{"wrongtagkey"},
- }
+ })
+ require.NoError(t, err)
metric, err := parser.ParseLine(validJSONTags)
require.NoError(t, err)
require.Equal(t, "json_test", metric.Name())
@@ -218,10 +262,12 @@ func TestParseLineWithTagKeys(t *testing.T) {
require.Equal(t, map[string]string{}, metric.Tags())
// Test that single tag key is found and applied
- parser = JSONParser{
+ parser, err = New(&Config{
MetricName: "json_test",
TagKeys: []string{"mytag"},
- }
+ })
+ require.NoError(t, err)
+
metric, err = parser.ParseLine(validJSONTags)
require.NoError(t, err)
require.Equal(t, "json_test", metric.Name())
@@ -234,10 +280,12 @@ func TestParseLineWithTagKeys(t *testing.T) {
}, metric.Tags())
// Test that both tag keys are found and applied
- parser = JSONParser{
+ parser, err = New(&Config{
MetricName: "json_test",
TagKeys: []string{"mytag", "othertag"},
- }
+ })
+ require.NoError(t, err)
+
metric, err = parser.ParseLine(validJSONTags)
require.NoError(t, err)
require.Equal(t, "json_test", metric.Name())
@@ -252,13 +300,14 @@ func TestParseLineWithTagKeys(t *testing.T) {
}
func TestParseValidJSONDefaultTags(t *testing.T) {
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
TagKeys: []string{"mytag"},
DefaultTags: map[string]string{
"t4g": "default",
},
- }
+ })
+ require.NoError(t, err)
// Most basic vanilla test
metrics, err := parser.Parse([]byte(validJSON))
@@ -288,13 +337,14 @@ func TestParseValidJSONDefaultTags(t *testing.T) {
// Test that default tags are overridden by tag keys
func TestParseValidJSONDefaultTagsOverride(t *testing.T) {
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
TagKeys: []string{"mytag"},
DefaultTags: map[string]string{
"mytag": "default",
},
- }
+ })
+ require.NoError(t, err)
// Most basic vanilla test
metrics, err := parser.Parse([]byte(validJSON))
@@ -323,9 +373,10 @@ func TestParseValidJSONDefaultTagsOverride(t *testing.T) {
// Test that json arrays can be parsed
func TestParseValidJSONArray(t *testing.T) {
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_array_test",
- }
+ })
+ require.NoError(t, err)
// Most basic vanilla test
metrics, err := parser.Parse([]byte(validJSONArray))
@@ -358,10 +409,12 @@ func TestParseValidJSONArray(t *testing.T) {
func TestParseArrayWithTagKeys(t *testing.T) {
// Test that strings not matching tag keys are ignored
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_array_test",
TagKeys: []string{"wrongtagkey"},
- }
+ })
+ require.NoError(t, err)
+
metrics, err := parser.Parse([]byte(validJSONArrayTags))
require.NoError(t, err)
require.Len(t, metrics, 2)
@@ -380,10 +433,12 @@ func TestParseArrayWithTagKeys(t *testing.T) {
require.Equal(t, map[string]string{}, metrics[1].Tags())
// Test that single tag key is found and applied
- parser = JSONParser{
+ parser, err = New(&Config{
MetricName: "json_array_test",
TagKeys: []string{"mytag"},
- }
+ })
+ require.NoError(t, err)
+
metrics, err = parser.Parse([]byte(validJSONArrayTags))
require.NoError(t, err)
require.Len(t, metrics, 2)
@@ -406,10 +461,12 @@ func TestParseArrayWithTagKeys(t *testing.T) {
}, metrics[1].Tags())
// Test that both tag keys are found and applied
- parser = JSONParser{
+ parser, err = New(&Config{
MetricName: "json_array_test",
TagKeys: []string{"mytag", "othertag"},
- }
+ })
+ require.NoError(t, err)
+
metrics, err = parser.Parse([]byte(validJSONArrayTags))
require.NoError(t, err)
require.Len(t, metrics, 2)
@@ -437,12 +494,13 @@ func TestParseArrayWithTagKeys(t *testing.T) {
var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]")
func TestHttpJsonBOM(t *testing.T) {
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
- }
+ })
+ require.NoError(t, err)
// Most basic vanilla test
- _, err := parser.Parse(jsonBOM)
+ _, err = parser.Parse(jsonBOM)
require.NoError(t, err)
}
@@ -466,15 +524,16 @@ func TestJSONParseNestedArray(t *testing.T) {
}
}`
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
TagKeys: []string{"total_devices", "total_threads", "shares_tester3_fun"},
- }
+ })
+ require.NoError(t, err)
metrics, err := parser.Parse([]byte(testString))
- log.Printf("m[0] name: %v, tags: %v, fields: %v", metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields())
+ require.Len(t, metrics, 1)
require.NoError(t, err)
- require.Equal(t, len(parser.TagKeys), len(metrics[0].Tags()))
+ require.Equal(t, 3, len(metrics[0].Tags()))
}
func TestJSONQueryErrorOnArray(t *testing.T) {
@@ -494,13 +553,14 @@ func TestJSONQueryErrorOnArray(t *testing.T) {
}
}`
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
TagKeys: []string{},
- JSONQuery: "shares.myArr",
- }
+ Query: "shares.myArr",
+ })
+ require.NoError(t, err)
- _, err := parser.Parse([]byte(testString))
+ _, err = parser.Parse([]byte(testString))
require.Error(t, err)
}
@@ -527,11 +587,12 @@ func TestArrayOfObjects(t *testing.T) {
"more_stuff":"junk"
}`
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
TagKeys: []string{"ice"},
- JSONQuery: "meta.shares",
- }
+ Query: "meta.shares",
+ })
+ require.NoError(t, err)
metrics, err := parser.Parse([]byte(testString))
require.NoError(t, err)
@@ -553,12 +614,13 @@ func TestUseCaseJSONQuery(t *testing.T) {
}
}`
- parser := JSONParser{
+ parser, err := New(&Config{
MetricName: "json_test",
StringFields: []string{"last"},
TagKeys: []string{"first"},
- JSONQuery: "obj.friends",
- }
+ Query: "obj.friends",
+ })
+ require.NoError(t, err)
metrics, err := parser.Parse([]byte(testString))
require.NoError(t, err)
@@ -588,11 +650,12 @@ func TestTimeParser(t *testing.T) {
}
]`
- parser := JSONParser{
- MetricName: "json_test",
- JSONTimeKey: "b_time",
- JSONTimeFormat: "02 Jan 06 15:04 MST",
- }
+ parser, err := New(&Config{
+ MetricName: "json_test",
+ TimeKey: "b_time",
+ TimeFormat: "02 Jan 06 15:04 MST",
+ })
+ require.NoError(t, err)
metrics, err := parser.Parse([]byte(testString))
require.NoError(t, err)
require.Equal(t, 2, len(metrics))
@@ -604,12 +667,13 @@ func TestTimeParserWithTimezone(t *testing.T) {
"time": "04 Jan 06 15:04"
}`
- parser := JSONParser{
- MetricName: "json_test",
- JSONTimeKey: "time",
- JSONTimeFormat: "02 Jan 06 15:04",
- JSONTimezone: "America/New_York",
- }
+ parser, err := New(&Config{
+ MetricName: "json_test",
+ TimeKey: "time",
+ TimeFormat: "02 Jan 06 15:04",
+ Timezone: "America/New_York",
+ })
+ require.NoError(t, err)
metrics, err := parser.Parse([]byte(testString))
require.NoError(t, err)
require.Equal(t, 1, len(metrics))
@@ -638,11 +702,13 @@ func TestUnixTimeParser(t *testing.T) {
}
]`
- parser := JSONParser{
- MetricName: "json_test",
- JSONTimeKey: "b_time",
- JSONTimeFormat: "unix",
- }
+ parser, err := New(&Config{
+ MetricName: "json_test",
+ TimeKey: "b_time",
+ TimeFormat: "unix",
+ })
+ require.NoError(t, err)
+
metrics, err := parser.Parse([]byte(testString))
require.NoError(t, err)
require.Equal(t, 2, len(metrics))
@@ -671,11 +737,13 @@ func TestUnixMsTimeParser(t *testing.T) {
}
]`
- parser := JSONParser{
- MetricName: "json_test",
- JSONTimeKey: "b_time",
- JSONTimeFormat: "unix_ms",
- }
+ parser, err := New(&Config{
+ MetricName: "json_test",
+ TimeKey: "b_time",
+ TimeFormat: "unix_ms",
+ })
+ require.NoError(t, err)
+
metrics, err := parser.Parse([]byte(testString))
require.NoError(t, err)
require.Equal(t, 2, len(metrics))
@@ -693,11 +761,12 @@ func TestTimeErrors(t *testing.T) {
"my_tag_2": "baz"
}`
- parser := JSONParser{
- MetricName: "json_test",
- JSONTimeKey: "b_time",
- JSONTimeFormat: "02 January 06 15:04 MST",
- }
+ parser, err := New(&Config{
+ MetricName: "json_test",
+ TimeKey: "b_time",
+ TimeFormat: "02 January 06 15:04 MST",
+ })
+ require.NoError(t, err)
metrics, err := parser.Parse([]byte(testString))
require.Error(t, err)
@@ -712,19 +781,31 @@ func TestTimeErrors(t *testing.T) {
"my_tag_2": "baz"
}`
- parser = JSONParser{
- MetricName: "json_test",
- JSONTimeKey: "b_time",
- JSONTimeFormat: "02 January 06 15:04 MST",
- }
+ parser, err = New(&Config{
+ MetricName: "json_test",
+ TimeKey: "b_time",
+ TimeFormat: "02 January 06 15:04 MST",
+ })
+ require.NoError(t, err)
metrics, err = parser.Parse([]byte(testString2))
- log.Printf("err: %v", err)
require.Error(t, err)
require.Equal(t, 0, len(metrics))
require.Equal(t, fmt.Errorf("JSON time key could not be found"), err)
}
+func TestShareTimestamp(t *testing.T) {
+ parser, err := New(&Config{
+ MetricName: "json_test",
+ })
+ require.NoError(t, err)
+
+ metrics, err := parser.Parse([]byte(validJSONArrayMultiple))
+ require.NoError(t, err)
+ require.Equal(t, 2, len(metrics))
+ require.Equal(t, true, metrics[0].Time() == metrics[1].Time())
+}
+
func TestNameKey(t *testing.T) {
testString := `{
"a": 5,
@@ -736,35 +817,134 @@ func TestNameKey(t *testing.T) {
"my_tag_2": "baz"
}`
- parser := JSONParser{
- JSONNameKey: "b_c",
- }
+ parser, err := New(&Config{
+ NameKey: "b_c",
+ })
+ require.NoError(t, err)
metrics, err := parser.Parse([]byte(testString))
require.NoError(t, err)
require.Equal(t, "this is my name", metrics[0].Name())
}
-func TestTimeKeyDelete(t *testing.T) {
- data := `{
- "timestamp": 1541183052,
- "value": 42
- }`
-
- parser := JSONParser{
- MetricName: "json",
- JSONTimeKey: "timestamp",
- JSONTimeFormat: "unix",
- }
+func TestParseArrayWithWrongType(t *testing.T) {
+ data := `[{"answer": 42}, 123]`
- metrics, err := parser.Parse([]byte(data))
+ parser, err := New(&Config{})
require.NoError(t, err)
- expected := []telegraf.Metric{
- testutil.MustMetric("json",
- map[string]string{},
- map[string]interface{}{"value": 42.0},
- time.Unix(1541183052, 0)),
+
+ _, err = parser.Parse([]byte(data))
+ require.Error(t, err)
+}
+
+func TestParse(t *testing.T) {
+ tests := []struct {
+ name string
+ config *Config
+ input []byte
+ expected []telegraf.Metric
+ }{
+ {
+ name: "tag keys with underscore issue 6705",
+ config: &Config{
+ MetricName: "json",
+ TagKeys: []string{"metric___name__"},
+ },
+ input: []byte(`{"metric": {"__name__": "howdy", "time_idle": 42}}`),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "json",
+ map[string]string{
+ "metric___name__": "howdy",
+ },
+ map[string]interface{}{
+ "metric_time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "parse empty array",
+ config: &Config{},
+ input: []byte(`[]`),
+ expected: []telegraf.Metric{},
+ },
+ {
+ name: "parse simple array",
+ config: &Config{
+ MetricName: "json",
+ },
+ input: []byte(`[{"answer": 42}]`),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "json",
+ map[string]string{},
+ map[string]interface{}{
+ "answer": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "string field glob",
+ config: &Config{
+ MetricName: "json",
+ StringFields: []string{"*"},
+ },
+ input: []byte(`
+{
+ "color": "red",
+ "status": "error"
+}
+`),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "json",
+ map[string]string{},
+ map[string]interface{}{
+ "color": "red",
+ "status": "error",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "time key is deleted from fields",
+ config: &Config{
+ MetricName: "json",
+ TimeKey: "timestamp",
+ TimeFormat: "unix",
+ },
+ input: []byte(`
+{
+ "value": 42,
+ "timestamp": 1541183052
+}
+`),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "json",
+ map[string]string{},
+ map[string]interface{}{
+ "value": 42.0,
+ },
+ time.Unix(1541183052, 0),
+ ),
+ },
+ },
}
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ parser, err := New(tt.config)
+ require.NoError(t, err)
+
+ actual, err := parser.Parse(tt.input)
+ require.NoError(t, err)
- testutil.RequireMetricsEqual(t, expected, metrics)
+ testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime())
+ })
+ }
}
diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go
index e6e15469fc55d..729ed048c0720 100644
--- a/plugins/parsers/registry.go
+++ b/plugins/parsers/registry.go
@@ -2,12 +2,12 @@ package parsers
import (
"fmt"
- "time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/parsers/collectd"
"github.com/influxdata/telegraf/plugins/parsers/csv"
"github.com/influxdata/telegraf/plugins/parsers/dropwizard"
+ "github.com/influxdata/telegraf/plugins/parsers/form_urlencoded"
"github.com/influxdata/telegraf/plugins/parsers/graphite"
"github.com/influxdata/telegraf/plugins/parsers/grok"
"github.com/influxdata/telegraf/plugins/parsers/influx"
@@ -69,7 +69,7 @@ type Config struct {
// TagKeys only apply to JSON data
TagKeys []string `toml:"tag_keys"`
- // FieldKeys only apply to JSON
+ // Array of glob pattern strings keys that should be added as string fields.
JSONStringFields []string `toml:"json_string_fields"`
JSONNameKey string `toml:"json_name_key"`
@@ -88,6 +88,9 @@ type Config struct {
// default timezone
JSONTimezone string `toml:"json_timezone"`
+ // Whether to continue if a JSON object can't be coerced
+ JSONStrict bool `toml:"json_strict"`
+
// Authentication file for collectd
CollectdAuthFile string `toml:"collectd_auth_file"`
// One of none (default), sign, or encrypt
@@ -140,7 +143,11 @@ type Config struct {
CSVTagColumns []string `toml:"csv_tag_columns"`
CSVTimestampColumn string `toml:"csv_timestamp_column"`
CSVTimestampFormat string `toml:"csv_timestamp_format"`
+ CSVTimezone string `toml:"csv_timezone"`
CSVTrimSpace bool `toml:"csv_trim_space"`
+
+ // FormData configuration
+ FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"`
}
// NewParser returns a Parser interface based on the given config.
@@ -149,15 +156,20 @@ func NewParser(config *Config) (Parser, error) {
var parser Parser
switch config.DataFormat {
case "json":
- parser = newJSONParser(config.MetricName,
- config.TagKeys,
- config.JSONNameKey,
- config.JSONStringFields,
- config.JSONQuery,
- config.JSONTimeKey,
- config.JSONTimeFormat,
- config.JSONTimezone,
- config.DefaultTags)
+ parser, err = json.New(
+ &json.Config{
+ MetricName: config.MetricName,
+ TagKeys: config.TagKeys,
+ NameKey: config.JSONNameKey,
+ StringFields: config.JSONStringFields,
+ Query: config.JSONQuery,
+ TimeKey: config.JSONTimeKey,
+ TimeFormat: config.JSONTimeFormat,
+ Timezone: config.JSONTimezone,
+ DefaultTags: config.DefaultTags,
+ Strict: config.JSONStrict,
+ },
+ )
case "value":
parser, err = NewValueParser(config.MetricName,
config.DataType, config.DefaultTags)
@@ -193,111 +205,39 @@ func NewParser(config *Config) (Parser, error) {
config.GrokTimezone,
config.GrokUniqueTimestamp)
case "csv":
- parser, err = newCSVParser(config.MetricName,
- config.CSVHeaderRowCount,
- config.CSVSkipRows,
- config.CSVSkipColumns,
- config.CSVDelimiter,
- config.CSVComment,
- config.CSVTrimSpace,
- config.CSVColumnNames,
- config.CSVColumnTypes,
- config.CSVTagColumns,
- config.CSVMeasurementColumn,
- config.CSVTimestampColumn,
- config.CSVTimestampFormat,
- config.DefaultTags)
+ config := &csv.Config{
+ MetricName: config.MetricName,
+ HeaderRowCount: config.CSVHeaderRowCount,
+ SkipRows: config.CSVSkipRows,
+ SkipColumns: config.CSVSkipColumns,
+ Delimiter: config.CSVDelimiter,
+ Comment: config.CSVComment,
+ TrimSpace: config.CSVTrimSpace,
+ ColumnNames: config.CSVColumnNames,
+ ColumnTypes: config.CSVColumnTypes,
+ TagColumns: config.CSVTagColumns,
+ MeasurementColumn: config.CSVMeasurementColumn,
+ TimestampColumn: config.CSVTimestampColumn,
+ TimestampFormat: config.CSVTimestampFormat,
+ Timezone: config.CSVTimezone,
+ DefaultTags: config.DefaultTags,
+ }
+
+ return csv.NewParser(config)
case "logfmt":
parser, err = NewLogFmtParser(config.MetricName, config.DefaultTags)
+ case "form_urlencoded":
+ parser, err = NewFormUrlencodedParser(
+ config.MetricName,
+ config.DefaultTags,
+ config.FormUrlencodedTagKeys,
+ )
default:
err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
}
return parser, err
}
-func newCSVParser(metricName string,
- headerRowCount int,
- skipRows int,
- skipColumns int,
- delimiter string,
- comment string,
- trimSpace bool,
- columnNames []string,
- columnTypes []string,
- tagColumns []string,
- nameColumn string,
- timestampColumn string,
- timestampFormat string,
- defaultTags map[string]string) (Parser, error) {
-
- if headerRowCount == 0 && len(columnNames) == 0 {
- return nil, fmt.Errorf("`csv_header_row_count` must be defined if `csv_column_names` is not specified")
- }
-
- if delimiter != "" {
- runeStr := []rune(delimiter)
- if len(runeStr) > 1 {
- return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", delimiter)
- }
- }
-
- if comment != "" {
- runeStr := []rune(comment)
- if len(runeStr) > 1 {
- return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", comment)
- }
- }
-
- if len(columnNames) > 0 && len(columnTypes) > 0 && len(columnNames) != len(columnTypes) {
- return nil, fmt.Errorf("csv_column_names field count doesn't match with csv_column_types")
- }
-
- parser := &csv.Parser{
- MetricName: metricName,
- HeaderRowCount: headerRowCount,
- SkipRows: skipRows,
- SkipColumns: skipColumns,
- Delimiter: delimiter,
- Comment: comment,
- TrimSpace: trimSpace,
- ColumnNames: columnNames,
- ColumnTypes: columnTypes,
- TagColumns: tagColumns,
- MeasurementColumn: nameColumn,
- TimestampColumn: timestampColumn,
- TimestampFormat: timestampFormat,
- DefaultTags: defaultTags,
- TimeFunc: time.Now,
- }
-
- return parser, nil
-}
-
-func newJSONParser(
- metricName string,
- tagKeys []string,
- jsonNameKey string,
- stringFields []string,
- jsonQuery string,
- timeKey string,
- timeFormat string,
- timezone string,
- defaultTags map[string]string,
-) Parser {
- parser := &json.JSONParser{
- MetricName: metricName,
- TagKeys: tagKeys,
- StringFields: stringFields,
- JSONNameKey: jsonNameKey,
- JSONQuery: jsonQuery,
- JSONTimeKey: timeKey,
- JSONTimeFormat: timeFormat,
- JSONTimezone: timezone,
- DefaultTags: defaultTags,
- }
- return parser
-}
-
func newGrokParser(metricName string,
patterns []string, nPatterns []string,
cPatterns string, cPatternFiles []string,
@@ -316,19 +256,6 @@ func newGrokParser(metricName string,
return &parser, err
}
-func NewJSONParser(
- metricName string,
- tagKeys []string,
- defaultTags map[string]string,
-) (Parser, error) {
- parser := &json.JSONParser{
- MetricName: metricName,
- TagKeys: tagKeys,
- DefaultTags: defaultTags,
- }
- return parser, nil
-}
-
func NewNagiosParser() (Parser, error) {
return &nagios.NagiosParser{}, nil
}
@@ -400,3 +327,15 @@ func NewLogFmtParser(metricName string, defaultTags map[string]string) (Parser,
func NewWavefrontParser(defaultTags map[string]string) (Parser, error) {
return wavefront.NewWavefrontParser(defaultTags), nil
}
+
+func NewFormUrlencodedParser(
+ metricName string,
+ defaultTags map[string]string,
+ tagKeys []string,
+) (Parser, error) {
+ return &form_urlencoded.Parser{
+ MetricName: metricName,
+ DefaultTags: defaultTags,
+ TagKeys: tagKeys,
+ }, nil
+}
diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go
index 3b7c875a2c93e..5ed37645cdd74 100644
--- a/plugins/parsers/wavefront/element.go
+++ b/plugins/parsers/wavefront/element.go
@@ -28,7 +28,7 @@ type WhiteSpaceParser struct {
type TagParser struct{}
type LoopedParser struct {
wrappedParser ElementParser
- wsPaser *WhiteSpaceParser
+ wsParser *WhiteSpaceParser
}
type LiteralParser struct {
literal string
@@ -37,7 +37,7 @@ type LiteralParser struct {
func (ep *NameParser) parse(p *PointParser, pt *Point) error {
//Valid characters are: a-z, A-Z, 0-9, hyphen ("-"), underscore ("_"), dot (".").
// Forward slash ("/") and comma (",") are allowed if metricName is enclosed in double quotes.
- // Delta (U+2206) is allowed as the first characeter of the
+ // Delta (U+2206) is allowed as the first character of the
// metricName
name, err := parseLiteral(p)
@@ -136,7 +136,7 @@ func (ep *LoopedParser) parse(p *PointParser, pt *Point) error {
if err != nil {
return err
}
- err = ep.wsPaser.parse(p, pt)
+ err = ep.wsParser.parse(p, pt)
if err == ErrEOF {
break
}
diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go
index f5fc88dbfe570..7ae455d47dbbd 100644
--- a/plugins/parsers/wavefront/parser.go
+++ b/plugins/parsers/wavefront/parser.go
@@ -6,6 +6,7 @@ import (
"io"
"log"
"strconv"
+ "sync"
"time"
"github.com/influxdata/telegraf"
@@ -22,7 +23,12 @@ type Point struct {
Tags map[string]string
}
-// Parser represents a parser.
+type WavefrontParser struct {
+ parsers *sync.Pool
+ defaultTags map[string]string
+}
+
+// PointParser is a thread-unsafe parser and must be kept in a pool.
type PointParser struct {
s *PointScanner
buf struct {
@@ -30,10 +36,10 @@ type PointParser struct {
lit []string // last read n literals
n int // unscanned buffer size (max=2)
}
- scanBuf bytes.Buffer // buffer reused for scanning tokens
- writeBuf bytes.Buffer // buffer reused for parsing elements
- Elements []ElementParser
- defaultTags map[string]string
+ scanBuf bytes.Buffer // buffer reused for scanning tokens
+ writeBuf bytes.Buffer // buffer reused for parsing elements
+ Elements []ElementParser
+ parent *WavefrontParser
}
// Returns a slice of ElementParser's for the Graphite format
@@ -41,15 +47,46 @@ func NewWavefrontElements() []ElementParser {
var elements []ElementParser
wsParser := WhiteSpaceParser{}
wsParserNextOpt := WhiteSpaceParser{nextOptional: true}
- repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsPaser: &wsParser}
+ repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsParser: &wsParser}
elements = append(elements, &NameParser{}, &wsParser, &ValueParser{}, &wsParserNextOpt,
&TimestampParser{optional: true}, &wsParserNextOpt, &repeatParser)
return elements
}
-func NewWavefrontParser(defaultTags map[string]string) *PointParser {
+func NewWavefrontParser(defaultTags map[string]string) *WavefrontParser {
+ wp := &WavefrontParser{defaultTags: defaultTags}
+ wp.parsers = &sync.Pool{
+ New: func() interface{} {
+ return NewPointParser(wp)
+ },
+ }
+ return wp
+}
+
+func NewPointParser(parent *WavefrontParser) *PointParser {
elements := NewWavefrontElements()
- return &PointParser{Elements: elements, defaultTags: defaultTags}
+ return &PointParser{Elements: elements, parent: parent}
+}
+
+func (p *WavefrontParser) ParseLine(line string) (telegraf.Metric, error) {
+ buf := []byte(line)
+
+ metrics, err := p.Parse(buf)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(metrics) > 0 {
+ return metrics[0], nil
+ }
+
+ return nil, nil
+}
+
+func (p *WavefrontParser) Parse(buf []byte) ([]telegraf.Metric, error) {
+ pp := p.parsers.Get().(*PointParser)
+ defer p.parsers.Put(pp)
+ return pp.Parse(buf)
}
func (p *PointParser) Parse(buf []byte) ([]telegraf.Metric, error) {
@@ -91,21 +128,7 @@ func (p *PointParser) Parse(buf []byte) ([]telegraf.Metric, error) {
return metrics, nil
}
-func (p *PointParser) ParseLine(line string) (telegraf.Metric, error) {
- buf := []byte(line)
- metrics, err := p.Parse(buf)
- if err != nil {
- return nil, err
- }
-
- if len(metrics) > 0 {
- return metrics[0], nil
- }
-
- return nil, nil
-}
-
-func (p *PointParser) SetDefaultTags(tags map[string]string) {
+func (p *WavefrontParser) SetDefaultTags(tags map[string]string) {
p.defaultTags = tags
}
@@ -119,7 +142,7 @@ func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.M
tags[k] = v
}
// apply default tags after parsed tags
- for k, v := range p.defaultTags {
+ for k, v := range p.parent.defaultTags {
tags[k] = v
}
diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go
index 41e2707d3e4c8..c84ee81110ee5 100644
--- a/plugins/processors/all/all.go
+++ b/plugins/processors/all/all.go
@@ -1,13 +1,28 @@
package all
import (
+ _ "github.com/influxdata/telegraf/plugins/processors/clone"
_ "github.com/influxdata/telegraf/plugins/processors/converter"
+ _ "github.com/influxdata/telegraf/plugins/processors/date"
+ _ "github.com/influxdata/telegraf/plugins/processors/dedup"
+ _ "github.com/influxdata/telegraf/plugins/processors/defaults"
_ "github.com/influxdata/telegraf/plugins/processors/enum"
+ _ "github.com/influxdata/telegraf/plugins/processors/execd"
+ _ "github.com/influxdata/telegraf/plugins/processors/filepath"
+ _ "github.com/influxdata/telegraf/plugins/processors/ifname"
_ "github.com/influxdata/telegraf/plugins/processors/override"
_ "github.com/influxdata/telegraf/plugins/processors/parser"
+ _ "github.com/influxdata/telegraf/plugins/processors/pivot"
+ _ "github.com/influxdata/telegraf/plugins/processors/port_name"
_ "github.com/influxdata/telegraf/plugins/processors/printer"
_ "github.com/influxdata/telegraf/plugins/processors/regex"
_ "github.com/influxdata/telegraf/plugins/processors/rename"
+ _ "github.com/influxdata/telegraf/plugins/processors/reverse_dns"
+ _ "github.com/influxdata/telegraf/plugins/processors/s2geo"
+ _ "github.com/influxdata/telegraf/plugins/processors/starlark"
_ "github.com/influxdata/telegraf/plugins/processors/strings"
+ _ "github.com/influxdata/telegraf/plugins/processors/tag_limit"
+ _ "github.com/influxdata/telegraf/plugins/processors/template"
_ "github.com/influxdata/telegraf/plugins/processors/topk"
+ _ "github.com/influxdata/telegraf/plugins/processors/unpivot"
)
diff --git a/plugins/processors/clone/README.md b/plugins/processors/clone/README.md
new file mode 100644
index 0000000000000..7ae33d36b235c
--- /dev/null
+++ b/plugins/processors/clone/README.md
@@ -0,0 +1,38 @@
+# Clone Processor Plugin
+
+The clone processor plugin create a copy of each metric passing through it,
+preserving untouched the original metric and allowing modifications in the
+copied one.
+
+The modifications allowed are the ones supported by input plugins and aggregators:
+
+* name_override
+* name_prefix
+* name_suffix
+* tags
+
+Select the metrics to modify using the standard
+[measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering)
+options.
+
+Values of *name_override*, *name_prefix*, *name_suffix* and already present
+*tags* with conflicting keys will be overwritten. Absent *tags* will be
+created.
+
+A typical use-case is gathering metrics once and cloning them to simulate
+having several hosts (modifying ``host`` tag).
+
+### Configuration:
+
+```toml
+# Apply metric modifications using override semantics.
+[[processors.clone]]
+ ## All modifications on inputs and aggregators can be overridden:
+ # name_override = "new_name"
+ # name_prefix = "new_name_prefix"
+ # name_suffix = "new_name_suffix"
+
+ ## Tags to be added (all values must be strings)
+ # [processors.clone.tags]
+ # additional_tag = "tag_value"
+```
diff --git a/plugins/processors/clone/clone.go b/plugins/processors/clone/clone.go
new file mode 100644
index 0000000000000..ad03fd3e4e2bc
--- /dev/null
+++ b/plugins/processors/clone/clone.go
@@ -0,0 +1,60 @@
+package clone
+
+import (
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+var sampleConfig = `
+ ## All modifications on inputs and aggregators can be overridden:
+ # name_override = "new_name"
+ # name_prefix = "new_name_prefix"
+ # name_suffix = "new_name_suffix"
+
+ ## Tags to be added (all values must be strings)
+ # [processors.clone.tags]
+ # additional_tag = "tag_value"
+`
+
+type Clone struct {
+ NameOverride string
+ NamePrefix string
+ NameSuffix string
+ Tags map[string]string
+}
+
+func (c *Clone) SampleConfig() string {
+ return sampleConfig
+}
+
+func (c *Clone) Description() string {
+ return "Clone metrics and apply modifications."
+}
+
+func (c *Clone) Apply(in ...telegraf.Metric) []telegraf.Metric {
+ cloned := []telegraf.Metric{}
+
+ for _, metric := range in {
+ cloned = append(cloned, metric.Copy())
+
+ if len(c.NameOverride) > 0 {
+ metric.SetName(c.NameOverride)
+ }
+ if len(c.NamePrefix) > 0 {
+ metric.AddPrefix(c.NamePrefix)
+ }
+ if len(c.NameSuffix) > 0 {
+ metric.AddSuffix(c.NameSuffix)
+ }
+ for key, value := range c.Tags {
+ metric.AddTag(key, value)
+ }
+ }
+ return append(in, cloned...)
+}
+
+func init() {
+ processors.Add("clone", func() telegraf.Processor {
+ return &Clone{}
+ })
+}
diff --git a/plugins/processors/clone/clone_test.go b/plugins/processors/clone/clone_test.go
new file mode 100644
index 0000000000000..f1b8dc5b29c03
--- /dev/null
+++ b/plugins/processors/clone/clone_test.go
@@ -0,0 +1,83 @@
+package clone
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/stretchr/testify/assert"
+)
+
+func createTestMetric() telegraf.Metric {
+ metric, _ := metric.New("m1",
+ map[string]string{"metric_tag": "from_metric"},
+ map[string]interface{}{"value": int64(1)},
+ time.Now(),
+ )
+ return metric
+}
+
+func calculateProcessedTags(processor Clone, metric telegraf.Metric) map[string]string {
+ processed := processor.Apply(metric)
+ return processed[0].Tags()
+}
+
+func TestRetainsTags(t *testing.T) {
+ processor := Clone{}
+
+ tags := calculateProcessedTags(processor, createTestMetric())
+
+ value, present := tags["metric_tag"]
+ assert.True(t, present, "Tag of metric was not present")
+ assert.Equal(t, "from_metric", value, "Value of Tag was changed")
+}
+
+func TestAddTags(t *testing.T) {
+ processor := Clone{Tags: map[string]string{"added_tag": "from_config", "another_tag": ""}}
+
+ tags := calculateProcessedTags(processor, createTestMetric())
+
+ value, present := tags["added_tag"]
+ assert.True(t, present, "Additional Tag of metric was not present")
+ assert.Equal(t, "from_config", value, "Value of Tag was changed")
+ assert.Equal(t, 3, len(tags), "Should have one previous and two added tags.")
+}
+
+func TestOverwritesPresentTagValues(t *testing.T) {
+ processor := Clone{Tags: map[string]string{"metric_tag": "from_config"}}
+
+ tags := calculateProcessedTags(processor, createTestMetric())
+
+ value, present := tags["metric_tag"]
+ assert.True(t, present, "Tag of metric was not present")
+ assert.Equal(t, 1, len(tags), "Should only have one tag.")
+ assert.Equal(t, "from_config", value, "Value of Tag was not changed")
+}
+
+func TestOverridesName(t *testing.T) {
+ processor := Clone{NameOverride: "overridden"}
+
+ processed := processor.Apply(createTestMetric())
+
+ assert.Equal(t, "overridden", processed[0].Name(), "Name was not overridden")
+ assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified")
+}
+
+func TestNamePrefix(t *testing.T) {
+ processor := Clone{NamePrefix: "Pre-"}
+
+ processed := processor.Apply(createTestMetric())
+
+ assert.Equal(t, "Pre-m1", processed[0].Name(), "Prefix was not applied")
+ assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified")
+}
+
+func TestNameSuffix(t *testing.T) {
+ processor := Clone{NameSuffix: "-suff"}
+
+ processed := processor.Apply(createTestMetric())
+
+ assert.Equal(t, "m1-suff", processed[0].Name(), "Suffix was not applied")
+ assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified")
+}
diff --git a/plugins/processors/converter/README.md b/plugins/processors/converter/README.md
index d56985d846a4f..d916c87643bee 100644
--- a/plugins/processors/converter/README.md
+++ b/plugins/processors/converter/README.md
@@ -9,7 +9,7 @@ Values that cannot be converted are dropped.
uniquely identifiable. Fields with the same series key (measurement + tags)
will overwrite one another.
-### Configuration:
+### Configuration
```toml
# Convert values to another metric value type
[[processors.converter]]
@@ -19,6 +19,7 @@ will overwrite one another.
## select the keys to convert. The array may contain globs.
## = [...]
[processors.converter.tags]
+ measurement = []
string = []
integer = []
unsigned = []
@@ -31,6 +32,7 @@ will overwrite one another.
## select the keys to convert. The array may contain globs.
## = [...]
[processors.converter.fields]
+ measurement = []
tag = []
string = []
integer = []
@@ -39,19 +41,40 @@ will overwrite one another.
float = []
```
-### Examples:
+### Example
+Convert `port` tag to a string field:
```toml
[[processors.converter]]
[processors.converter.tags]
string = ["port"]
+```
+
+```diff
+- apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0
++ apache,server=debian-stretch-apache port="80",BusyWorkers=1,BytesPerReq=0
+```
+Convert all `scboard_*` fields to an integer:
+```toml
+[[processors.converter]]
[processors.converter.fields]
integer = ["scboard_*"]
- tag = ["ParentServerConfigGeneration"]
```
```diff
-- apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000
-+ apache,server=debian-stretch-apache,ParentServerConfigGeneration=3 port="80",BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0i,scboard_dnslookup=0i,scboard_finishing=0i,scboard_idle_cleanup=0i,scboard_keepalive=0i,scboard_logging=0i,scboard_open=100i,scboard_reading=0i,scboard_sending=1i,scboard_starting=0i,scboard_waiting=49i 1502489900000000000
+- apache scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49
++ apache scboard_closing=0i,scboard_dnslookup=0i,scboard_finishing=0i,scboard_idle_cleanup=0i,scboard_keepalive=0i,scboard_logging=0i,scboard_open=100i,scboard_reading=0i,scboard_sending=1i,scboard_starting=0i,scboard_waiting=49i
+```
+
+Rename the measurement from a tag value:
+```toml
+[[processors.converter]]
+ [processors.converter.tags]
+ measurement = ["topic"]
+```
+
+```diff
+- mqtt_consumer,topic=sensor temp=42
++ sensor temp=42
```
diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go
index db240abf4d742..55a2a2d0965dc 100644
--- a/plugins/processors/converter/converter.go
+++ b/plugins/processors/converter/converter.go
@@ -2,7 +2,6 @@ package converter
import (
"fmt"
- "log"
"math"
"strconv"
@@ -18,6 +17,7 @@ var sampleConfig = `
## select the keys to convert. The array may contain globs.
## = [...]
[processors.converter.tags]
+ measurement = []
string = []
integer = []
unsigned = []
@@ -30,6 +30,7 @@ var sampleConfig = `
## select the keys to convert. The array may contain globs.
## = [...]
[processors.converter.fields]
+ measurement = []
tag = []
string = []
integer = []
@@ -39,30 +40,32 @@ var sampleConfig = `
`
type Conversion struct {
- Tag []string `toml:"tag"`
- String []string `toml:"string"`
- Integer []string `toml:"integer"`
- Unsigned []string `toml:"unsigned"`
- Boolean []string `toml:"boolean"`
- Float []string `toml:"float"`
+ Measurement []string `toml:"measurement"`
+ Tag []string `toml:"tag"`
+ String []string `toml:"string"`
+ Integer []string `toml:"integer"`
+ Unsigned []string `toml:"unsigned"`
+ Boolean []string `toml:"boolean"`
+ Float []string `toml:"float"`
}
type Converter struct {
- Tags *Conversion `toml:"tags"`
- Fields *Conversion `toml:"fields"`
+ Tags *Conversion `toml:"tags"`
+ Fields *Conversion `toml:"fields"`
+ Log telegraf.Logger `toml:"-"`
- initialized bool
tagConversions *ConversionFilter
fieldConversions *ConversionFilter
}
type ConversionFilter struct {
- Tag filter.Filter
- String filter.Filter
- Integer filter.Filter
- Unsigned filter.Filter
- Boolean filter.Filter
- Float filter.Filter
+ Measurement filter.Filter
+ Tag filter.Filter
+ String filter.Filter
+ Integer filter.Filter
+ Unsigned filter.Filter
+ Boolean filter.Filter
+ Float filter.Filter
}
func (p *Converter) SampleConfig() string {
@@ -73,15 +76,11 @@ func (p *Converter) Description() string {
return "Convert values to another metric value type"
}
-func (p *Converter) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
- if !p.initialized {
- err := p.compile()
- if err != nil {
- logPrintf("initialization error: %v\n", err)
- return metrics
- }
- }
+func (p *Converter) Init() error {
+ return p.compile()
+}
+func (p *Converter) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
for _, metric := range metrics {
p.convertTags(metric)
p.convertFields(metric)
@@ -106,7 +105,6 @@ func (p *Converter) compile() error {
p.tagConversions = tf
p.fieldConversions = ff
- p.initialized = true
return nil
}
@@ -117,6 +115,11 @@ func compileFilter(conv *Conversion) (*ConversionFilter, error) {
var err error
cf := &ConversionFilter{}
+ cf.Measurement, err = filter.Compile(conv.Measurement)
+ if err != nil {
+ return nil, err
+ }
+
cf.Tag, err = filter.Compile(conv.Tag)
if err != nil {
return nil, err
@@ -150,13 +153,19 @@ func compileFilter(conv *Conversion) (*ConversionFilter, error) {
return cf, nil
}
-// convertTags converts tags into fields
+// convertTags converts tags into measurements or fields.
func (p *Converter) convertTags(metric telegraf.Metric) {
if p.tagConversions == nil {
return
}
for key, value := range metric.Tags() {
+ if p.tagConversions.Measurement != nil && p.tagConversions.Measurement.Match(key) {
+ metric.RemoveTag(key)
+ metric.SetName(value)
+ continue
+ }
+
if p.tagConversions.String != nil && p.tagConversions.String.Match(key) {
metric.RemoveTag(key)
metric.AddField(key, value)
@@ -167,7 +176,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) {
v, ok := toInteger(value)
if !ok {
metric.RemoveTag(key)
- logPrintf("error converting to integer [%T]: %v\n", value, value)
+ p.Log.Errorf("error converting to integer [%T]: %v", value, value)
continue
}
@@ -179,7 +188,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) {
v, ok := toUnsigned(value)
if !ok {
metric.RemoveTag(key)
- logPrintf("error converting to unsigned [%T]: %v\n", value, value)
+ p.Log.Errorf("error converting to unsigned [%T]: %v", value, value)
continue
}
@@ -192,7 +201,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) {
v, ok := toBool(value)
if !ok {
metric.RemoveTag(key)
- logPrintf("error converting to boolean [%T]: %v\n", value, value)
+ p.Log.Errorf("error converting to boolean [%T]: %v", value, value)
continue
}
@@ -205,7 +214,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) {
v, ok := toFloat(value)
if !ok {
metric.RemoveTag(key)
- logPrintf("error converting to float [%T]: %v\n", value, value)
+ p.Log.Errorf("error converting to float [%T]: %v", value, value)
continue
}
@@ -216,18 +225,31 @@ func (p *Converter) convertTags(metric telegraf.Metric) {
}
}
-// convertFields converts fields into tags or other field types
+// convertFields converts fields into measurements, tags, or other field types.
func (p *Converter) convertFields(metric telegraf.Metric) {
if p.fieldConversions == nil {
return
}
for key, value := range metric.Fields() {
+ if p.fieldConversions.Measurement != nil && p.fieldConversions.Measurement.Match(key) {
+ v, ok := toString(value)
+ if !ok {
+ metric.RemoveField(key)
+ p.Log.Errorf("error converting to measurement [%T]: %v", value, value)
+ continue
+ }
+
+ metric.RemoveField(key)
+ metric.SetName(v)
+ continue
+ }
+
if p.fieldConversions.Tag != nil && p.fieldConversions.Tag.Match(key) {
v, ok := toString(value)
if !ok {
metric.RemoveField(key)
- logPrintf("error converting to tag [%T]: %v\n", value, value)
+ p.Log.Errorf("error converting to tag [%T]: %v", value, value)
continue
}
@@ -240,7 +262,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) {
v, ok := toFloat(value)
if !ok {
metric.RemoveField(key)
- logPrintf("error converting to integer [%T]: %v\n", value, value)
+ p.Log.Errorf("error converting to float [%T]: %v", value, value)
continue
}
@@ -253,7 +275,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) {
v, ok := toInteger(value)
if !ok {
metric.RemoveField(key)
- logPrintf("error converting to integer [%T]: %v\n", value, value)
+ p.Log.Errorf("error converting to integer [%T]: %v", value, value)
continue
}
@@ -266,7 +288,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) {
v, ok := toUnsigned(value)
if !ok {
metric.RemoveField(key)
- logPrintf("error converting to unsigned [%T]: %v\n", value, value)
+ p.Log.Errorf("error converting to unsigned [%T]: %v", value, value)
continue
}
@@ -279,7 +301,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) {
v, ok := toBool(value)
if !ok {
metric.RemoveField(key)
- logPrintf("error converting to bool [%T]: %v\n", value, value)
+ p.Log.Errorf("error converting to bool [%T]: %v", value, value)
continue
}
@@ -292,7 +314,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) {
v, ok := toString(value)
if !ok {
metric.RemoveField(key)
- logPrintf("error converting to string [%T]: %v\n", value, value)
+ p.Log.Errorf("Error converting to string [%T]: %v", value, value)
continue
}
@@ -305,12 +327,12 @@ func (p *Converter) convertFields(metric telegraf.Metric) {
func toBool(v interface{}) (bool, bool) {
switch value := v.(type) {
- case int64, uint64, float64:
- if value != 0 {
- return true, true
- } else {
- return false, false
- }
+ case int64:
+ return value != 0, true
+ case uint64:
+ return value != 0, true
+ case float64:
+ return value != 0, true
case bool:
return value, true
case string:
@@ -336,7 +358,7 @@ func toInteger(v interface{}) (int64, bool) {
} else if value > float64(math.MaxInt64) {
return math.MaxInt64, true
} else {
- return int64(Round(value)), true
+ return int64(math.Round(value)), true
}
case bool:
if value {
@@ -375,7 +397,7 @@ func toUnsigned(v interface{}) (uint64, bool) {
} else if value > float64(math.MaxUint64) {
return math.MaxUint64, true
} else {
- return uint64(Round(value)), true
+ return uint64(math.Round(value)), true
}
case bool:
if value {
@@ -435,20 +457,6 @@ func toString(v interface{}) (string, bool) {
return "", false
}
-// math.Round was not added until Go 1.10, can be removed when support for Go
-// 1.9 is dropped.
-func Round(x float64) float64 {
- t := math.Trunc(x)
- if math.Abs(x-t) >= 0.5 {
- return t + math.Copysign(1, x)
- }
- return t
-}
-
-func logPrintf(format string, v ...interface{}) {
- log.Printf("D! [processors.converter] "+format, v...)
-}
-
func init() {
processors.Add("converter", func() telegraf.Processor {
return &Converter{}
diff --git a/plugins/processors/converter/converter_test.go b/plugins/processors/converter/converter_test.go
index 1d60a40fb2ab4..efde0bcd9e21f 100644
--- a/plugins/processors/converter/converter_test.go
+++ b/plugins/processors/converter/converter_test.go
@@ -6,48 +6,17 @@ import (
"time"
"github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
-func Metric(v telegraf.Metric, err error) telegraf.Metric {
- if err != nil {
- panic(err)
- }
- return v
-}
-
func TestConverter(t *testing.T) {
tests := []struct {
name string
converter *Converter
input telegraf.Metric
- expected telegraf.Metric
+ expected []telegraf.Metric
}{
- {
- name: "empty",
- converter: &Converter{},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "value": 42.0,
- },
- time.Unix(0, 0),
- ),
- ),
- expected: Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "value": 42.0,
- },
- time.Unix(0, 0),
- ),
- ),
- },
{
name: "from tag",
converter: &Converter{
@@ -60,23 +29,21 @@ func TestConverter(t *testing.T) {
Tag: []string{"tag"},
},
},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{
- "float": "42",
- "int": "42",
- "uint": "42",
- "bool": "true",
- "string": "howdy",
- "tag": "tag",
- },
- map[string]interface{}{},
- time.Unix(0, 0),
- ),
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "float": "42",
+ "int": "42",
+ "uint": "42",
+ "bool": "true",
+ "string": "howdy",
+ "tag": "tag",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{
"tag": "tag",
@@ -90,7 +57,7 @@ func TestConverter(t *testing.T) {
},
time.Unix(0, 0),
),
- ),
+ },
},
{
name: "from tag unconvertible",
@@ -102,27 +69,25 @@ func TestConverter(t *testing.T) {
Float: []string{"float"},
},
},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{
- "float": "a",
- "int": "b",
- "uint": "c",
- "bool": "maybe",
- },
- map[string]interface{}{},
- time.Unix(0, 0),
- ),
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "float": "a",
+ "int": "b",
+ "uint": "c",
+ "bool": "maybe",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{},
time.Unix(0, 0),
),
- ),
+ },
},
{
name: "from string field",
@@ -136,29 +101,27 @@ func TestConverter(t *testing.T) {
Tag: []string{"f"},
},
},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "a": "howdy",
- "b": "42",
- "b1": "42.2",
- "b2": "42.5",
- "b3": "0x2A",
- "c": "42",
- "c1": "42.2",
- "c2": "42.5",
- "c3": "0x2A",
- "d": "true",
- "e": "42.0",
- "f": "foo",
- },
- time.Unix(0, 0),
- ),
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "howdy",
+ "b": "42",
+ "b1": "42.2",
+ "b2": "42.5",
+ "b3": "0x2A",
+ "c": "42",
+ "c1": "42.2",
+ "c2": "42.5",
+ "c3": "0x2A",
+ "d": "true",
+ "e": "42.0",
+ "f": "foo",
+ },
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{
"f": "foo",
@@ -178,7 +141,7 @@ func TestConverter(t *testing.T) {
},
time.Unix(0, 0),
),
- ),
+ },
},
{
name: "from string field unconvertible",
@@ -190,27 +153,25 @@ func TestConverter(t *testing.T) {
Float: []string{"d"},
},
},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "a": "a",
- "b": "b",
- "c": "c",
- "d": "d",
- },
- time.Unix(0, 0),
- ),
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "a",
+ "b": "b",
+ "c": "c",
+ "d": "d",
+ },
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{},
time.Unix(0, 0),
),
- ),
+ },
},
{
name: "from integer field",
@@ -219,29 +180,28 @@ func TestConverter(t *testing.T) {
String: []string{"a"},
Integer: []string{"b"},
Unsigned: []string{"c", "negative_uint"},
- Boolean: []string{"d"},
+ Boolean: []string{"d", "bool_zero"},
Float: []string{"e"},
Tag: []string{"f"},
},
},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "a": int64(42),
- "b": int64(42),
- "c": int64(42),
- "d": int64(42),
- "e": int64(42),
- "f": int64(42),
- "negative_uint": int64(-42),
- },
- time.Unix(0, 0),
- ),
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": int64(42),
+ "b": int64(42),
+ "c": int64(42),
+ "d": int64(42),
+ "e": int64(42),
+ "f": int64(42),
+ "negative_uint": int64(-42),
+ "bool_zero": int64(0),
+ },
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{
"f": "42",
@@ -253,10 +213,11 @@ func TestConverter(t *testing.T) {
"d": true,
"e": 42.0,
"negative_uint": uint64(0),
+ "bool_zero": false,
},
time.Unix(0, 0),
),
- ),
+ },
},
{
name: "from unsigned field",
@@ -265,29 +226,28 @@ func TestConverter(t *testing.T) {
String: []string{"a"},
Integer: []string{"b", "overflow_int"},
Unsigned: []string{"c"},
- Boolean: []string{"d"},
+ Boolean: []string{"d", "bool_zero"},
Float: []string{"e"},
Tag: []string{"f"},
},
},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "a": uint64(42),
- "b": uint64(42),
- "c": uint64(42),
- "d": uint64(42),
- "e": uint64(42),
- "f": uint64(42),
- "overflow_int": uint64(math.MaxUint64),
- },
- time.Unix(0, 0),
- ),
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": uint64(42),
+ "b": uint64(42),
+ "c": uint64(42),
+ "d": uint64(42),
+ "e": uint64(42),
+ "f": uint64(42),
+ "overflow_int": uint64(math.MaxUint64),
+ "bool_zero": uint64(0),
+ },
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{
"f": "42",
@@ -299,10 +259,11 @@ func TestConverter(t *testing.T) {
"d": true,
"e": 42.0,
"overflow_int": int64(math.MaxInt64),
+ "bool_zero": false,
},
time.Unix(0, 0),
),
- ),
+ },
},
{
name: "out of range for unsigned",
@@ -311,19 +272,17 @@ func TestConverter(t *testing.T) {
Unsigned: []string{"a", "b"},
},
},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "a": int64(-42),
- "b": math.MaxFloat64,
- },
- time.Unix(0, 0),
- ),
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": int64(-42),
+ "b": math.MaxFloat64,
+ },
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
@@ -332,7 +291,7 @@ func TestConverter(t *testing.T) {
},
time.Unix(0, 0),
),
- ),
+ },
},
{
name: "boolean field",
@@ -346,29 +305,27 @@ func TestConverter(t *testing.T) {
Tag: []string{"f", "ff"},
},
},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "a": true,
- "b": true,
- "c": true,
- "d": true,
- "e": true,
- "f": true,
- "af": false,
- "bf": false,
- "cf": false,
- "df": false,
- "ef": false,
- "ff": false,
- },
- time.Unix(0, 0),
- ),
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": true,
+ "b": true,
+ "c": true,
+ "d": true,
+ "e": true,
+ "f": true,
+ "af": false,
+ "bf": false,
+ "cf": false,
+ "df": false,
+ "ef": false,
+ "ff": false,
+ },
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{
"f": "true",
@@ -388,7 +345,7 @@ func TestConverter(t *testing.T) {
},
time.Unix(0, 0),
),
- ),
+ },
},
{
name: "from float field",
@@ -397,33 +354,32 @@ func TestConverter(t *testing.T) {
String: []string{"a"},
Integer: []string{"b", "too_large_int", "too_small_int"},
Unsigned: []string{"c", "negative_uint", "too_large_uint", "too_small_uint"},
- Boolean: []string{"d"},
+ Boolean: []string{"d", "bool_zero"},
Float: []string{"e"},
Tag: []string{"f"},
},
},
- input: Metric(
- metric.New(
- "cpu",
- map[string]string{},
- map[string]interface{}{
- "a": 42.0,
- "b": 42.0,
- "c": 42.0,
- "d": 42.0,
- "e": 42.0,
- "f": 42.0,
- "too_large_int": math.MaxFloat64,
- "too_large_uint": math.MaxFloat64,
- "too_small_int": -math.MaxFloat64,
- "too_small_uint": -math.MaxFloat64,
- "negative_uint": -42.0,
- },
- time.Unix(0, 0),
- ),
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": 42.0,
+ "b": 42.0,
+ "c": 42.0,
+ "d": 42.0,
+ "e": 42.0,
+ "f": 42.0,
+ "too_large_int": math.MaxFloat64,
+ "too_large_uint": math.MaxFloat64,
+ "too_small_int": -math.MaxFloat64,
+ "too_small_uint": -math.MaxFloat64,
+ "negative_uint": -42.0,
+ "bool_zero": 0.0,
+ },
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{
"f": "42",
@@ -439,10 +395,11 @@ func TestConverter(t *testing.T) {
"too_small_int": int64(math.MinInt64),
"too_small_uint": uint64(0),
"negative_uint": uint64(0),
+ "bool_zero": false,
},
time.Unix(0, 0),
),
- ),
+ },
},
{
name: "globbing",
@@ -451,41 +408,123 @@ func TestConverter(t *testing.T) {
Integer: []string{"int_*"},
},
},
- input: Metric(
- metric.New(
+ input: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "int_a": "1",
+ "int_b": "2",
+ "float_a": 1.0,
+ },
+ time.Unix(0, 0),
+ ),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
- "int_a": "1",
- "int_b": "2",
+ "int_a": int64(1),
+ "int_b": int64(2),
"float_a": 1.0,
},
time.Unix(0, 0),
),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tt.converter.Log = testutil.Logger{}
+
+ err := tt.converter.Init()
+ require.NoError(t, err)
+ actual := tt.converter.Apply(tt.input)
+
+ testutil.RequireMetricsEqual(t, tt.expected, actual)
+ })
+ }
+}
+
+func TestMeasurement(t *testing.T) {
+ tests := []struct {
+ name string
+ converter *Converter
+ input telegraf.Metric
+ expected []telegraf.Metric
+ }{
+ {
+ name: "measurement from tag",
+ converter: &Converter{
+ Tags: &Conversion{
+ Measurement: []string{"filepath"},
+ },
+ },
+ input: testutil.MustMetric(
+ "file",
+ map[string]string{
+ "filepath": "/var/log/syslog",
+ },
+ map[string]interface{}{
+ "msg": "howdy",
+ },
+ time.Unix(0, 0),
),
- expected: Metric(
- metric.New(
- "cpu",
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "/var/log/syslog",
map[string]string{},
map[string]interface{}{
- "int_a": int64(1),
- "int_b": int64(2),
- "float_a": 1.0,
+ "msg": "howdy",
},
time.Unix(0, 0),
),
+ },
+ },
+ {
+ name: "measurement from field",
+ converter: &Converter{
+ Fields: &Conversion{
+ Measurement: []string{"topic"},
+ },
+ },
+ input: testutil.MustMetric(
+ "file",
+ map[string]string{},
+ map[string]interface{}{
+ "v": 1,
+ "topic": "telegraf",
+ },
+ time.Unix(0, 0),
),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "telegraf",
+ map[string]string{},
+ map[string]interface{}{
+ "v": 1,
+ },
+ time.Unix(0, 0),
+ ),
+ },
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- metrics := tt.converter.Apply(tt.input)
+ tt.converter.Log = testutil.Logger{}
+ err := tt.converter.Init()
+ require.NoError(t, err)
- require.Equal(t, 1, len(metrics))
- require.Equal(t, tt.expected.Name(), metrics[0].Name())
- require.Equal(t, tt.expected.Tags(), metrics[0].Tags())
- require.Equal(t, tt.expected.Fields(), metrics[0].Fields())
- require.Equal(t, tt.expected.Time(), metrics[0].Time())
+ actual := tt.converter.Apply(tt.input)
+
+ testutil.RequireMetricsEqual(t, tt.expected, actual)
})
}
}
+
+func TestEmptyConfigInitError(t *testing.T) {
+ converter := &Converter{
+ Log: testutil.Logger{},
+ }
+ err := converter.Init()
+ require.Error(t, err)
+}
diff --git a/plugins/processors/date/README.md b/plugins/processors/date/README.md
new file mode 100644
index 0000000000000..9a093fe0e86db
--- /dev/null
+++ b/plugins/processors/date/README.md
@@ -0,0 +1,56 @@
+# Date Processor Plugin
+
+Use the `date` processor to add the metric timestamp as a human readable tag.
+
+A common use is to add a tag that can be used to group by month or year.
+
+A few example usecases include:
+1) consumption data for utilities on per month basis
+2) bandwidth capacity per month
+3) compare energy production or sales on a yearly or monthly basis
+
+### Configuration
+
+```toml
+[[processors.date]]
+ ## New tag to create
+ tag_key = "month"
+
+ ## New field to create (cannot set both field_key and tag_key)
+ # field_key = "month"
+
+ ## Date format string, must be a representation of the Go "reference time"
+ ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+ date_format = "Jan"
+
+ ## If destination is a field, date format can also be one of
+ ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
+ # date_format = "unix"
+
+ ## Offset duration added to the date string when writing the new tag.
+ # date_offset = "0s"
+
+ ## Timezone to use when creating the tag or field using a reference time
+ ## string. This can be set to one of "UTC", "Local", or to a location name
+ ## in the IANA Time Zone database.
+ ## example: timezone = "America/Los_Angeles"
+ # timezone = "UTC"
+```
+
+#### timezone
+
+On Windows, only the `Local` and `UTC` zones are available by default. To use
+other timezones, set the `ZONEINFO` environment variable to the location of
+[`zoneinfo.zip`][zoneinfo]:
+```
+set ZONEINFO=C:\zoneinfo.zip
+```
+
+### Example
+
+```diff
+- throughput lower=10i,upper=1000i,mean=500i 1560540094000000000
++ throughput,month=Jun lower=10i,upper=1000i,mean=500i 1560540094000000000
+```
+
+[zoneinfo]: https://github.com/golang/go/raw/50bd1c4d4eb4fac8ddeb5f063c099daccfb71b26/lib/time/zoneinfo.zip
diff --git a/plugins/processors/date/date.go b/plugins/processors/date/date.go
new file mode 100644
index 0000000000000..ef8609811c1f7
--- /dev/null
+++ b/plugins/processors/date/date.go
@@ -0,0 +1,101 @@
+package date
+
+import (
+ "errors"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+const sampleConfig = `
+ ## New tag to create
+ tag_key = "month"
+
+ ## New field to create (cannot set both field_key and tag_key)
+ # field_key = "month"
+
+ ## Date format string, must be a representation of the Go "reference time"
+ ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+ date_format = "Jan"
+
+ ## If destination is a field, date format can also be one of
+ ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
+ # date_format = "unix"
+
+ ## Offset duration added to the date string when writing the new tag.
+ # date_offset = "0s"
+
+ ## Timezone to use when creating the tag or field using a reference time
+ ## string. This can be set to one of "UTC", "Local", or to a location name
+ ## in the IANA Time Zone database.
+ ## example: timezone = "America/Los_Angeles"
+ # timezone = "UTC"
+`
+
+const defaultTimezone = "UTC"
+
+type Date struct {
+ TagKey string `toml:"tag_key"`
+ FieldKey string `toml:"field_key"`
+ DateFormat string `toml:"date_format"`
+ DateOffset internal.Duration `toml:"date_offset"`
+ Timezone string `toml:"timezone"`
+
+ location *time.Location
+}
+
+func (d *Date) SampleConfig() string {
+ return sampleConfig
+}
+
+func (d *Date) Description() string {
+ return "Dates measurements, tags, and fields that pass through this filter."
+}
+
+func (d *Date) Init() error {
+ // Check either TagKey or FieldKey specified
+ if len(d.FieldKey) > 0 && len(d.TagKey) > 0 {
+ return errors.New("Only one of field_key or tag_key can be specified")
+ } else if len(d.FieldKey) == 0 && len(d.TagKey) == 0 {
+ return errors.New("One of field_key or tag_key must be specified")
+ }
+
+ var err error
+ // LoadLocation returns UTC if timezone is the empty string.
+ d.location, err = time.LoadLocation(d.Timezone)
+ return err
+}
+
+func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric {
+ for _, point := range in {
+ tm := point.Time().In(d.location).Add(d.DateOffset.Duration)
+ if len(d.TagKey) > 0 {
+ point.AddTag(d.TagKey, tm.Format(d.DateFormat))
+ } else if len(d.FieldKey) > 0 {
+ switch d.DateFormat {
+ case "unix":
+ point.AddField(d.FieldKey, tm.Unix())
+ case "unix_ms":
+ point.AddField(d.FieldKey, tm.UnixNano()/1000000)
+ case "unix_us":
+ point.AddField(d.FieldKey, tm.UnixNano()/1000)
+ case "unix_ns":
+ point.AddField(d.FieldKey, tm.UnixNano())
+ default:
+ point.AddField(d.FieldKey, tm.Format(d.DateFormat))
+ }
+ }
+ }
+
+ return in
+}
+
+func init() {
+ processors.Add("date", func() telegraf.Processor {
+ return &Date{
+ Timezone: defaultTimezone,
+ }
+ })
+}
diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go
new file mode 100644
index 0000000000000..42e094c939c17
--- /dev/null
+++ b/plugins/processors/date/date_test.go
@@ -0,0 +1,199 @@
+package date
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func MustMetric(name string, tags map[string]string, fields map[string]interface{}, metricTime time.Time) telegraf.Metric {
+ if tags == nil {
+ tags = map[string]string{}
+ }
+ if fields == nil {
+ fields = map[string]interface{}{}
+ }
+ m, _ := metric.New(name, tags, fields, metricTime)
+ return m
+}
+
+func TestTagAndField(t *testing.T) {
+ dateFormatTagAndField := Date{
+ TagKey: "month",
+ FieldKey: "month",
+ }
+ err := dateFormatTagAndField.Init()
+ require.Error(t, err)
+
+}
+
+func TestNoOutputSpecified(t *testing.T) {
+ dateFormatNoOutput := Date{}
+ err := dateFormatNoOutput.Init()
+ require.Error(t, err)
+}
+
+func TestMonthTag(t *testing.T) {
+ dateFormatMonth := Date{
+ TagKey: "month",
+ DateFormat: "Jan",
+ }
+ err := dateFormatMonth.Init()
+ require.NoError(t, err)
+
+ currentTime := time.Now()
+ month := currentTime.Format("Jan")
+
+ m1 := MustMetric("foo", nil, nil, currentTime)
+ m2 := MustMetric("bar", nil, nil, currentTime)
+ m3 := MustMetric("baz", nil, nil, currentTime)
+ monthApply := dateFormatMonth.Apply(m1, m2, m3)
+ assert.Equal(t, map[string]string{"month": month}, monthApply[0].Tags(), "should add tag 'month'")
+ assert.Equal(t, map[string]string{"month": month}, monthApply[1].Tags(), "should add tag 'month'")
+ assert.Equal(t, map[string]string{"month": month}, monthApply[2].Tags(), "should add tag 'month'")
+}
+
+func TestMonthField(t *testing.T) {
+ dateFormatMonth := Date{
+ FieldKey: "month",
+ DateFormat: "Jan",
+ }
+
+ err := dateFormatMonth.Init()
+ require.NoError(t, err)
+
+ currentTime := time.Now()
+ month := currentTime.Format("Jan")
+
+ m1 := MustMetric("foo", nil, nil, currentTime)
+ m2 := MustMetric("bar", nil, nil, currentTime)
+ m3 := MustMetric("baz", nil, nil, currentTime)
+ monthApply := dateFormatMonth.Apply(m1, m2, m3)
+ assert.Equal(t, map[string]interface{}{"month": month}, monthApply[0].Fields(), "should add field 'month'")
+ assert.Equal(t, map[string]interface{}{"month": month}, monthApply[1].Fields(), "should add field 'month'")
+ assert.Equal(t, map[string]interface{}{"month": month}, monthApply[2].Fields(), "should add field 'month'")
+}
+
+func TestOldDateTag(t *testing.T) {
+ dateFormatYear := Date{
+ TagKey: "year",
+ DateFormat: "2006",
+ }
+
+ err := dateFormatYear.Init()
+ require.NoError(t, err)
+
+ m7 := MustMetric("foo", nil, nil, time.Date(1993, 05, 27, 0, 0, 0, 0, time.UTC))
+ customDateApply := dateFormatYear.Apply(m7)
+ assert.Equal(t, map[string]string{"year": "1993"}, customDateApply[0].Tags(), "should add tag 'year'")
+}
+
+func TestFieldUnix(t *testing.T) {
+ dateFormatUnix := Date{
+ FieldKey: "unix",
+ DateFormat: "unix",
+ }
+
+ err := dateFormatUnix.Init()
+ require.NoError(t, err)
+
+ currentTime := time.Now()
+ unixTime := currentTime.Unix()
+
+ m8 := MustMetric("foo", nil, nil, currentTime)
+ unixApply := dateFormatUnix.Apply(m8)
+ assert.Equal(t, map[string]interface{}{"unix": unixTime}, unixApply[0].Fields(), "should add unix time in s as field 'unix'")
+}
+
+func TestFieldUnixNano(t *testing.T) {
+ dateFormatUnixNano := Date{
+ FieldKey: "unix_ns",
+ DateFormat: "unix_ns",
+ }
+
+ err := dateFormatUnixNano.Init()
+ require.NoError(t, err)
+
+ currentTime := time.Now()
+ unixNanoTime := currentTime.UnixNano()
+
+ m9 := MustMetric("foo", nil, nil, currentTime)
+ unixNanoApply := dateFormatUnixNano.Apply(m9)
+ assert.Equal(t, map[string]interface{}{"unix_ns": unixNanoTime}, unixNanoApply[0].Fields(), "should add unix time in ns as field 'unix_ns'")
+}
+
+func TestFieldUnixMillis(t *testing.T) {
+ dateFormatUnixMillis := Date{
+ FieldKey: "unix_ms",
+ DateFormat: "unix_ms",
+ }
+
+ err := dateFormatUnixMillis.Init()
+ require.NoError(t, err)
+
+ currentTime := time.Now()
+ unixMillisTime := currentTime.UnixNano() / 1000000
+
+ m10 := MustMetric("foo", nil, nil, currentTime)
+ unixMillisApply := dateFormatUnixMillis.Apply(m10)
+ assert.Equal(t, map[string]interface{}{"unix_ms": unixMillisTime}, unixMillisApply[0].Fields(), "should add unix time in ms as field 'unix_ms'")
+}
+
+func TestFieldUnixMicros(t *testing.T) {
+ dateFormatUnixMicros := Date{
+ FieldKey: "unix_us",
+ DateFormat: "unix_us",
+ }
+
+ err := dateFormatUnixMicros.Init()
+ require.NoError(t, err)
+
+ currentTime := time.Now()
+ unixMicrosTime := currentTime.UnixNano() / 1000
+
+ m11 := MustMetric("foo", nil, nil, currentTime)
+ unixMicrosApply := dateFormatUnixMicros.Apply(m11)
+ assert.Equal(t, map[string]interface{}{"unix_us": unixMicrosTime}, unixMicrosApply[0].Fields(), "should add unix time in us as field 'unix_us'")
+}
+
+func TestDateOffset(t *testing.T) {
+ plugin := &Date{
+ TagKey: "hour",
+ DateFormat: "15",
+ DateOffset: internal.Duration{Duration: 2 * time.Hour},
+ }
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ metric := testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(1578603600, 0),
+ )
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "hour": "23",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(1578603600, 0),
+ ),
+ }
+
+ actual := plugin.Apply(metric)
+ testutil.RequireMetricsEqual(t, expected, actual)
+}
diff --git a/plugins/processors/dedup/README.md b/plugins/processors/dedup/README.md
new file mode 100644
index 0000000000000..d0b516c274cf4
--- /dev/null
+++ b/plugins/processors/dedup/README.md
@@ -0,0 +1,24 @@
+# Dedup Processor Plugin
+
+Filter metrics whose field values are exact repetitions of the previous values.
+
+### Configuration
+
+```toml
+[[processors.dedup]]
+ ## Maximum time to suppress output
+ dedup_interval = "600s"
+```
+
+### Example
+
+```diff
+- cpu,cpu=cpu0 time_idle=42i,time_guest=1i
+- cpu,cpu=cpu0 time_idle=42i,time_guest=2i
+- cpu,cpu=cpu0 time_idle=42i,time_guest=2i
+- cpu,cpu=cpu0 time_idle=44i,time_guest=2i
+- cpu,cpu=cpu0 time_idle=44i,time_guest=2i
++ cpu,cpu=cpu0 time_idle=42i,time_guest=1i
++ cpu,cpu=cpu0 time_idle=42i,time_guest=2i
++ cpu,cpu=cpu0 time_idle=44i,time_guest=2i
+```
diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go
new file mode 100644
index 0000000000000..3dd7516a696c2
--- /dev/null
+++ b/plugins/processors/dedup/dedup.go
@@ -0,0 +1,128 @@
+package dedup
+
+import (
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+var sampleConfig = `
+ ## Maximum time to suppress output
+ dedup_interval = "600s"
+`
+
+type Dedup struct {
+ DedupInterval internal.Duration `toml:"dedup_interval"`
+ FlushTime time.Time
+ Cache map[uint64]telegraf.Metric
+}
+
+func (d *Dedup) SampleConfig() string {
+ return sampleConfig
+}
+
+func (d *Dedup) Description() string {
+ return "Filter metrics with repeating field values"
+}
+
+// Remove single item from slice
+func remove(slice []telegraf.Metric, i int) []telegraf.Metric {
+ slice[len(slice)-1], slice[i] = slice[i], slice[len(slice)-1]
+ return slice[:len(slice)-1]
+}
+
+// Remove expired items from cache
+func (d *Dedup) cleanup() {
+ // No need to cleanup cache too often. Lets save some CPU
+ if time.Since(d.FlushTime) < d.DedupInterval.Duration {
+ return
+ }
+ d.FlushTime = time.Now()
+ keep := make(map[uint64]telegraf.Metric, 0)
+ for id, metric := range d.Cache {
+ if time.Since(metric.Time()) < d.DedupInterval.Duration {
+ keep[id] = metric
+ }
+ }
+ d.Cache = keep
+}
+
+// Save item to cache
+func (d *Dedup) save(metric telegraf.Metric, id uint64) {
+ d.Cache[id] = metric.Copy()
+ d.Cache[id].Accept()
+}
+
+// main processing method
+func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
+ for idx, metric := range metrics {
+ id := metric.HashID()
+ m, ok := d.Cache[id]
+
+ // If not in cache then just save it
+ if !ok {
+ d.save(metric, id)
+ continue
+ }
+
+ // If cache item has expired then refresh it
+ if time.Since(m.Time()) >= d.DedupInterval.Duration {
+ d.save(metric, id)
+ continue
+ }
+
+ // For each field compare value with the cached one
+ changed := false
+ added := false
+ sametime := metric.Time() == m.Time()
+ for _, f := range metric.FieldList() {
+ if value, ok := m.GetField(f.Key); ok {
+ if value != f.Value {
+ changed = true
+ break
+ }
+ } else if sametime {
+ // This field isn't in the cached metric but it's the
+ // same series and timestamp. Merge it into the cached
+ // metric.
+
+ // Metrics have a ValueType that applies to all values
+ // in the metric. If an input needs to produce values
+ // with different ValueTypes but the same timestamp,
+ // they have to produce multiple metrics. (See the
+ // system input for an example.) In this case, dedup
+ // ignores the ValueTypes of the metrics and merges
+ // the fields into one metric for the dup check.
+
+ m.AddField(f.Key, f.Value)
+ added = true
+ }
+ }
+ // If any field value has changed then refresh the cache
+ if changed {
+ d.save(metric, id)
+ continue
+ }
+
+ if sametime && added {
+ continue
+ }
+
+ // In any other case remove metric from the output
+ metrics = remove(metrics, idx)
+ }
+ d.cleanup()
+ return metrics
+}
+
+func init() {
+ processors.Add("dedup", func() telegraf.Processor {
+ return &Dedup{
+ DedupInterval: internal.Duration{Duration: 10 * time.Minute},
+ FlushTime: time.Now(),
+ Cache: make(map[uint64]telegraf.Metric),
+ }
+ })
+}
diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go
new file mode 100644
index 0000000000000..cae2bf1a529ed
--- /dev/null
+++ b/plugins/processors/dedup/dedup_test.go
@@ -0,0 +1,194 @@
+package dedup
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/metric"
+)
+
+func createMetric(name string, value int64, when time.Time) telegraf.Metric {
+ m, _ := metric.New(name,
+ map[string]string{"tag": "tag_value"},
+ map[string]interface{}{"value": value},
+ when,
+ )
+ return m
+}
+
+func createDedup(initTime time.Time) Dedup {
+ return Dedup{
+ DedupInterval: internal.Duration{Duration: 10 * time.Minute},
+ FlushTime: initTime,
+ Cache: make(map[uint64]telegraf.Metric),
+ }
+}
+
+func assertCacheRefresh(t *testing.T, proc *Dedup, item telegraf.Metric) {
+ id := item.HashID()
+ name := item.Name()
+ // cache is not empty
+ require.NotEqual(t, 0, len(proc.Cache))
+ // cache has metric with proper id
+ cache, present := proc.Cache[id]
+ require.True(t, present)
+ // cache has metric with proper name
+ require.Equal(t, name, cache.Name())
+ // cached metric has proper field
+ cValue, present := cache.GetField("value")
+ require.True(t, present)
+ iValue, _ := item.GetField("value")
+ require.Equal(t, cValue, iValue)
+ // cached metric has proper timestamp
+ require.Equal(t, cache.Time(), item.Time())
+}
+
+func assertCacheHit(t *testing.T, proc *Dedup, item telegraf.Metric) {
+ id := item.HashID()
+ name := item.Name()
+ // cache is not empty
+ require.NotEqual(t, 0, len(proc.Cache))
+ // cache has metric with proper id
+ cache, present := proc.Cache[id]
+ require.True(t, present)
+ // cache has metric with proper name
+ require.Equal(t, name, cache.Name())
+ // cached metric has proper field
+ cValue, present := cache.GetField("value")
+ require.True(t, present)
+ iValue, _ := item.GetField("value")
+ require.Equal(t, cValue, iValue)
+ // cached metric did NOT change timestamp
+ require.NotEqual(t, cache.Time(), item.Time())
+}
+
+func assertMetricPassed(t *testing.T, target []telegraf.Metric, source telegraf.Metric) {
+ // target is not empty
+ require.NotEqual(t, 0, len(target))
+ // target has metric with proper name
+ require.Equal(t, "m1", target[0].Name())
+ // target metric has proper field
+ tValue, present := target[0].GetField("value")
+ require.True(t, present)
+ sValue, present := source.GetField("value")
+ require.Equal(t, tValue, sValue)
+ // target metric has proper timestamp
+ require.Equal(t, target[0].Time(), source.Time())
+}
+
+func assertMetricSuppressed(t *testing.T, target []telegraf.Metric, source telegraf.Metric) {
+ // target is empty
+ require.Equal(t, 0, len(target))
+}
+
+func TestProcRetainsMetric(t *testing.T) {
+ deduplicate := createDedup(time.Now())
+ source := createMetric("m1", 1, time.Now())
+ target := deduplicate.Apply(source)
+
+ assertCacheRefresh(t, &deduplicate, source)
+ assertMetricPassed(t, target, source)
+}
+
+func TestSuppressRepeatedValue(t *testing.T) {
+ deduplicate := createDedup(time.Now())
+ // Create metric in the past
+ source := createMetric("m1", 1, time.Now().Add(-1*time.Second))
+ target := deduplicate.Apply(source)
+ source = createMetric("m1", 1, time.Now())
+ target = deduplicate.Apply(source)
+
+ assertCacheHit(t, &deduplicate, source)
+ assertMetricSuppressed(t, target, source)
+}
+
+func TestPassUpdatedValue(t *testing.T) {
+ deduplicate := createDedup(time.Now())
+ // Create metric in the past
+ source := createMetric("m1", 1, time.Now().Add(-1*time.Second))
+ target := deduplicate.Apply(source)
+ source = createMetric("m1", 2, time.Now())
+ target = deduplicate.Apply(source)
+
+ assertCacheRefresh(t, &deduplicate, source)
+ assertMetricPassed(t, target, source)
+}
+
+func TestPassAfterCacheExpire(t *testing.T) {
+ deduplicate := createDedup(time.Now())
+ // Create metric in the past
+ source := createMetric("m1", 1, time.Now().Add(-1*time.Hour))
+ target := deduplicate.Apply(source)
+ source = createMetric("m1", 1, time.Now())
+ target = deduplicate.Apply(source)
+
+ assertCacheRefresh(t, &deduplicate, source)
+ assertMetricPassed(t, target, source)
+}
+
+func TestCacheRetainsMetrics(t *testing.T) {
+ deduplicate := createDedup(time.Now())
+ // Create metric in the past 3sec
+ source := createMetric("m1", 1, time.Now().Add(-3*time.Hour))
+ deduplicate.Apply(source)
+ // Create metric in the past 2sec
+ source = createMetric("m1", 1, time.Now().Add(-2*time.Hour))
+ deduplicate.Apply(source)
+ source = createMetric("m1", 1, time.Now())
+ deduplicate.Apply(source)
+
+ assertCacheRefresh(t, &deduplicate, source)
+}
+
+func TestCacheShrink(t *testing.T) {
+ // Time offset is more than 2 * DedupInterval
+ deduplicate := createDedup(time.Now().Add(-2 * time.Hour))
+ // Time offset is more than 1 * DedupInterval
+ source := createMetric("m1", 1, time.Now().Add(-1*time.Hour))
+ deduplicate.Apply(source)
+
+ require.Equal(t, 0, len(deduplicate.Cache))
+}
+
+func TestSameTimestamp(t *testing.T) {
+ now := time.Now()
+ dedup := createDedup(now)
+ var in telegraf.Metric
+ var out []telegraf.Metric
+
+ in, _ = metric.New("metric",
+ map[string]string{"tag": "value"},
+ map[string]interface{}{"foo": 1}, // field
+ now,
+ )
+ out = dedup.Apply(in)
+ require.Equal(t, []telegraf.Metric{in}, out) // pass
+
+ in, _ = metric.New("metric",
+ map[string]string{"tag": "value"},
+ map[string]interface{}{"bar": 1}, // different field
+ now,
+ )
+ out = dedup.Apply(in)
+ require.Equal(t, []telegraf.Metric{in}, out) // pass
+
+ in, _ = metric.New("metric",
+ map[string]string{"tag": "value"},
+ map[string]interface{}{"bar": 2}, // same field different value
+ now,
+ )
+ out = dedup.Apply(in)
+ require.Equal(t, []telegraf.Metric{in}, out) // pass
+
+ in, _ = metric.New("metric",
+ map[string]string{"tag": "value"},
+ map[string]interface{}{"bar": 2}, // same field same value
+ now,
+ )
+ out = dedup.Apply(in)
+ require.Equal(t, []telegraf.Metric{}, out) // drop
+}
diff --git a/plugins/processors/defaults/README.md b/plugins/processors/defaults/README.md
new file mode 100644
index 0000000000000..55a7eeb46e326
--- /dev/null
+++ b/plugins/processors/defaults/README.md
@@ -0,0 +1,44 @@
+# Defaults Processor
+
+The *Defaults* processor allows you to ensure certain fields will always exist with a specified default value on your metric(s).
+
+There are three cases where this processor will insert a configured default field.
+
+1. The field is nil on the incoming metric
+1. The field is not nil, but its value is an empty string.
+1. The field is not nil, but its value is a string of one or more empty spaces.
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration
+```toml
+## Set default fields on your metric(s) when they are nil or empty
+[[processors.defaults]]
+
+## This table determines what fields will be inserted in your metric(s)
+ [processors.defaults.fields]
+ field_1 = "bar"
+ time_idle = 0
+ is_error = true
+```
+
+### Example
+Ensure a _status\_code_ field with _N/A_ is inserted in the metric when one is not set in the metric by default:
+
+```toml
+[[processors.defaults]]
+ [processors.defaults.fields]
+ status_code = "N/A"
+```
+
+```diff
+- lb,http_method=GET cache_status=HIT,latency=230
++ lb,http_method=GET cache_status=HIT,latency=230,status_code="N/A"
+```
+
+Ensure an empty string gets replaced by a default:
+
+```diff
+- lb,http_method=GET cache_status=HIT,latency=230,status_code=""
++ lb,http_method=GET cache_status=HIT,latency=230,status_code="N/A"
+```
diff --git a/plugins/processors/defaults/defaults.go b/plugins/processors/defaults/defaults.go
new file mode 100644
index 0000000000000..eaffdf81a2429
--- /dev/null
+++ b/plugins/processors/defaults/defaults.go
@@ -0,0 +1,72 @@
+package defaults
+
+import (
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+ "strings"
+)
+
+const sampleConfig = `
+ ## Ensures a set of fields always exists on your metric(s) with their
+ ## respective default value.
+ ## For any given field pair (key = default), if it's not set, a field
+ ## is set on the metric with the specified default.
+ ##
+ ## A field is considered not set if it is nil on the incoming metric;
+ ## or it is not nil but its value is an empty string or is a string
+ ## of one or more spaces.
+ ## =
+ # [processors.defaults.fields]
+ # field_1 = "bar"
+ # time_idle = 0
+ # is_error = true
+`
+
+// Defaults is a processor for ensuring certain fields always exist
+// on your Metrics with at least a default value.
+type Defaults struct {
+ DefaultFieldsSets map[string]interface{} `toml:"fields"`
+}
+
+// SampleConfig represents a sample toml config for this plugin.
+func (def *Defaults) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description is a brief description of this processor plugin's behaviour.
+func (def *Defaults) Description() string {
+ return "Defaults sets default value(s) for specified fields that are not set on incoming metrics."
+}
+
+// Apply contains the main implementation of this processor.
+// For each metric in 'inputMetrics', it goes over each default pair.
+// If the field in the pair does not exist on the metric, the associated default is added.
+// If the field was found, then, if its value is the empty string or one or more spaces, it is replaced
+// by the associated default.
+func (def *Defaults) Apply(inputMetrics ...telegraf.Metric) []telegraf.Metric {
+ for _, metric := range inputMetrics {
+ for defField, defValue := range def.DefaultFieldsSets {
+ if maybeCurrent, isSet := metric.GetField(defField); !isSet {
+ metric.AddField(defField, defValue)
+ } else if trimmed, isStr := maybeTrimmedString(maybeCurrent); isStr && trimmed == "" {
+ metric.RemoveField(defField)
+ metric.AddField(defField, defValue)
+ }
+ }
+ }
+ return inputMetrics
+}
+
+func maybeTrimmedString(v interface{}) (string, bool) {
+ switch value := v.(type) {
+ case string:
+ return strings.TrimSpace(value), true
+ }
+ return "", false
+}
+
+func init() {
+ processors.Add("defaults", func() telegraf.Processor {
+ return &Defaults{}
+ })
+}
diff --git a/plugins/processors/defaults/defaults_test.go b/plugins/processors/defaults/defaults_test.go
new file mode 100644
index 0000000000000..c0e930fc6b887
--- /dev/null
+++ b/plugins/processors/defaults/defaults_test.go
@@ -0,0 +1,131 @@
+package defaults
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDefaults(t *testing.T) {
+ scenarios := []struct {
+ name string
+ defaults *Defaults
+ input telegraf.Metric
+ expected []telegraf.Metric
+ }{
+ {
+ name: "Test that no values are changed since they are not nil or empty",
+ defaults: &Defaults{
+ DefaultFieldsSets: map[string]interface{}{
+ "usage": 30,
+ "wind_feel": "very chill",
+ "is_dead": true,
+ },
+ },
+ input: testutil.MustMetric(
+ "CPU metrics",
+ map[string]string{},
+ map[string]interface{}{
+ "usage": 45,
+ "wind_feel": "a dragon's breath",
+ "is_dead": false,
+ },
+ time.Unix(0, 0),
+ ),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "CPU metrics",
+ map[string]string{},
+ map[string]interface{}{
+ "usage": 45,
+ "wind_feel": "a dragon's breath",
+ "is_dead": false,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "Tests that the missing fields are set on the metric",
+ defaults: &Defaults{
+ DefaultFieldsSets: map[string]interface{}{
+ "max_clock_gz": 6,
+ "wind_feel": "Unknown",
+ "boost_enabled": false,
+ "variance": 1.2,
+ },
+ },
+ input: testutil.MustMetric(
+ "CPU metrics",
+ map[string]string{},
+ map[string]interface{}{
+ "usage": 45,
+ "temperature": 64,
+ },
+ time.Unix(0, 0),
+ ),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "CPU metrics",
+ map[string]string{},
+ map[string]interface{}{
+ "usage": 45,
+ "temperature": 64,
+ "max_clock_gz": 6,
+ "wind_feel": "Unknown",
+ "boost_enabled": false,
+ "variance": 1.2,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "Tests that set but empty fields are replaced by specified defaults",
+ defaults: &Defaults{
+ DefaultFieldsSets: map[string]interface{}{
+ "max_clock_gz": 6,
+ "wind_feel": "Unknown",
+ "fan_loudness": "Inaudible",
+ "boost_enabled": false,
+ },
+ },
+ input: testutil.MustMetric(
+ "CPU metrics",
+ map[string]string{},
+ map[string]interface{}{
+ "max_clock_gz": "",
+ "wind_feel": " ",
+ "fan_loudness": " ",
+ },
+ time.Unix(0, 0),
+ ),
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "CPU metrics",
+ map[string]string{},
+ map[string]interface{}{
+ "max_clock_gz": 6,
+ "wind_feel": "Unknown",
+ "fan_loudness": "Inaudible",
+ "boost_enabled": false,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+
+ for _, scenario := range scenarios {
+ t.Run(scenario.name, func(t *testing.T) {
+ defaults := scenario.defaults
+
+ resultMetrics := defaults.Apply(scenario.input)
+ assert.Len(t, resultMetrics, 1)
+ testutil.RequireMetricsEqual(t, scenario.expected, resultMetrics)
+ })
+ }
+}
diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md
index 20c6110a18337..72a0556252902 100644
--- a/plugins/processors/enum/README.md
+++ b/plugins/processors/enum/README.md
@@ -1,13 +1,13 @@
# Enum Processor Plugin
-The Enum Processor allows the configuration of value mappings for metric fields.
+The Enum Processor allows the configuration of value mappings for metric tags or fields.
The main use-case for this is to rewrite status codes such as _red_, _amber_ and
-_green_ by numeric values such as 0, 1, 2. The plugin supports string and bool
-types for the field values. Multiple Fields can be configured with separate
-value mappings for each field. Default mapping values can be configured to be
+_green_ by numeric values such as 0, 1, 2. The plugin supports string, int and bool
+types for the field values. Multiple tags or fields can be configured with separate
+value mappings for each. Default mapping values can be configured to be
used for all values, which are not contained in the value_mappings. The
-processor supports explicit configuration of a destination field. By default the
-source field is overwritten.
+processor supports explicit configuration of a destination tag or field. By default the
+source tag or field is overwritten.
### Configuration:
@@ -17,13 +17,16 @@ source field is overwritten.
## Name of the field to map
field = "status"
- ## Destination field to be used for the mapped value. By default the source
- ## field is used, overwriting the original value.
+ ## Name of the tag to map
+ # tag = "status"
+
+ ## Destination tag or field to be used for the mapped value. By default the
+ ## source tag or field is used, overwriting the original value.
dest = "status_code"
## Default value to be used for all values not contained in the mapping
- ## table. When unset, the unmodified value for the field will be used if no
- ## match is found.
+ ## table. When unset and no match is found, the original field will remain
+ ## unmodified and the destination tag or field will not be created.
# default = 0
## Table of mappings
@@ -39,3 +42,9 @@ source field is overwritten.
- xyzzy status="green" 1502489900000000000
+ xyzzy status="green",status_code=1i 1502489900000000000
```
+
+With unknown value and no default set:
+```diff
+- xyzzy status="black" 1502489900000000000
++ xyzzy status="black" 1502489900000000000
+```
diff --git a/plugins/processors/enum/enum.go b/plugins/processors/enum/enum.go
index b08307f0940ad..a96e7d5095bcf 100644
--- a/plugins/processors/enum/enum.go
+++ b/plugins/processors/enum/enum.go
@@ -1,6 +1,7 @@
package enum
import (
+ "fmt"
"strconv"
"github.com/influxdata/telegraf"
@@ -12,9 +13,12 @@ var sampleConfig = `
## Name of the field to map
field = "status"
- ## Destination field to be used for the mapped value. By default the source
- ## field is used, overwriting the original value.
- # dest = "status_code"
+ ## Name of the tag to map
+ # tag = "status"
+
+ ## Destination tag or field to be used for the mapped value. By default the
+ ## source tag or field is used, overwriting the original value.
+ dest = "status_code"
## Default value to be used for all values not contained in the mapping
## table. When unset, the unmodified value for the field will be used if no
@@ -24,7 +28,7 @@ var sampleConfig = `
## Table of mappings
[processors.enum.mapping.value_mappings]
green = 1
- yellow = 2
+ amber = 2
red = 3
`
@@ -33,6 +37,7 @@ type EnumMapper struct {
}
type Mapping struct {
+ Tag string
Field string
Dest string
Default interface{}
@@ -56,10 +61,24 @@ func (mapper *EnumMapper) Apply(in ...telegraf.Metric) []telegraf.Metric {
func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric {
for _, mapping := range mapper.Mappings {
- if originalValue, isPresent := metric.GetField(mapping.Field); isPresent == true {
- if adjustedValue, isString := adjustBoolValue(originalValue).(string); isString == true {
- if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent == true {
- writeField(metric, mapping.getDestination(), mappedValue)
+ if mapping.Field != "" {
+ if originalValue, isPresent := metric.GetField(mapping.Field); isPresent {
+ if adjustedValue, isString := adjustValue(originalValue).(string); isString {
+ if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent {
+ writeField(metric, mapping.getDestination(), mappedValue)
+ }
+ }
+ }
+ }
+ if mapping.Tag != "" {
+ if originalValue, isPresent := metric.GetTag(mapping.Tag); isPresent {
+ if mappedValue, isMappedValuePresent := mapping.mapValue(originalValue); isMappedValuePresent {
+ switch val := mappedValue.(type) {
+ case string:
+ writeTag(metric, mapping.getDestinationTag(), val)
+ default:
+ writeTag(metric, mapping.getDestinationTag(), fmt.Sprintf("%v", val))
+ }
}
}
}
@@ -67,11 +86,17 @@ func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric
return metric
}
-func adjustBoolValue(in interface{}) interface{} {
- if mappedBool, isBool := in.(bool); isBool == true {
- return strconv.FormatBool(mappedBool)
+func adjustValue(in interface{}) interface{} {
+ switch val := in.(type) {
+ case bool:
+ return strconv.FormatBool(val)
+ case int64:
+ return strconv.FormatInt(val, 10)
+ case uint64:
+ return strconv.FormatUint(val, 10)
+ default:
+ return in
}
- return in
}
func (mapping *Mapping) mapValue(original string) (interface{}, bool) {
@@ -91,11 +116,23 @@ func (mapping *Mapping) getDestination() string {
return mapping.Field
}
+func (mapping *Mapping) getDestinationTag() string {
+ if mapping.Dest != "" {
+ return mapping.Dest
+ }
+ return mapping.Tag
+}
+
func writeField(metric telegraf.Metric, name string, value interface{}) {
metric.RemoveField(name)
metric.AddField(name, value)
}
+func writeTag(metric telegraf.Metric, name string, value string) {
+ metric.RemoveTag(name)
+ metric.AddTag(name, value)
+}
+
func init() {
processors.Add("enum", func() telegraf.Processor {
return &EnumMapper{}
diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go
index d8c0e26de6924..de13aad156f5c 100644
--- a/plugins/processors/enum/enum_test.go
+++ b/plugins/processors/enum/enum_test.go
@@ -14,7 +14,9 @@ func createTestMetric() telegraf.Metric {
map[string]string{"tag": "tag_value"},
map[string]interface{}{
"string_value": "test",
- "int_value": int(13),
+ "int_value": int(200),
+ "uint_value": uint(500),
+ "float_value": float64(3.14),
"true_value": true,
},
time.Now(),
@@ -27,12 +29,23 @@ func calculateProcessedValues(mapper EnumMapper, metric telegraf.Metric) map[str
return processed[0].Fields()
}
+func calculateProcessedTags(mapper EnumMapper, metric telegraf.Metric) map[string]string {
+ processed := mapper.Apply(metric)
+ return processed[0].Tags()
+}
+
func assertFieldValue(t *testing.T, expected interface{}, field string, fields map[string]interface{}) {
value, present := fields[field]
assert.True(t, present, "value of field '"+field+"' was not present")
assert.EqualValues(t, expected, value)
}
+func assertTagValue(t *testing.T, expected interface{}, tag string, tags map[string]string) {
+ value, present := tags[tag]
+ assert.True(t, present, "value of tag '"+tag+"' was not present")
+ assert.EqualValues(t, expected, value)
+}
+
func TestRetainsMetric(t *testing.T) {
mapper := EnumMapper{}
source := createTestMetric()
@@ -41,35 +54,66 @@ func TestRetainsMetric(t *testing.T) {
fields := target.Fields()
assertFieldValue(t, "test", "string_value", fields)
- assertFieldValue(t, 13, "int_value", fields)
+ assertFieldValue(t, 200, "int_value", fields)
+ assertFieldValue(t, 500, "uint_value", fields)
assertFieldValue(t, true, "true_value", fields)
assert.Equal(t, "m1", target.Name())
assert.Equal(t, source.Tags(), target.Tags())
assert.Equal(t, source.Time(), target.Time())
}
-func TestMapsSingleStringValue(t *testing.T) {
- mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", ValueMappings: map[string]interface{}{"test": int64(1)}}}}
+func TestMapsSingleStringValueTag(t *testing.T) {
+ mapper := EnumMapper{Mappings: []Mapping{{Tag: "tag", ValueMappings: map[string]interface{}{"tag_value": "valuable"}}}}
- fields := calculateProcessedValues(mapper, createTestMetric())
+ tags := calculateProcessedTags(mapper, createTestMetric())
- assertFieldValue(t, 1, "string_value", fields)
+ assertTagValue(t, "valuable", "tag", tags)
}
-func TestNoFailureOnMappingsOnNonStringValuedFields(t *testing.T) {
- mapper := EnumMapper{Mappings: []Mapping{{Field: "int_value", ValueMappings: map[string]interface{}{"13i": int64(7)}}}}
+func TestNoFailureOnMappingsOnNonSupportedValuedFields(t *testing.T) {
+ mapper := EnumMapper{Mappings: []Mapping{{Field: "float_value", ValueMappings: map[string]interface{}{"3.14": "pi"}}}}
fields := calculateProcessedValues(mapper, createTestMetric())
- assertFieldValue(t, 13, "int_value", fields)
+ assertFieldValue(t, float64(3.14), "float_value", fields)
}
-func TestMapSingleBoolValue(t *testing.T) {
- mapper := EnumMapper{Mappings: []Mapping{{Field: "true_value", ValueMappings: map[string]interface{}{"true": int64(1)}}}}
-
- fields := calculateProcessedValues(mapper, createTestMetric())
-
- assertFieldValue(t, 1, "true_value", fields)
+func TestMappings(t *testing.T) {
+ mappings := []map[string][]interface{}{
+ {
+ "field_name": []interface{}{"string_value"},
+ "target_values": []interface{}{"test", "test", "test", "not_test", "50", "true"},
+ "mapped_values": []interface{}{"test_1", 5, true, "test_1", 10, false},
+ "expected_values": []interface{}{"test_1", 5, true, "test", "test", "test"},
+ },
+ {
+ "field_name": []interface{}{"true_value"},
+ "target_value": []interface{}{"true", "true", "true", "false", "test", "5"},
+ "mapped_value": []interface{}{false, 1, "false", false, false, false},
+ "expected_value": []interface{}{false, 1, "false", true, true, true},
+ },
+ {
+ "field_name": []interface{}{"int_value"},
+ "target_value": []interface{}{"200", "200", "200", "200", "test", "5"},
+ "mapped_value": []interface{}{"http_ok", true, 1, float64(200.001), false, false},
+ "expected_value": []interface{}{"http_ok", true, 1, float64(200.001), 200, 200},
+ },
+ {
+ "field_name": []interface{}{"uint_value"},
+ "target_value": []interface{}{"500", "500", "500", "test", "false", "5"},
+ "mapped_value": []interface{}{"internal_error", 1, false, false, false, false},
+ "expected_value": []interface{}{"internal_error", 1, false, 500, 500, 500},
+ },
+ }
+
+ for _, mapping := range mappings {
+ field_name := mapping["field_name"][0].(string)
+ for index := range mapping["target_value"] {
+ mapper := EnumMapper{Mappings: []Mapping{{Field: field_name, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}}}}
+ fields := calculateProcessedValues(mapper, createTestMetric())
+ assertFieldValue(t, mapping["expected_value"][index], field_name, fields)
+ }
+ }
}
func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) {
@@ -104,3 +148,14 @@ func TestWritesToDestination(t *testing.T) {
assertFieldValue(t, "test", "string_value", fields)
assertFieldValue(t, 1, "string_code", fields)
}
+
+func TestDoNotWriteToDestinationWithoutDefaultOrDefinedMapping(t *testing.T) {
+ field := "string_code"
+ mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: field, ValueMappings: map[string]interface{}{"other": int64(1)}}}}
+
+ fields := calculateProcessedValues(mapper, createTestMetric())
+
+ assertFieldValue(t, "test", "string_value", fields)
+ _, present := fields[field]
+ assert.False(t, present, "value of field '"+field+"' was present")
+}
diff --git a/plugins/processors/execd/README.md b/plugins/processors/execd/README.md
new file mode 100644
index 0000000000000..79c354bdd4dec
--- /dev/null
+++ b/plugins/processors/execd/README.md
@@ -0,0 +1,113 @@
+# Execd Processor Plugin
+
+The `execd` processor plugin runs an external program as a separate process and
+pipes metrics in to the process's STDIN and reads processed metrics from its STDOUT.
+The programs must accept influx line protocol on standard in (STDIN) and output
+metrics in influx line protocol to standard output (STDOUT).
+
+Program output on standard error is mirrored to the telegraf log.
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Caveats
+
+- Metrics with tracking will be considered "delivered" as soon as they are passed
+ to the external process. There is currently no way to match up which metric
+ coming out of the execd process relates to which metric going in (keep in mind
+ that processors can add and drop metrics, and that this is all done
+ asynchronously).
+- it's not currently possible to use a data_format other than "influx", due to
+ the requirement that it is serialize-parse symmetrical and does not lose any
+ critical type data.
+
+### Configuration:
+
+```toml
+[[processor.execd]]
+ ## One program to run as daemon.
+ ## NOTE: process and each argument should each be their own string
+ ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+ command = ["cat"]
+
+ ## Delay before the process is restarted after an unexpected termination
+ # restart_delay = "10s"
+```
+
+### Example
+
+#### Go daemon example
+
+This go daemon reads a metric from stdin, multiplies the "count" field by 2,
+and writes the metric back out.
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/parsers/influx"
+ "github.com/influxdata/telegraf/plugins/serializers"
+)
+
+func main() {
+ parser := influx.NewStreamParser(os.Stdin)
+ serializer, _ := serializers.NewInfluxSerializer()
+
+ for {
+ metric, err := parser.Next()
+ if err != nil {
+ if err == influx.EOF {
+ return // stream ended
+ }
+ if parseErr, isParseError := err.(*influx.ParseError); isParseError {
+ fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
+ os.Exit(1)
+ }
+ fmt.Fprintf(os.Stderr, "ERR %v\n", err)
+ os.Exit(1)
+ }
+
+ c, found := metric.GetField("count")
+ if !found {
+ fmt.Fprintf(os.Stderr, "metric has no count field\n")
+ os.Exit(1)
+ }
+ switch t := c.(type) {
+ case float64:
+ t *= 2
+ metric.AddField("count", t)
+ case int64:
+ t *= 2
+ metric.AddField("count", t)
+ default:
+ fmt.Fprintf(os.Stderr, "count is not an unknown type, it's a %T\n", c)
+ os.Exit(1)
+ }
+ b, err := serializer.Serialize(metric)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "ERR %v\n", err)
+ os.Exit(1)
+ }
+ fmt.Fprint(os.Stdout, string(b))
+ }
+}
+```
+
+to run it, you'd build the binary using go, eg `go build -o multiplier.exe main.go`
+
+```toml
+[[processors.execd]]
+ command = ["multiplier.exe"]
+```
+
+#### Ruby daemon
+
+- See [Ruby daemon](./examples/multiplier_line_protocol/multiplier_line_protocol.rb)
+
+```toml
+[[processors.execd]]
+ command = ["ruby", "plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb"]
+```
diff --git a/plugins/processors/execd/examples/multiplier_line_protocol/multiplier.conf b/plugins/processors/execd/examples/multiplier_line_protocol/multiplier.conf
new file mode 100644
index 0000000000000..120e04cbb73f1
--- /dev/null
+++ b/plugins/processors/execd/examples/multiplier_line_protocol/multiplier.conf
@@ -0,0 +1,14 @@
+[agent]
+ interval = "10s"
+
+[[inputs.execd]]
+ command = ["ruby", "plugins/inputs/execd/examples/count.rb"]
+
+[[processors.execd]]
+ command = ["ruby", "plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb"]
+
+
+[[outputs.file]]
+ files = ["stdout"]
+ data_format = "influx"
+
diff --git a/plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb b/plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb
new file mode 100644
index 0000000000000..6949a310e8754
--- /dev/null
+++ b/plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb
@@ -0,0 +1,27 @@
+#!/usr/bin/env ruby
+
+loop do
+ # example input: "counter_ruby count=0 1586302128978187000"
+ line = STDIN.readline.chomp
+ # parse out influx line protocol sections with a really simple hand-rolled parser that doesn't support escaping.
+ # for a full line parser in ruby, check out something like the influxdb-lineprotocol-parser gem.
+ parts = line.split(" ")
+ case parts.size
+ when 3
+ measurement, fields, timestamp = parts
+ when 4
+ measurement, tags, fields, timestamp = parts
+ else
+ STDERR.puts "Unable to parse line protocol"
+ exit 1
+ end
+ fields = fields.split(",").map{|t|
+ k,v = t.split("=")
+ if k == "count"
+ v = v.to_i * 2 # multiple count metric by two
+ end
+ "#{k}=#{v}"
+ }.join(",")
+ puts [measurement, tags, fields, timestamp].select{|s| s && s.size != 0 }.join(" ")
+ STDOUT.flush
+end
diff --git a/plugins/processors/execd/execd.go b/plugins/processors/execd/execd.go
new file mode 100644
index 0000000000000..7aeb285a44fc5
--- /dev/null
+++ b/plugins/processors/execd/execd.go
@@ -0,0 +1,163 @@
+package execd
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/internal/process"
+ "github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/plugins/processors"
+ "github.com/influxdata/telegraf/plugins/serializers"
+)
+
+const sampleConfig = `
+ ## Program to run as daemon
+ ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+ command = ["cat"]
+
+ ## Delay before the process is restarted after an unexpected termination
+ restart_delay = "10s"
+`
+
+type Execd struct {
+ Command []string `toml:"command"`
+ RestartDelay config.Duration `toml:"restart_delay"`
+ Log telegraf.Logger
+
+ parserConfig *parsers.Config
+ parser parsers.Parser
+ serializerConfig *serializers.Config
+ serializer serializers.Serializer
+ acc telegraf.Accumulator
+ process *process.Process
+}
+
+func New() *Execd {
+ return &Execd{
+ RestartDelay: config.Duration(10 * time.Second),
+ parserConfig: &parsers.Config{
+ DataFormat: "influx",
+ },
+ serializerConfig: &serializers.Config{
+ DataFormat: "influx",
+ },
+ }
+}
+
+func (e *Execd) SampleConfig() string {
+ return sampleConfig
+}
+
+func (e *Execd) Description() string {
+ return "Run executable as long-running processor plugin"
+}
+
+func (e *Execd) Start(acc telegraf.Accumulator) error {
+ var err error
+ e.parser, err = parsers.NewParser(e.parserConfig)
+ if err != nil {
+ return fmt.Errorf("error creating parser: %w", err)
+ }
+ e.serializer, err = serializers.NewSerializer(e.serializerConfig)
+ if err != nil {
+ return fmt.Errorf("error creating serializer: %w", err)
+ }
+ e.acc = acc
+
+ e.process, err = process.New(e.Command)
+ if err != nil {
+ return fmt.Errorf("error creating new process: %w", err)
+ }
+ e.process.Log = e.Log
+ e.process.RestartDelay = time.Duration(e.RestartDelay)
+ e.process.ReadStdoutFn = e.cmdReadOut
+ e.process.ReadStderrFn = e.cmdReadErr
+
+ if err = e.process.Start(); err != nil {
+ // if there was only one argument, and it contained spaces, warn the user
+ // that they may have configured it wrong.
+ if len(e.Command) == 1 && strings.Contains(e.Command[0], " ") {
+ e.Log.Warn("The processors.execd Command contained spaces but no arguments. " +
+ "This setting expects the program and arguments as an array of strings, " +
+ "not as a space-delimited string. See the plugin readme for an example.")
+ }
+ return fmt.Errorf("failed to start process %s: %w", e.Command, err)
+ }
+
+ return nil
+}
+
+func (e *Execd) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
+ b, err := e.serializer.Serialize(m)
+ if err != nil {
+ return fmt.Errorf("metric serializing error: %w", err)
+ }
+
+ _, err = e.process.Stdin.Write(b)
+ if err != nil {
+ return fmt.Errorf("error writing to process stdin: %w", err)
+ }
+
+ // We cannot maintain tracking metrics at the moment because input/output
+ // is done asynchronously and we don't have any metric metadata to tie the
+ // output metric back to the original input metric.
+ m.Drop()
+ return nil
+}
+
+func (e *Execd) Stop() error {
+ e.process.Stop()
+ return nil
+}
+
+func (e *Execd) cmdReadOut(out io.Reader) {
+ scanner := bufio.NewScanner(out)
+ scanBuf := make([]byte, 4096)
+ scanner.Buffer(scanBuf, 262144)
+
+ for scanner.Scan() {
+ metrics, err := e.parser.Parse(scanner.Bytes())
+ if err != nil {
+ e.Log.Errorf("Parse error: %s", err)
+ }
+
+ for _, metric := range metrics {
+ e.acc.AddMetric(metric)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ e.Log.Errorf("Error reading stdout: %s", err)
+ }
+}
+
+func (e *Execd) cmdReadErr(out io.Reader) {
+ scanner := bufio.NewScanner(out)
+
+ for scanner.Scan() {
+ e.Log.Errorf("stderr: %q", scanner.Text())
+ }
+
+ if err := scanner.Err(); err != nil {
+ e.Log.Errorf("Error reading stderr: %s", err)
+ }
+}
+
+func (e *Execd) Init() error {
+ if len(e.Command) == 0 {
+ return errors.New("no command specified")
+ }
+ return nil
+}
+
+func init() {
+ processors.AddStreaming("execd", func() telegraf.StreamingProcessor {
+ return New()
+ })
+}
diff --git a/plugins/processors/execd/execd_test.go b/plugins/processors/execd/execd_test.go
new file mode 100644
index 0000000000000..451669ec6a130
--- /dev/null
+++ b/plugins/processors/execd/execd_test.go
@@ -0,0 +1,136 @@
+package execd
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/parsers/influx"
+ "github.com/influxdata/telegraf/plugins/serializers"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestExternalProcessorWorks(t *testing.T) {
+ e := New()
+ e.Log = testutil.Logger{}
+
+ exe, err := os.Executable()
+ require.NoError(t, err)
+ t.Log(exe)
+ e.Command = []string{exe, "-countmultiplier"}
+ e.RestartDelay = config.Duration(5 * time.Second)
+
+ acc := &testutil.Accumulator{}
+
+ require.NoError(t, e.Start(acc))
+
+ now := time.Now()
+ orig := now
+ metrics := []telegraf.Metric{}
+ for i := 0; i < 10; i++ {
+ m, err := metric.New("test",
+ map[string]string{
+ "city": "Toronto",
+ },
+ map[string]interface{}{
+ "population": 6000000,
+ "count": 1,
+ },
+ now)
+ require.NoError(t, err)
+ metrics = append(metrics, m)
+ now = now.Add(1)
+
+ e.Add(m, acc)
+ }
+
+ acc.Wait(1)
+ require.NoError(t, e.Stop())
+ acc.Wait(9)
+
+ metrics = acc.GetTelegrafMetrics()
+ m := metrics[0]
+
+ expected := testutil.MustMetric("test",
+ map[string]string{
+ "city": "Toronto",
+ },
+ map[string]interface{}{
+ "population": 6000000,
+ "count": 2,
+ },
+ orig,
+ )
+ testutil.RequireMetricEqual(t, expected, m)
+
+ metricTime := m.Time().UnixNano()
+
+ // make sure the other 9 are ordered properly
+ for i := 0; i < 9; i++ {
+ m = metrics[i+1]
+ require.EqualValues(t, metricTime+1, m.Time().UnixNano())
+ metricTime = m.Time().UnixNano()
+ }
+}
+
+var countmultiplier = flag.Bool("countmultiplier", false,
+ "if true, act like line input program instead of test")
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if *countmultiplier {
+ runCountMultiplierProgram()
+ os.Exit(0)
+ }
+ code := m.Run()
+ os.Exit(code)
+}
+
+func runCountMultiplierProgram() {
+ parser := influx.NewStreamParser(os.Stdin)
+ serializer, _ := serializers.NewInfluxSerializer()
+
+ for {
+ metric, err := parser.Next()
+ if err != nil {
+ if err == influx.EOF {
+ return // stream ended
+ }
+ if parseErr, isParseError := err.(*influx.ParseError); isParseError {
+ fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
+ os.Exit(1)
+ }
+ fmt.Fprintf(os.Stderr, "ERR %v\n", err)
+ os.Exit(1)
+ }
+
+ c, found := metric.GetField("count")
+ if !found {
+ fmt.Fprintf(os.Stderr, "metric has no count field\n")
+ os.Exit(1)
+ }
+ switch t := c.(type) {
+ case float64:
+ t *= 2
+ metric.AddField("count", t)
+ case int64:
+ t *= 2
+ metric.AddField("count", t)
+ default:
+ fmt.Fprintf(os.Stderr, "count is not an unknown type, it's a %T\n", c)
+ os.Exit(1)
+ }
+ b, err := serializer.Serialize(metric)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "ERR %v\n", err)
+ os.Exit(1)
+ }
+ fmt.Fprint(os.Stdout, string(b))
+ }
+}
diff --git a/plugins/processors/filepath/README.md b/plugins/processors/filepath/README.md
new file mode 100644
index 0000000000000..ab3454dcb0c11
--- /dev/null
+++ b/plugins/processors/filepath/README.md
@@ -0,0 +1,209 @@
+# Filepath Processor Plugin
+
+The `filepath` processor plugin maps certain go functions from [path/filepath](https://golang.org/pkg/path/filepath/)
+onto tag and field values. Values can be modified in place or stored in another key.
+
+Implemented functions are:
+
+* [Base](https://golang.org/pkg/path/filepath/#Base) (accessible through `[[processors.filepath.basename]]`)
+* [Rel](https://golang.org/pkg/path/filepath/#Rel) (accessible through `[[processors.filepath.rel]]`)
+* [Dir](https://golang.org/pkg/path/filepath/#Dir) (accessible through `[[processors.filepath.dir]]`)
+* [Clean](https://golang.org/pkg/path/filepath/#Clean) (accessible through `[[processors.filepath.clean]]`)
+* [ToSlash](https://golang.org/pkg/path/filepath/#ToSlash) (accessible through `[[processors.filepath.toslash]]`)
+
+ On top of that, the plugin provides an extra function to retrieve the final path component without its extension. This
+ function is accessible through the `[[processors.filepath.stem]]` configuration item.
+
+Please note that, in this implementation, these functions are processed in the order that they appear above( except for
+`stem` that is applied in the first place).
+
+Specify the `tag` and/or `field` that you want processed in each section and optionally a `dest` if you want the result
+stored in a new tag or field.
+
+If you plan to apply multiple transformations to the same `tag`/`field`, bear in mind the processing order stated above.
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration
+
+```toml
+[[processors.filepath]]
+ ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+ # [[processors.filepath.basename]]
+ # tag = "path"
+ # dest = "basepath"
+
+ ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+ # [[processors.filepath.dirname]]
+ # field = "path"
+
+ ## Treat the tag value as a path, converting it to its the last element without its suffix
+ # [[processors.filepath.stem]]
+ # tag = "path"
+
+ ## Treat the tag value as a path, converting it to the shortest path name equivalent
+ ## to path by purely lexical processing
+ # [[processors.filepath.clean]]
+ # tag = "path"
+
+ ## Treat the tag value as a path, converting it to a relative path that is lexically
+ ## equivalent to the source path when joined to 'base_path'
+ # [[processors.filepath.rel]]
+ # tag = "path"
+ # base_path = "/var/log"
+
+ ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Has only
+ ## effect on Windows
+ # [[processors.filepath.toslash]]
+ # tag = "path"
+```
+
+### Considerations
+
+#### Clean
+
+Even though `clean` is provided a standalone function, it is also invoked when using the `rel` and `dirname` functions,
+so there is no need to use it along with them.
+
+That is:
+
+ ```toml
+[[processors.filepath]]
+ [[processors.filepath.dir]]
+ tag = "path"
+ [[processors.filepath.clean]]
+ tag = "path"
+ ```
+
+Is equivalent to:
+
+ ```toml
+[[processors.filepath]]
+ [[processors.filepath.dir]]
+ tag = "path"
+ ```
+
+#### ToSlash
+
+The effects of this function are only noticeable on Windows platforms, because of the underlying golang implementation.
+
+### Examples
+
+#### Basename
+
+```toml
+[[processors.filepath]]
+ [[processors.filepath.basename]]
+ tag = "path"
+```
+
+```diff
+- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="ajob.log" duration_seconds=134 1587920425000000000
+```
+
+#### Dirname
+
+```toml
+[[processors.filepath]]
+ [[processors.filepath.dirname]]
+ field = "path"
+ dest = "folder"
+```
+
+```diff
+- my_metric path="/var/log/batch/ajob.log",duration_seconds=134 1587920425000000000
++ my_metric path="/var/log/batch/ajob.log",folder="/var/log/batch",duration_seconds=134 1587920425000000000
+```
+
+#### Stem
+
+```toml
+[[processors.filepath]]
+ [[processors.filepath.stem]]
+ tag = "path"
+```
+
+```diff
+- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="ajob" duration_seconds=134 1587920425000000000
+```
+
+#### Clean
+
+```toml
+[[processors.filepath]]
+ [[processors.filepath.clean]]
+ tag = "path"
+```
+
+```diff
+- my_metric,path="/var/log/dummy/../batch//ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+```
+
+#### Rel
+
+```toml
+[[processors.filepath]]
+ [[processors.filepath.rel]]
+ tag = "path"
+ base_path = "/var/log"
+```
+
+```diff
+- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="batch/ajob.log" duration_seconds=134 1587920425000000000
+```
+
+#### ToSlash
+
+```toml
+[[processors.filepath]]
+ [[processors.filepath.rel]]
+ tag = "path"
+```
+
+```diff
+- my_metric,path="\var\log\batch\ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+```
+
+### Processing paths from tail plugin
+
+This plugin can be used together with the
+[tail input plugn](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail) to make modifications
+to the `path` tag injected for every file.
+
+Scenario:
+
+* A log file `/var/log/myjobs/mysql_backup.log`, containing logs for a job execution. Whenever the job ends, a line is
+written to the log file following this format: `2020-04-05 11:45:21 total time execution: 70 seconds`
+* We want to generate a measurement that captures the duration of the script as a field and includes the `path` as a
+tag
+ * We are interested in the filename without its extensions, since it might be enough information for plotting our
+ execution times in a dashboard
+ * Just in case, we don't want to override the original path (if for some reason we end up having duplicates we might
+ want this information)
+
+For this purpose, we will use the `tail` input plugin, the `grok` parser plugin and the `filepath` processor.
+
+```toml
+[[inputs.tail]]
+ files = ["/var/log/myjobs/**.log"]
+ data_format = "grok"
+ grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} total time execution: %{NUMBER:duration_seconds:int}']
+ name_override = "myjobs"
+
+[[processors.filepath]]
+ [[processors.filepath.stem]]
+ tag = "path"
+ dest = "stempath"
+
+```
+
+The resulting output for a job taking 70 seconds for the mentioned log file would look like:
+
+```text
+myjobs_duration_seconds,host="my-host",path="/var/log/myjobs/mysql_backup.log",stempath="mysql_backup" 70 1587920425000000000
+```
diff --git a/plugins/processors/filepath/filepath.go b/plugins/processors/filepath/filepath.go
new file mode 100644
index 0000000000000..70013de174a9a
--- /dev/null
+++ b/plugins/processors/filepath/filepath.go
@@ -0,0 +1,150 @@
+package filepath
+
+import (
+ "path/filepath"
+ "strings"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+type Options struct {
+ BaseName []BaseOpts `toml:"basename"`
+ DirName []BaseOpts `toml:"dirname"`
+ Stem []BaseOpts
+ Clean []BaseOpts
+ Rel []RelOpts
+ ToSlash []BaseOpts `toml:"toslash"`
+}
+
+type ProcessorFunc func(s string) string
+
+// BaseOpts contains options applicable to every function
+type BaseOpts struct {
+ Field string
+ Tag string
+ Dest string
+}
+
+type RelOpts struct {
+ BaseOpts
+ BasePath string
+}
+
+const sampleConfig = `
+ ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+ # [[processors.filepath.basename]]
+ # tag = "path"
+ # dest = "basepath"
+
+ ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+ # [[processors.filepath.dirname]]
+ # field = "path"
+
+ ## Treat the tag value as a path, converting it to its the last element without its suffix
+ # [[processors.filepath.stem]]
+ # tag = "path"
+
+ ## Treat the tag value as a path, converting it to the shortest path name equivalent
+ ## to path by purely lexical processing
+ # [[processors.filepath.clean]]
+ # tag = "path"
+
+ ## Treat the tag value as a path, converting it to a relative path that is lexically
+ ## equivalent to the source path when joined to 'base_path'
+ # [[processors.filepath.rel]]
+ # tag = "path"
+ # base_path = "/var/log"
+
+ ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Has only
+ ## effect on Windows
+ # [[processors.filepath.toslash]]
+ # tag = "path"
+`
+
+func (o *Options) SampleConfig() string {
+ return sampleConfig
+}
+
+func (o *Options) Description() string {
+ return "Performs file path manipulations on tags and fields"
+}
+
+// applyFunc applies the specified function to the metric
+func (o *Options) applyFunc(bo BaseOpts, fn ProcessorFunc, metric telegraf.Metric) {
+ if bo.Tag != "" {
+ if v, ok := metric.GetTag(bo.Tag); ok {
+ targetTag := bo.Tag
+
+ if bo.Dest != "" {
+ targetTag = bo.Dest
+ }
+ metric.AddTag(targetTag, fn(v))
+ }
+ }
+
+ if bo.Field != "" {
+ if v, ok := metric.GetField(bo.Field); ok {
+ targetField := bo.Field
+
+ if bo.Dest != "" {
+ targetField = bo.Dest
+ }
+
+ // Only string fields are considered
+ if v, ok := v.(string); ok {
+ metric.AddField(targetField, fn(v))
+ }
+
+ }
+ }
+}
+
+func stemFilePath(path string) string {
+ return strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
+}
+
+// processMetric processes fields and tag values for a given metric applying the selected transformations
+func (o *Options) processMetric(metric telegraf.Metric) {
+ // Stem
+ for _, v := range o.Stem {
+ o.applyFunc(v, stemFilePath, metric)
+ }
+ // Basename
+ for _, v := range o.BaseName {
+ o.applyFunc(v, filepath.Base, metric)
+ }
+ // Rel
+ for _, v := range o.Rel {
+ o.applyFunc(v.BaseOpts, func(s string) string {
+ relPath, _ := filepath.Rel(v.BasePath, s)
+ return relPath
+ }, metric)
+ }
+ // Dirname
+ for _, v := range o.DirName {
+ o.applyFunc(v, filepath.Dir, metric)
+ }
+ // Clean
+ for _, v := range o.Clean {
+ o.applyFunc(v, filepath.Clean, metric)
+ }
+ // ToSlash
+ for _, v := range o.ToSlash {
+ o.applyFunc(v, filepath.ToSlash, metric)
+ }
+}
+
+func (o *Options) Apply(in ...telegraf.Metric) []telegraf.Metric {
+ for _, m := range in {
+ o.processMetric(m)
+ }
+
+ return in
+}
+
+func init() {
+ processors.Add("filepath", func() telegraf.Processor {
+ return &Options{}
+ })
+}
diff --git a/plugins/processors/filepath/filepath_test.go b/plugins/processors/filepath/filepath_test.go
new file mode 100644
index 0000000000000..a305c4c5c2f29
--- /dev/null
+++ b/plugins/processors/filepath/filepath_test.go
@@ -0,0 +1,70 @@
+// +build !windows
+
+package filepath
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+var samplePath = "/my/test//c/../path/file.log"
+
+func TestOptions_Apply(t *testing.T) {
+ tests := []testCase{
+ {
+ name: "Smoke Test",
+ o: newOptions("/my/test/"),
+ inputMetrics: getSmokeTestInputMetrics(samplePath),
+ expectedMetrics: []telegraf.Metric{
+ testutil.MustMetric(
+ smokeMetricName,
+ map[string]string{
+ "baseTag": "file.log",
+ "dirTag": "/my/test/path",
+ "stemTag": "file",
+ "cleanTag": "/my/test/path/file.log",
+ "relTag": "path/file.log",
+ "slashTag": "/my/test//c/../path/file.log",
+ },
+ map[string]interface{}{
+ "baseField": "file.log",
+ "dirField": "/my/test/path",
+ "stemField": "file",
+ "cleanField": "/my/test/path/file.log",
+ "relField": "path/file.log",
+ "slashField": "/my/test//c/../path/file.log",
+ },
+ time.Now()),
+ },
+ },
+ {
+ name: "Test Dest Option",
+ o: &Options{
+ BaseName: []BaseOpts{
+ {
+ Field: "sourcePath",
+ Tag: "sourcePath",
+ Dest: "basePath",
+ },
+ }},
+ inputMetrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "testMetric",
+ map[string]string{"sourcePath": samplePath},
+ map[string]interface{}{"sourcePath": samplePath},
+ time.Now()),
+ },
+ expectedMetrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "testMetric",
+ map[string]string{"sourcePath": samplePath, "basePath": "file.log"},
+ map[string]interface{}{"sourcePath": samplePath, "basePath": "file.log"},
+ time.Now()),
+ },
+ },
+ }
+ runTestOptionsApply(t, tests)
+}
diff --git a/plugins/processors/filepath/filepath_test_helpers.go b/plugins/processors/filepath/filepath_test_helpers.go
new file mode 100644
index 0000000000000..571730b546fdd
--- /dev/null
+++ b/plugins/processors/filepath/filepath_test_helpers.go
@@ -0,0 +1,100 @@
+package filepath
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+const smokeMetricName = "testmetric"
+
+type testCase struct {
+ name string
+ o *Options
+ inputMetrics []telegraf.Metric
+ expectedMetrics []telegraf.Metric
+}
+
+func newOptions(basePath string) *Options {
+ return &Options{
+ BaseName: []BaseOpts{
+ {
+ Field: "baseField",
+ Tag: "baseTag",
+ },
+ },
+ DirName: []BaseOpts{
+ {
+ Field: "dirField",
+ Tag: "dirTag",
+ },
+ },
+ Stem: []BaseOpts{
+ {
+ Field: "stemField",
+ Tag: "stemTag",
+ },
+ },
+ Clean: []BaseOpts{
+ {
+ Field: "cleanField",
+ Tag: "cleanTag",
+ },
+ },
+ Rel: []RelOpts{
+ {
+ BaseOpts: BaseOpts{
+ Field: "relField",
+ Tag: "relTag",
+ },
+ BasePath: basePath,
+ },
+ },
+ ToSlash: []BaseOpts{
+ {
+ Field: "slashField",
+ Tag: "slashTag",
+ },
+ },
+ }
+}
+
+func getSampleMetricTags(path string) map[string]string {
+ return map[string]string{
+ "baseTag": path,
+ "dirTag": path,
+ "stemTag": path,
+ "cleanTag": path,
+ "relTag": path,
+ "slashTag": path,
+ }
+}
+
+func getSampleMetricFields(path string) map[string]interface{} {
+ return map[string]interface{}{
+ "baseField": path,
+ "dirField": path,
+ "stemField": path,
+ "cleanField": path,
+ "relField": path,
+ "slashField": path,
+ }
+}
+
+func getSmokeTestInputMetrics(path string) []telegraf.Metric {
+ return []telegraf.Metric{
+ testutil.MustMetric(smokeMetricName, getSampleMetricTags(path), getSampleMetricFields(path),
+ time.Now()),
+ }
+}
+
+func runTestOptionsApply(t *testing.T, tests []testCase) {
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := tt.o.Apply(tt.inputMetrics...)
+ testutil.RequireMetricsEqual(t, tt.expectedMetrics, got, testutil.SortMetrics(), testutil.IgnoreTime())
+ })
+ }
+}
diff --git a/plugins/processors/filepath/filepath_windows_test.go b/plugins/processors/filepath/filepath_windows_test.go
new file mode 100644
index 0000000000000..daca33d188663
--- /dev/null
+++ b/plugins/processors/filepath/filepath_windows_test.go
@@ -0,0 +1,43 @@
+package filepath
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+var samplePath = "c:\\my\\test\\\\c\\..\\path\\file.log"
+
+func TestOptions_Apply(t *testing.T) {
+ tests := []testCase{
+ {
+ name: "Smoke Test",
+ o: newOptions("c:\\my\\test\\"),
+ inputMetrics: getSmokeTestInputMetrics(samplePath),
+ expectedMetrics: []telegraf.Metric{
+ testutil.MustMetric(
+ smokeMetricName,
+ map[string]string{
+ "baseTag": "file.log",
+ "dirTag": "c:\\my\\test\\path",
+ "stemTag": "file",
+ "cleanTag": "c:\\my\\test\\path\\file.log",
+ "relTag": "path\\file.log",
+ "slashTag": "c:/my/test//c/../path/file.log",
+ },
+ map[string]interface{}{
+ "baseField": "file.log",
+ "dirField": "c:\\my\\test\\path",
+ "stemField": "file",
+ "cleanField": "c:\\my\\test\\path\\file.log",
+ "relField": "path\\file.log",
+ "slashField": "c:/my/test//c/../path/file.log",
+ },
+ time.Now()),
+ },
+ },
+ }
+ runTestOptionsApply(t, tests)
+}
diff --git a/plugins/processors/ifname/README.md b/plugins/processors/ifname/README.md
new file mode 100644
index 0000000000000..d68899db40a53
--- /dev/null
+++ b/plugins/processors/ifname/README.md
@@ -0,0 +1,82 @@
+# Network Interface Name Processor Plugin
+
+The `ifname` plugin looks up network interface names using SNMP.
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration:
+
+```toml
+[[processors.ifname]]
+ ## Name of tag holding the interface number
+ # tag = "ifIndex"
+
+ ## Name of output tag where service name will be added
+ # dest = "ifName"
+
+ ## Name of tag of the SNMP agent to request the interface name from
+ # agent = "agent"
+
+ ## Timeout for each request.
+ # timeout = "5s"
+
+ ## SNMP version; can be 1, 2, or 3.
+ # version = 2
+
+ ## SNMP community string.
+ # community = "public"
+
+ ## Number of retries to attempt.
+ # retries = 3
+
+ ## The GETBULK max-repetitions parameter.
+ # max_repetitions = 10
+
+ ## SNMPv3 authentication and encryption options.
+ ##
+ ## Security Name.
+ # sec_name = "myuser"
+ ## Authentication protocol; one of "MD5", "SHA", or "".
+ # auth_protocol = "MD5"
+ ## Authentication password.
+ # auth_password = "pass"
+ ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+ # sec_level = "authNoPriv"
+ ## Context Name.
+ # context_name = ""
+ ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+ # priv_protocol = ""
+ ## Privacy password used for encrypted messages.
+ # priv_password = ""
+
+ ## max_parallel_lookups is the maximum number of SNMP requests to
+ ## make at the same time.
+ # max_parallel_lookups = 100
+
+ ## ordered controls whether or not the metrics need to stay in the
+ ## same order this plugin received them in. If false, this plugin
+ ## may change the order when data is cached. If you need metrics to
+ ## stay in order set this to true. keeping the metrics ordered may
+ ## be slightly slower
+ # ordered = false
+
+ ## cache_ttl is the amount of time interface names are cached for a
+ ## given agent. After this period elapses if names are needed they
+ ## will be retrieved again.
+ # cache_ttl = "8h"
+```
+
+### Example processing:
+
+Example config:
+
+```toml
+[[processors.ifname]]
+ tag = "ifIndex"
+ dest = "ifName"
+```
+
+```diff
+- foo,ifIndex=2,agent=127.0.0.1 field=123 1502489900000000000
++ foo,ifIndex=2,agent=127.0.0.1,ifName=eth0 field=123 1502489900000000000
+```
diff --git a/plugins/processors/ifname/cache.go b/plugins/processors/ifname/cache.go
new file mode 100644
index 0000000000000..20c821aef2f43
--- /dev/null
+++ b/plugins/processors/ifname/cache.go
@@ -0,0 +1,83 @@
+package ifname
+
+// See https://girai.dev/blog/lru-cache-implementation-in-go/
+
+import (
+ "container/list"
+)
+
+type LRUValType = TTLValType
+
+type hashType map[keyType]*list.Element
+
+type LRUCache struct {
+ cap uint // capacity
+ l *list.List // doubly linked list
+ m hashType // hash table for checking if list node exists
+}
+
+// Pair is the value of a list node.
+type Pair struct {
+ key keyType
+ value LRUValType
+}
+
+// initializes a new LRUCache.
+func NewLRUCache(capacity uint) LRUCache {
+ return LRUCache{
+ cap: capacity,
+ l: new(list.List),
+ m: make(hashType, capacity),
+ }
+}
+
+// Get a list node from the hash map.
+func (c *LRUCache) Get(key keyType) (LRUValType, bool) {
+ // check if list node exists
+ if node, ok := c.m[key]; ok {
+ val := node.Value.(*list.Element).Value.(Pair).value
+ // move node to front
+ c.l.MoveToFront(node)
+ return val, true
+ }
+ return LRUValType{}, false
+}
+
+// Put key and value in the LRUCache
+func (c *LRUCache) Put(key keyType, value LRUValType) {
+ // check if list node exists
+ if node, ok := c.m[key]; ok {
+ // move the node to front
+ c.l.MoveToFront(node)
+ // update the value of a list node
+ node.Value.(*list.Element).Value = Pair{key: key, value: value}
+ } else {
+ // delete the last list node if the list is full
+ if uint(c.l.Len()) == c.cap {
+ // get the key that we want to delete
+ idx := c.l.Back().Value.(*list.Element).Value.(Pair).key
+ // delete the node pointer in the hash map by key
+ delete(c.m, idx)
+ // remove the last list node
+ c.l.Remove(c.l.Back())
+ }
+ // initialize a list node
+ node := &list.Element{
+ Value: Pair{
+ key: key,
+ value: value,
+ },
+ }
+ // push the new list node into the list
+ ptr := c.l.PushFront(node)
+ // save the node pointer in the hash map
+ c.m[key] = ptr
+ }
+}
+
+func (c *LRUCache) Delete(key keyType) {
+ if node, ok := c.m[key]; ok {
+ c.l.Remove(node)
+ delete(c.m, key)
+ }
+}
diff --git a/plugins/processors/ifname/cache_test.go b/plugins/processors/ifname/cache_test.go
new file mode 100644
index 0000000000000..7d11ee29a8a55
--- /dev/null
+++ b/plugins/processors/ifname/cache_test.go
@@ -0,0 +1,23 @@
+package ifname
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestCache(t *testing.T) {
+ c := NewLRUCache(2)
+
+ c.Put("ones", LRUValType{val: nameMap{1: "one"}})
+ twoMap := LRUValType{val: nameMap{2: "two"}}
+ c.Put("twos", twoMap)
+ c.Put("threes", LRUValType{val: nameMap{3: "three"}})
+
+ _, ok := c.Get("ones")
+ require.False(t, ok)
+
+ v, ok := c.Get("twos")
+ require.True(t, ok)
+ require.Equal(t, twoMap, v)
+}
diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go
new file mode 100644
index 0000000000000..a5666bf0030a8
--- /dev/null
+++ b/plugins/processors/ifname/ifname.go
@@ -0,0 +1,403 @@
+package ifname
+
+import (
+ "fmt"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/snmp"
+ si "github.com/influxdata/telegraf/plugins/inputs/snmp"
+ "github.com/influxdata/telegraf/plugins/processors"
+ "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel"
+)
+
+var sampleConfig = `
+ ## Name of tag holding the interface number
+ # tag = "ifIndex"
+
+ ## Name of output tag where service name will be added
+ # dest = "ifName"
+
+ ## Name of tag of the SNMP agent to request the interface name from
+ # agent = "agent"
+
+ ## Timeout for each request.
+ # timeout = "5s"
+
+ ## SNMP version; can be 1, 2, or 3.
+ # version = 2
+
+ ## SNMP community string.
+ # community = "public"
+
+ ## Number of retries to attempt.
+ # retries = 3
+
+ ## The GETBULK max-repetitions parameter.
+ # max_repetitions = 10
+
+ ## SNMPv3 authentication and encryption options.
+ ##
+ ## Security Name.
+ # sec_name = "myuser"
+ ## Authentication protocol; one of "MD5", "SHA", or "".
+ # auth_protocol = "MD5"
+ ## Authentication password.
+ # auth_password = "pass"
+ ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+ # sec_level = "authNoPriv"
+ ## Context Name.
+ # context_name = ""
+ ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+ # priv_protocol = ""
+ ## Privacy password used for encrypted messages.
+ # priv_password = ""
+
+ ## max_parallel_lookups is the maximum number of SNMP requests to
+ ## make at the same time.
+ # max_parallel_lookups = 100
+
+ ## ordered controls whether or not the metrics need to stay in the
+ ## same order this plugin received them in. If false, this plugin
+ ## may change the order when data is cached. If you need metrics to
+ ## stay in order set this to true. keeping the metrics ordered may
+ ## be slightly slower
+ # ordered = false
+
+ ## cache_ttl is the amount of time interface names are cached for a
+ ## given agent. After this period elapses if names are needed they
+ ## will be retrieved again.
+ # cache_ttl = "8h"
+`
+
+type nameMap map[uint64]string
+type keyType = string
+type valType = nameMap
+
+type mapFunc func(agent string) (nameMap, error)
+type makeTableFunc func(string) (*si.Table, error)
+
+type sigMap map[string](chan struct{})
+
+type IfName struct {
+ SourceTag string `toml:"tag"`
+ DestTag string `toml:"dest"`
+ AgentTag string `toml:"agent"`
+
+ snmp.ClientConfig
+
+ CacheSize uint `toml:"max_cache_entries"`
+ MaxParallelLookups int `toml:"max_parallel_lookups"`
+ Ordered bool `toml:"ordered"`
+ CacheTTL config.Duration `toml:"cache_ttl"`
+
+ Log telegraf.Logger `toml:"-"`
+
+ ifTable *si.Table `toml:"-"`
+ ifXTable *si.Table `toml:"-"`
+
+ rwLock sync.RWMutex `toml:"-"`
+ cache *TTLCache `toml:"-"`
+
+ parallel parallel.Parallel `toml:"-"`
+ acc telegraf.Accumulator `toml:"-"`
+
+ getMapRemote mapFunc `toml:"-"`
+ makeTable makeTableFunc `toml:"-"`
+
+ gsBase snmp.GosnmpWrapper `toml:"-"`
+
+ sigs sigMap `toml:"-"`
+}
+
+const minRetry time.Duration = 5 * time.Minute
+
+func (d *IfName) SampleConfig() string {
+ return sampleConfig
+}
+
+func (d *IfName) Description() string {
+ return "Add a tag of the network interface name looked up over SNMP by interface number"
+}
+
+func (d *IfName) Init() error {
+ d.getMapRemote = d.getMapRemoteNoMock
+ d.makeTable = makeTableNoMock
+
+ c := NewTTLCache(time.Duration(d.CacheTTL), d.CacheSize)
+ d.cache = &c
+
+ d.sigs = make(sigMap)
+
+ return nil
+}
+
+func (d *IfName) addTag(metric telegraf.Metric) error {
+ agent, ok := metric.GetTag(d.AgentTag)
+ if !ok {
+ d.Log.Warn("Agent tag missing.")
+ return nil
+ }
+
+ num_s, ok := metric.GetTag(d.SourceTag)
+ if !ok {
+ d.Log.Warn("Source tag missing.")
+ return nil
+ }
+
+ num, err := strconv.ParseUint(num_s, 10, 64)
+ if err != nil {
+ return fmt.Errorf("couldn't parse source tag as uint")
+ }
+
+ firstTime := true
+ for {
+ m, age, err := d.getMap(agent)
+ if err != nil {
+ return fmt.Errorf("couldn't retrieve the table of interface names: %w", err)
+ }
+
+ name, found := m[num]
+ if found {
+ // success
+ metric.AddTag(d.DestTag, name)
+ return nil
+ }
+
+ // We have the agent's interface map but it doesn't contain
+ // the interface we're interested in. If the entry is old
+ // enough, retrieve it from the agent once more.
+ if age < minRetry {
+ return fmt.Errorf("interface number %d isn't in the table of interface names", num)
+ }
+
+ if firstTime {
+ d.invalidate(agent)
+ firstTime = false
+ continue
+ }
+
+ // not found, cache hit, retrying
+ return fmt.Errorf("missing interface but couldn't retrieve table")
+ }
+}
+
+func (d *IfName) invalidate(agent string) {
+ d.rwLock.RLock()
+ d.cache.Delete(agent)
+ d.rwLock.RUnlock()
+}
+
+func (d *IfName) Start(acc telegraf.Accumulator) error {
+ d.acc = acc
+
+ var err error
+ d.gsBase, err = snmp.NewWrapper(d.ClientConfig)
+ if err != nil {
+ return fmt.Errorf("parsing SNMP client config: %w", err)
+ }
+
+ d.ifTable, err = d.makeTable("IF-MIB::ifTable")
+ if err != nil {
+ return fmt.Errorf("looking up ifTable in local MIB: %w", err)
+ }
+ d.ifXTable, err = d.makeTable("IF-MIB::ifXTable")
+ if err != nil {
+ return fmt.Errorf("looking up ifXTable in local MIB: %w", err)
+ }
+
+ fn := func(m telegraf.Metric) []telegraf.Metric {
+ err := d.addTag(m)
+ if err != nil {
+ d.Log.Debugf("Error adding tag %v", err)
+ }
+ return []telegraf.Metric{m}
+ }
+
+ if d.Ordered {
+ d.parallel = parallel.NewOrdered(acc, fn, 10000, d.MaxParallelLookups)
+ } else {
+ d.parallel = parallel.NewUnordered(acc, fn, d.MaxParallelLookups)
+ }
+ return nil
+}
+
+func (d *IfName) Add(metric telegraf.Metric, acc telegraf.Accumulator) error {
+ d.parallel.Enqueue(metric)
+ return nil
+}
+
+func (d *IfName) Stop() error {
+ d.parallel.Stop()
+ return nil
+}
+
+// getMap gets the interface names map either from cache or from the SNMP
+// agent
+func (d *IfName) getMap(agent string) (entry nameMap, age time.Duration, err error) {
+ var sig chan struct{}
+
+ // Check cache
+ d.rwLock.RLock()
+ m, ok, age := d.cache.Get(agent)
+ d.rwLock.RUnlock()
+ if ok {
+ return m, age, nil
+ }
+
+ // Is this the first request for this agent?
+ d.rwLock.Lock()
+ sig, found := d.sigs[agent]
+ if !found {
+ s := make(chan struct{})
+ d.sigs[agent] = s
+ sig = s
+ }
+ d.rwLock.Unlock()
+
+ if found {
+ // This is not the first request. Wait for first to finish.
+ <-sig
+ // Check cache again
+ d.rwLock.RLock()
+ m, ok, age := d.cache.Get(agent)
+ d.rwLock.RUnlock()
+ if ok {
+ return m, age, nil
+ } else {
+ return nil, 0, fmt.Errorf("getting remote table from cache")
+ }
+ }
+
+ // The cache missed and this is the first request for this
+ // agent.
+
+ // Make the SNMP request
+ m, err = d.getMapRemote(agent)
+ if err != nil {
+ //failure. signal without saving to cache
+ d.rwLock.Lock()
+ close(sig)
+ delete(d.sigs, agent)
+ d.rwLock.Unlock()
+
+ return nil, 0, fmt.Errorf("getting remote table: %w", err)
+ }
+
+ // Cache it, then signal any other waiting requests for this agent
+ // and clean up
+ d.rwLock.Lock()
+ d.cache.Put(agent, m)
+ close(sig)
+ delete(d.sigs, agent)
+ d.rwLock.Unlock()
+
+ return m, 0, nil
+}
+
+func (d *IfName) getMapRemoteNoMock(agent string) (nameMap, error) {
+ gs := d.gsBase
+ err := gs.SetAgent(agent)
+ if err != nil {
+ return nil, fmt.Errorf("parsing agent tag: %w", err)
+ }
+
+ err = gs.Connect()
+ if err != nil {
+ return nil, fmt.Errorf("connecting when fetching interface names: %w", err)
+ }
+
+ //try ifXtable and ifName first. if that fails, fall back to
+ //ifTable and ifDescr
+ var m nameMap
+ m, err = buildMap(gs, d.ifXTable, "ifName")
+ if err == nil {
+ return m, nil
+ }
+
+ m, err = buildMap(gs, d.ifTable, "ifDescr")
+ if err == nil {
+ return m, nil
+ }
+
+ return nil, fmt.Errorf("fetching interface names: %w", err)
+}
+
+func init() {
+ processors.AddStreaming("ifname", func() telegraf.StreamingProcessor {
+ return &IfName{
+ SourceTag: "ifIndex",
+ DestTag: "ifName",
+ AgentTag: "agent",
+ CacheSize: 100,
+ MaxParallelLookups: 100,
+ ClientConfig: snmp.ClientConfig{
+ Retries: 3,
+ MaxRepetitions: 10,
+ Timeout: internal.Duration{Duration: 5 * time.Second},
+ Version: 2,
+ Community: "public",
+ },
+ CacheTTL: config.Duration(8 * time.Hour),
+ }
+ })
+}
+
+func makeTableNoMock(tableName string) (*si.Table, error) {
+ var err error
+ tab := si.Table{
+ Oid: tableName,
+ IndexAsTag: true,
+ }
+
+ err = tab.Init()
+ if err != nil {
+ //Init already wraps
+ return nil, err
+ }
+
+ return &tab, nil
+}
+
+func buildMap(gs snmp.GosnmpWrapper, tab *si.Table, column string) (nameMap, error) {
+ var err error
+
+ rtab, err := tab.Build(gs, true)
+ if err != nil {
+ //Build already wraps
+ return nil, err
+ }
+
+ if len(rtab.Rows) == 0 {
+ return nil, fmt.Errorf("empty table")
+ }
+
+ t := make(nameMap)
+ for _, v := range rtab.Rows {
+ i_str, ok := v.Tags["index"]
+ if !ok {
+ //should always have an index tag because the table should
+ //always have IndexAsTag true
+ return nil, fmt.Errorf("no index tag")
+ }
+ i, err := strconv.ParseUint(i_str, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("index tag isn't a uint")
+ }
+ name_if, ok := v.Fields[column]
+ if !ok {
+ return nil, fmt.Errorf("field %s is missing", column)
+ }
+ name, ok := name_if.(string)
+ if !ok {
+ return nil, fmt.Errorf("field %s isn't a string", column)
+ }
+
+ t[i] = name
+ }
+ return t, nil
+}
diff --git a/plugins/processors/ifname/ifname_test.go b/plugins/processors/ifname/ifname_test.go
new file mode 100644
index 0000000000000..85ddc767411c0
--- /dev/null
+++ b/plugins/processors/ifname/ifname_test.go
@@ -0,0 +1,144 @@
+package ifname
+
+import (
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/snmp"
+ si "github.com/influxdata/telegraf/plugins/inputs/snmp"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTable(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping integration test in short mode")
+ }
+
+ d := IfName{}
+ d.Init()
+ tab, err := d.makeTable("IF-MIB::ifTable")
+ require.NoError(t, err)
+
+ config := snmp.ClientConfig{
+ Version: 2,
+ Timeout: internal.Duration{Duration: 5 * time.Second}, // Doesn't work with 0 timeout
+ }
+ gs, err := snmp.NewWrapper(config)
+ require.NoError(t, err)
+ err = gs.SetAgent("127.0.0.1")
+ require.NoError(t, err)
+
+ err = gs.Connect()
+ require.NoError(t, err)
+
+ // Could use ifIndex but oid index is always the same
+ m, err := buildMap(gs, tab, "ifDescr")
+ require.NoError(t, err)
+ require.NotEmpty(t, m)
+}
+
+func TestIfName(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping integration test in short mode")
+ }
+ d := IfName{
+ SourceTag: "ifIndex",
+ DestTag: "ifName",
+ AgentTag: "agent",
+ CacheSize: 1000,
+ ClientConfig: snmp.ClientConfig{
+ Version: 2,
+ Timeout: internal.Duration{Duration: 5 * time.Second}, // Doesn't work with 0 timeout
+ },
+ }
+ err := d.Init()
+ require.NoError(t, err)
+
+ acc := testutil.Accumulator{}
+ err = d.Start(&acc)
+
+ require.NoError(t, err)
+
+ m := testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "ifIndex": "1",
+ "agent": "127.0.0.1",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ )
+
+ expected := testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "ifIndex": "1",
+ "agent": "127.0.0.1",
+ "ifName": "lo",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ )
+
+ err = d.addTag(m)
+ require.NoError(t, err)
+
+ testutil.RequireMetricEqual(t, expected, m)
+}
+
+func TestGetMap(t *testing.T) {
+ d := IfName{
+ CacheSize: 1000,
+ CacheTTL: config.Duration(10 * time.Second),
+ }
+
+ // Don't run net-snmp commands to look up table names.
+ d.makeTable = func(agent string) (*si.Table, error) {
+ return &si.Table{}, nil
+ }
+ err := d.Init()
+ require.NoError(t, err)
+
+ expected := nameMap{
+ 1: "ifname1",
+ 2: "ifname2",
+ }
+
+ var remoteCalls int32
+
+ // Mock the snmp transaction
+ d.getMapRemote = func(agent string) (nameMap, error) {
+ atomic.AddInt32(&remoteCalls, 1)
+ return expected, nil
+ }
+ m, age, err := d.getMap("agent")
+ require.NoError(t, err)
+ require.Zero(t, age) // Age is zero when map comes from getMapRemote
+ require.Equal(t, expected, m)
+
+ // Remote call should happen the first time getMap runs
+ require.Equal(t, int32(1), remoteCalls)
+
+ var wg sync.WaitGroup
+ const thMax = 3
+ for th := 0; th < thMax; th++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ m, age, err := d.getMap("agent")
+ require.NoError(t, err)
+ require.NotZero(t, age) // Age is nonzero when map comes from cache
+ require.Equal(t, expected, m)
+ }()
+ }
+
+ wg.Wait()
+
+ // Remote call should not happen subsequent times getMap runs
+ require.Equal(t, int32(1), remoteCalls)
+}
diff --git a/plugins/processors/ifname/ttl_cache.go b/plugins/processors/ifname/ttl_cache.go
new file mode 100644
index 0000000000000..8f9c4ae653499
--- /dev/null
+++ b/plugins/processors/ifname/ttl_cache.go
@@ -0,0 +1,52 @@
+package ifname
+
+import (
+ "time"
+)
+
+type TTLValType struct {
+ time time.Time // when entry was added
+ val valType
+}
+
+type timeFunc func() time.Time
+
+type TTLCache struct {
+ validDuration time.Duration
+ lru LRUCache
+ now timeFunc
+}
+
+func NewTTLCache(valid time.Duration, capacity uint) TTLCache {
+ return TTLCache{
+ lru: NewLRUCache(capacity),
+ validDuration: valid,
+ now: time.Now,
+ }
+}
+
+func (c *TTLCache) Get(key keyType) (valType, bool, time.Duration) {
+ v, ok := c.lru.Get(key)
+ if !ok {
+ return valType{}, false, 0
+ }
+ age := c.now().Sub(v.time)
+ if age < c.validDuration {
+ return v.val, ok, age
+ } else {
+ c.lru.Delete(key)
+ return valType{}, false, 0
+ }
+}
+
+func (c *TTLCache) Put(key keyType, value valType) {
+ v := TTLValType{
+ val: value,
+ time: c.now(),
+ }
+ c.lru.Put(key, v)
+}
+
+func (c *TTLCache) Delete(key keyType) {
+ c.lru.Delete(key)
+}
diff --git a/plugins/processors/ifname/ttl_cache_test.go b/plugins/processors/ifname/ttl_cache_test.go
new file mode 100644
index 0000000000000..8ae57d6df9265
--- /dev/null
+++ b/plugins/processors/ifname/ttl_cache_test.go
@@ -0,0 +1,43 @@
+package ifname
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestTTLCacheExpire(t *testing.T) {
+ c := NewTTLCache(1*time.Second, 100)
+
+ c.now = func() time.Time {
+ return time.Unix(0, 0)
+ }
+
+ c.Put("ones", nameMap{1: "one"})
+ require.Len(t, c.lru.m, 1)
+
+ c.now = func() time.Time {
+ return time.Unix(1, 0)
+ }
+
+ _, ok, _ := c.Get("ones")
+ require.False(t, ok)
+ require.Len(t, c.lru.m, 0)
+ require.Equal(t, c.lru.l.Len(), 0)
+}
+
+func TestTTLCache(t *testing.T) {
+ c := NewTTLCache(1*time.Second, 100)
+
+ c.now = func() time.Time {
+ return time.Unix(0, 0)
+ }
+
+ expected := nameMap{1: "one"}
+ c.Put("ones", expected)
+
+ actual, ok, _ := c.Get("ones")
+ require.True(t, ok)
+ require.Equal(t, expected, actual)
+}
diff --git a/plugins/processors/pivot/README.md b/plugins/processors/pivot/README.md
new file mode 100644
index 0000000000000..b3eb06fd3f7da
--- /dev/null
+++ b/plugins/processors/pivot/README.md
@@ -0,0 +1,30 @@
+# Pivot Processor
+
+You can use the `pivot` processor to rotate single valued metrics into a multi
+field metric. This transformation often results in data that is more easily
+to apply mathematical operators and comparisons between, and flatten into a
+more compact representation for write operations with some output data
+formats.
+
+To perform the reverse operation use the [unpivot] processor.
+
+### Configuration
+
+```toml
+[[processors.pivot]]
+ ## Tag to use for naming the new field.
+ tag_key = "name"
+ ## Field to use as the value of the new field.
+ value_key = "value"
+```
+
+### Example
+
+```diff
+- cpu,cpu=cpu0,name=time_idle value=42i
+- cpu,cpu=cpu0,name=time_user value=43i
++ cpu,cpu=cpu0 time_idle=42i
++ cpu,cpu=cpu0 time_user=43i
+```
+
+[unpivot]: /plugins/processors/unpivot/README.md
diff --git a/plugins/processors/pivot/pivot.go b/plugins/processors/pivot/pivot.go
new file mode 100644
index 0000000000000..b20c7f7580ef7
--- /dev/null
+++ b/plugins/processors/pivot/pivot.go
@@ -0,0 +1,54 @@
+package pivot
+
+import (
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+const (
+ description = "Rotate a single valued metric into a multi field metric"
+ sampleConfig = `
+ ## Tag to use for naming the new field.
+ tag_key = "name"
+ ## Field to use as the value of the new field.
+ value_key = "value"
+`
+)
+
+type Pivot struct {
+ TagKey string `toml:"tag_key"`
+ ValueKey string `toml:"value_key"`
+}
+
+func (p *Pivot) SampleConfig() string {
+ return sampleConfig
+}
+
+func (p *Pivot) Description() string {
+ return description
+}
+
+func (p *Pivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
+ for _, m := range metrics {
+ key, ok := m.GetTag(p.TagKey)
+ if !ok {
+ continue
+ }
+
+ value, ok := m.GetField(p.ValueKey)
+ if !ok {
+ continue
+ }
+
+ m.RemoveTag(p.TagKey)
+ m.RemoveField(p.ValueKey)
+ m.AddField(key, value)
+ }
+ return metrics
+}
+
+func init() {
+ processors.Add("pivot", func() telegraf.Processor {
+ return &Pivot{}
+ })
+}
diff --git a/plugins/processors/pivot/pivot_test.go b/plugins/processors/pivot/pivot_test.go
new file mode 100644
index 0000000000000..34924f8fa246d
--- /dev/null
+++ b/plugins/processors/pivot/pivot_test.go
@@ -0,0 +1,111 @@
+package pivot
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func TestPivot(t *testing.T) {
+ now := time.Now()
+ tests := []struct {
+ name string
+ pivot *Pivot
+ metrics []telegraf.Metric
+ expected []telegraf.Metric
+ }{
+ {
+ name: "simple",
+ pivot: &Pivot{
+ TagKey: "name",
+ ValueKey: "value",
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "name": "idle_time",
+ },
+ map[string]interface{}{
+ "value": int64(42),
+ },
+ now,
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "idle_time": int64(42),
+ },
+ now,
+ ),
+ },
+ },
+ {
+ name: "missing tag",
+ pivot: &Pivot{
+ TagKey: "name",
+ ValueKey: "value",
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "foo": "idle_time",
+ },
+ map[string]interface{}{
+ "value": int64(42),
+ },
+ now,
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "foo": "idle_time",
+ },
+ map[string]interface{}{
+ "value": int64(42),
+ },
+ now,
+ ),
+ },
+ },
+ {
+ name: "missing field",
+ pivot: &Pivot{
+ TagKey: "name",
+ ValueKey: "value",
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "name": "idle_time",
+ },
+ map[string]interface{}{
+ "foo": int64(42),
+ },
+ now,
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "name": "idle_time",
+ },
+ map[string]interface{}{
+ "foo": int64(42),
+ },
+ now,
+ ),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := tt.pivot.Apply(tt.metrics...)
+ testutil.RequireMetricsEqual(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/plugins/processors/port_name/README.md b/plugins/processors/port_name/README.md
new file mode 100644
index 0000000000000..ad4e52d6bc187
--- /dev/null
+++ b/plugins/processors/port_name/README.md
@@ -0,0 +1,28 @@
+# Port Name Lookup Processor Plugin
+
+Use the `port_name` processor to convert a tag containing a well-known port number to the registered service name.
+
+Tag can contain a number ("80") or number and protocol separated by slash ("443/tcp"). If protocol is not provided it defaults to tcp but can be changed with the default_protocol setting.
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration
+
+```toml
+[[processors.port_name]]
+ ## Name of tag holding the port number
+ # tag = "port"
+
+ ## Name of output tag where service name will be added
+ # dest = "service"
+
+ ## Default tcp or udp
+ # default_protocol = "tcp"
+```
+
+### Example
+
+```diff
+- measurement,port=80 field=123 1560540094000000000
++ measurement,port=80,service=http field=123 1560540094000000000
+```
diff --git a/plugins/processors/port_name/port_name.go b/plugins/processors/port_name/port_name.go
new file mode 100644
index 0000000000000..50c893e60d6dc
--- /dev/null
+++ b/plugins/processors/port_name/port_name.go
@@ -0,0 +1,174 @@
+package portname
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+var sampleConfig = `
+[[processors.port_name]]
+ ## Name of tag holding the port number
+ # tag = "port"
+
+ ## Name of output tag where service name will be added
+ # dest = "service"
+
+ ## Default tcp or udp
+ # default_protocol = "tcp"
+`
+
+type sMap map[string]map[int]string // "https" == services["tcp"][443]
+
+var services sMap
+
+type PortName struct {
+ SourceTag string `toml:"tag"`
+ DestTag string `toml:"dest"`
+ DefaultProtocol string `toml:"default_protocol"`
+
+ Log telegraf.Logger `toml:"-"`
+}
+
+func (d *PortName) SampleConfig() string {
+ return sampleConfig
+}
+
+func (d *PortName) Description() string {
+ return "Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file"
+}
+
+func readServicesFile() {
+ file, err := os.Open(servicesPath())
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ services = readServices(file)
+}
+
+// Read the services file into a map.
+//
+// This function takes a similar approach to parsing as the go
+// standard library (see src/net/port_unix.go in golang source) but
+// maps protocol and port number to service name, not protocol and
+// service to port number.
+func readServices(r io.Reader) sMap {
+ services = make(sMap)
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ // "http 80/tcp www www-http # World Wide Web HTTP"
+ if i := strings.Index(line, "#"); i >= 0 {
+ line = line[:i]
+ }
+ f := strings.Fields(line)
+ if len(f) < 2 {
+ continue
+ }
+ service := f[0] // "http"
+ portProto := f[1] // "80/tcp"
+ portProtoSlice := strings.SplitN(portProto, "/", 2)
+ if len(portProtoSlice) < 2 {
+ continue
+ }
+ port, err := strconv.Atoi(portProtoSlice[0]) // "80"
+ if err != nil || port <= 0 {
+ continue
+ }
+ proto := portProtoSlice[1] // "tcp"
+ proto = strings.ToLower(proto)
+
+ protoMap, ok := services[proto]
+ if !ok {
+ protoMap = make(map[int]string)
+ services[proto] = protoMap
+ }
+ protoMap[port] = service
+ }
+ return services
+}
+
+func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
+ for _, m := range metrics {
+ portProto, ok := m.GetTag(d.SourceTag)
+ if !ok {
+ // Nonexistent tag
+ continue
+ }
+ portProtoSlice := strings.SplitN(portProto, "/", 2)
+ l := len(portProtoSlice)
+
+ if l == 0 {
+ // Empty tag
+ d.Log.Errorf("empty port tag: %v", d.SourceTag)
+ continue
+ }
+
+ var port int
+ if l > 0 {
+ var err error
+ val := portProtoSlice[0]
+ port, err = strconv.Atoi(val)
+ if err != nil {
+ // Can't convert port to string
+ d.Log.Errorf("error converting port to integer: %v", val)
+ continue
+ }
+ }
+
+ proto := d.DefaultProtocol
+ if l > 1 && len(portProtoSlice[1]) > 0 {
+ proto = portProtoSlice[1]
+ }
+ proto = strings.ToLower(proto)
+
+ protoMap, ok := services[proto]
+ if !ok {
+ // Unknown protocol
+ //
+ // Protocol is normally tcp or udp. The services file
+ // normally has entries for both, so our map does too. If
+ // not, it's very likely the source tag or the services
+ // file doesn't make sense.
+ d.Log.Errorf("protocol not found in services map: %v", proto)
+ continue
+ }
+
+ service, ok := protoMap[port]
+ if !ok {
+ // Unknown port
+ //
+ // Not all ports are named so this isn't an error, but
+ // it's helpful to know when debugging.
+ d.Log.Debugf("port not found in services map: %v", port)
+ continue
+ }
+
+ m.AddTag(d.DestTag, service)
+ }
+
+ return metrics
+}
+
+func (h *PortName) Init() error {
+ services = make(sMap)
+ readServicesFile()
+ return nil
+}
+
+func init() {
+ processors.Add("port_name", func() telegraf.Processor {
+ return &PortName{
+ SourceTag: "port",
+ DestTag: "service",
+ DefaultProtocol: "tcp",
+ }
+ })
+}
diff --git a/plugins/processors/port_name/port_name_test.go b/plugins/processors/port_name/port_name_test.go
new file mode 100644
index 0000000000000..b58f95a9eb75a
--- /dev/null
+++ b/plugins/processors/port_name/port_name_test.go
@@ -0,0 +1,261 @@
+package portname
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+var fakeServices = `
+http 80/tcp www # WorldWideWeb HTTP
+https 443/tcp # http protocol over TLS/SSL
+tftp 69/udp`
+
+func TestReadServicesFile(t *testing.T) {
+ readServicesFile()
+ require.NotZero(t, len(services))
+}
+
+func TestFakeServices(t *testing.T) {
+ r := strings.NewReader(fakeServices)
+ m := readServices(r)
+ require.Equal(t, sMap{"tcp": {80: "http", 443: "https"}, "udp": {69: "tftp"}}, m)
+}
+
+func TestTable(t *testing.T) {
+ var tests = []struct {
+ name string
+ tag string
+ dest string
+ prot string
+ input []telegraf.Metric
+ expected []telegraf.Metric
+ }{
+ {
+ name: "ordinary tcp default",
+ tag: "port",
+ dest: "service",
+ prot: "tcp",
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "443",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "443",
+ "service": "https",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "force udp default",
+ tag: "port",
+ dest: "service",
+ prot: "udp",
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "69",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "69",
+ "service": "tftp",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "override default protocol",
+ tag: "port",
+ dest: "service",
+ prot: "foobar",
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "80/tcp",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "80/tcp",
+ "service": "http",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "multiple metrics, multiple protocols",
+ tag: "port",
+ dest: "service",
+ prot: "tcp",
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "80",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "69/udp",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "80",
+ "service": "http",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "69/udp",
+ "service": "tftp",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "rename source and destination tags",
+ tag: "foo",
+ dest: "bar",
+ prot: "tcp",
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "foo": "80",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "foo": "80",
+ "bar": "http",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "unknown port",
+ tag: "port",
+ dest: "service",
+ prot: "tcp",
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "9999",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "9999",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "don't mix up protocols",
+ tag: "port",
+ dest: "service",
+ prot: "udp",
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "80",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "meas",
+ map[string]string{
+ "port": "80",
+ },
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+
+ r := strings.NewReader(fakeServices)
+ services = readServices(r)
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ p := PortName{
+ SourceTag: tt.tag,
+ DestTag: tt.dest,
+ DefaultProtocol: tt.prot,
+ Log: testutil.Logger{},
+ }
+
+ actual := p.Apply(tt.input...)
+
+ testutil.RequireMetricsEqual(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/plugins/processors/port_name/services_path.go b/plugins/processors/port_name/services_path.go
new file mode 100644
index 0000000000000..c8cf73d14157c
--- /dev/null
+++ b/plugins/processors/port_name/services_path.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package portname
+
+import (
+ "os"
+ "path/filepath"
+)
+
+func servicesPath() string {
+ return filepath.Join(os.Getenv("WINDIR"), `system32\drivers\etc\services`)
+}
diff --git a/plugins/processors/port_name/services_path_notwindows.go b/plugins/processors/port_name/services_path_notwindows.go
new file mode 100644
index 0000000000000..5097bfa9c6140
--- /dev/null
+++ b/plugins/processors/port_name/services_path_notwindows.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package portname
+
+func servicesPath() string {
+ return "/etc/services"
+}
diff --git a/plugins/processors/regex/README.md b/plugins/processors/regex/README.md
index c9eec037b5161..a6cef82a09142 100644
--- a/plugins/processors/regex/README.md
+++ b/plugins/processors/regex/README.md
@@ -2,6 +2,8 @@
The `regex` plugin transforms tag and field values with regex pattern. If `result_key` parameter is present, it can produce new tags and fields from existing ones.
+For tags transforms, if `append` is set to `true`, it will append the transformation to the existing tag value, instead of overwriting it.
+
### Configuration:
```toml
@@ -14,10 +16,12 @@ The `regex` plugin transforms tag and field values with regex pattern. If `resul
key = "resp_code"
## Regular expression to match on a tag value
pattern = "^(\\d)\\d\\d$"
- ## Pattern for constructing a new value (${1} represents first subgroup)
+ ## Matches of the pattern will be replaced with this string. Use ${1}
+ ## notation to use the text of the first submatch.
replacement = "${1}xx"
[[processors.regex.fields]]
+ ## Field to change
key = "request"
## All the power of the Go regular expressions available here
## For example, named subgroups
diff --git a/plugins/processors/regex/regex.go b/plugins/processors/regex/regex.go
index f73ed06b61cd1..47b53546f4ffe 100644
--- a/plugins/processors/regex/regex.go
+++ b/plugins/processors/regex/regex.go
@@ -18,6 +18,7 @@ type converter struct {
Pattern string
Replacement string
ResultKey string
+ Append bool
}
const sampleConfig = `
@@ -27,14 +28,16 @@ const sampleConfig = `
# key = "resp_code"
# ## Regular expression to match on a tag value
# pattern = "^(\\d)\\d\\d$"
- # ## Pattern for constructing a new value (${1} represents first subgroup)
+ # ## Matches of the pattern will be replaced with this string. Use ${1}
+ # ## notation to use the text of the first submatch.
# replacement = "${1}xx"
# [[processors.regex.fields]]
+ # ## Field to change
# key = "request"
# ## All the power of the Go regular expressions available here
# ## For example, named subgroups
- # pattern = "^/api(?P/[\\w/]+)\\S*"
+ # pattern = "^/api(?P/[\\w/]+)\\S*"
# replacement = "${method}"
# ## If result_key is present, a new field will be created
# ## instead of changing existing field
@@ -68,6 +71,11 @@ func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, converter := range r.Tags {
if value, ok := metric.GetTag(converter.Key); ok {
if key, newValue := r.convert(converter, value); newValue != "" {
+ if converter.Append {
+ if v, ok := metric.GetTag(key); ok {
+ newValue = v + newValue
+ }
+ }
metric.AddTag(key, newValue)
}
}
diff --git a/plugins/processors/regex/regex_test.go b/plugins/processors/regex/regex_test.go
index f16ef7f5c36cb..b0ddf47d08a7b 100644
--- a/plugins/processors/regex/regex_test.go
+++ b/plugins/processors/regex/regex_test.go
@@ -108,6 +108,20 @@ func TestTagConversions(t *testing.T) {
"resp_code": "2xx",
},
},
+ {
+ message: "Should append to existing tag",
+ converter: converter{
+ Key: "verb",
+ Pattern: "^(.*)$",
+ Replacement: " (${1})",
+ ResultKey: "resp_code",
+ Append: true,
+ },
+ expectedTags: map[string]string{
+ "verb": "GET",
+ "resp_code": "200 (GET)",
+ },
+ },
{
message: "Should add new tag",
converter: converter{
diff --git a/plugins/processors/registry.go b/plugins/processors/registry.go
index 592c688f3dc6b..efade2966be60 100644
--- a/plugins/processors/registry.go
+++ b/plugins/processors/registry.go
@@ -3,9 +3,24 @@ package processors
import "github.com/influxdata/telegraf"
type Creator func() telegraf.Processor
+type StreamingCreator func() telegraf.StreamingProcessor
-var Processors = map[string]Creator{}
+// all processors are streaming processors.
+// telegraf.Processor processors are upgraded to telegraf.StreamingProcessor
+var Processors = map[string]StreamingCreator{}
+// Add adds a telegraf.Processor processor
func Add(name string, creator Creator) {
+ Processors[name] = upgradeToStreamingProcessor(creator)
+}
+
+// AddStreaming adds a telegraf.StreamingProcessor streaming processor
+func AddStreaming(name string, creator StreamingCreator) {
Processors[name] = creator
}
+
+func upgradeToStreamingProcessor(oldCreator Creator) StreamingCreator {
+ return func() telegraf.StreamingProcessor {
+ return NewStreamingProcessorFromProcessor(oldCreator())
+ }
+}
diff --git a/plugins/processors/reverse_dns/README.md b/plugins/processors/reverse_dns/README.md
new file mode 100644
index 0000000000000..c8aa0bfdb58e6
--- /dev/null
+++ b/plugins/processors/reverse_dns/README.md
@@ -0,0 +1,74 @@
+# Reverse DNS Processor Plugin
+
+The `reverse_dns` processor does a reverse-dns lookup on tags (or fields) with
+IPs in them.
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration:
+
+```toml
+[[processors.reverse_dns]]
+ ## For optimal performance, you may want to limit which metrics are passed to this
+ ## processor. eg:
+ ## namepass = ["my_metric_*"]
+
+ ## cache_ttl is how long the dns entries should stay cached for.
+ ## generally longer is better, but if you expect a large number of diverse lookups
+ ## you'll want to consider memory use.
+ cache_ttl = "24h"
+
+ ## lookup_timeout is how long should you wait for a single dns request to repsond.
+ ## this is also the maximum acceptable latency for a metric travelling through
+ ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+ ## be passed on unaltered.
+ ## multiple simultaneous resolution requests for the same IP will only make a
+ ## single rDNS request, and they will all wait for the answer for this long.
+ lookup_timeout = "3s"
+
+ ## max_parallel_lookups is the maximum number of dns requests to be in flight
+ ## at the same time. Requesting hitting cached values do not count against this
+ ## total, and neither do mulptiple requests for the same IP.
+ ## It's probably best to keep this number fairly low.
+ max_parallel_lookups = 10
+
+ ## ordered controls whether or not the metrics need to stay in the same order
+ ## this plugin received them in. If false, this plugin will change the order
+ ## with requests hitting cached results moving through immediately and not
+ ## waiting on slower lookups. This may cause issues for you if you are
+ ## depending on the order of metrics staying the same. If so, set this to true.
+ ## keeping the metrics ordered may be slightly slower.
+ ordered = false
+
+ [[processors.reverse_dns.lookup]]
+ ## get the ip from the field "source_ip", and put the result in the field "source_name"
+ field = "source_ip"
+ dest = "source_name"
+
+ [[processors.reverse_dns.lookup]]
+ ## get the ip from the tag "destination_ip", and put the result in the tag
+ ## "destination_name".
+ tag = "destination_ip"
+ dest = "destination_name"
+
+ ## If you would prefer destination_name to be a field instead, you can use a
+ ## processors.converter after this one, specifying the order attribute.
+```
+
+
+
+### Example processing:
+
+example config:
+
+```toml
+[[processors.reverse_dns]]
+ [[processors.reverse_dns.lookup]]
+ tag = "ip"
+ dest = "domain"
+```
+
+```diff
+- ping,ip=8.8.8.8 elapsed=300i 1502489900000000000
++ ping,ip=8.8.8.8,domain=dns.google. elapsed=300i 1502489900000000000
+```
diff --git a/plugins/processors/reverse_dns/parallel/ordered.go b/plugins/processors/reverse_dns/parallel/ordered.go
new file mode 100644
index 0000000000000..763df2db63f6a
--- /dev/null
+++ b/plugins/processors/reverse_dns/parallel/ordered.go
@@ -0,0 +1,89 @@
+package parallel
+
+import (
+ "sync"
+
+ "github.com/influxdata/telegraf"
+)
+
+type Ordered struct {
+ wg sync.WaitGroup
+ fn func(telegraf.Metric) []telegraf.Metric
+
+ // queue of jobs coming in. Workers pick jobs off this queue for processing
+ workerQueue chan job
+
+ // queue of ordered metrics going out
+ queue chan futureMetric
+}
+
+func NewOrdered(
+ acc telegraf.Accumulator,
+ fn func(telegraf.Metric) []telegraf.Metric,
+ orderedQueueSize int,
+ workerCount int,
+) *Ordered {
+ p := &Ordered{
+ fn: fn,
+ workerQueue: make(chan job, workerCount),
+ queue: make(chan futureMetric, orderedQueueSize),
+ }
+ p.startWorkers(workerCount)
+ p.wg.Add(1)
+ go func() {
+ p.readQueue(acc)
+ p.wg.Done()
+ }()
+ return p
+}
+
+func (p *Ordered) Enqueue(metric telegraf.Metric) {
+ future := make(futureMetric)
+ p.queue <- future
+
+ // write the future to the worker pool. Order doesn't matter now because the
+ // outgoing p.queue will enforce order regardless of the order the jobs are
+ // completed in
+ p.workerQueue <- job{
+ future: future,
+ metric: metric,
+ }
+}
+
+func (p *Ordered) readQueue(acc telegraf.Accumulator) {
+ // wait for the response from each worker in order
+ for mCh := range p.queue {
+ // allow each worker to write out multiple metrics
+ for metrics := range mCh {
+ for _, m := range metrics {
+ acc.AddMetric(m)
+ }
+ }
+ }
+}
+
+func (p *Ordered) startWorkers(count int) {
+ p.wg.Add(count)
+ for i := 0; i < count; i++ {
+ go func() {
+ for job := range p.workerQueue {
+ job.future <- p.fn(job.metric)
+ close(job.future)
+ }
+ p.wg.Done()
+ }()
+ }
+}
+
+func (p *Ordered) Stop() {
+ close(p.queue)
+ close(p.workerQueue)
+ p.wg.Wait()
+}
+
+type futureMetric chan []telegraf.Metric
+
+type job struct {
+ future futureMetric
+ metric telegraf.Metric
+}
diff --git a/plugins/processors/reverse_dns/parallel/parallel.go b/plugins/processors/reverse_dns/parallel/parallel.go
new file mode 100644
index 0000000000000..f3ad04c72c717
--- /dev/null
+++ b/plugins/processors/reverse_dns/parallel/parallel.go
@@ -0,0 +1,8 @@
+package parallel
+
+import "github.com/influxdata/telegraf"
+
+type Parallel interface {
+ Enqueue(telegraf.Metric)
+ Stop()
+}
diff --git a/plugins/processors/reverse_dns/parallel/parallel_test.go b/plugins/processors/reverse_dns/parallel/parallel_test.go
new file mode 100644
index 0000000000000..0d2839a24f4cd
--- /dev/null
+++ b/plugins/processors/reverse_dns/parallel/parallel_test.go
@@ -0,0 +1,119 @@
+package parallel_test
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOrderedJobsStayOrdered(t *testing.T) {
+ acc := &testutil.Accumulator{}
+
+ p := parallel.NewOrdered(acc, jobFunc, 10000, 10)
+ now := time.Now()
+ for i := 0; i < 20000; i++ {
+ m, err := metric.New("test",
+ map[string]string{},
+ map[string]interface{}{
+ "val": i,
+ },
+ now,
+ )
+ require.NoError(t, err)
+ now = now.Add(1)
+ p.Enqueue(m)
+ }
+ p.Stop()
+
+ i := 0
+ require.Len(t, acc.Metrics, 20000, fmt.Sprintf("expected 20k metrics but got %d", len(acc.GetTelegrafMetrics())))
+ for _, m := range acc.GetTelegrafMetrics() {
+ v, ok := m.GetField("val")
+ require.True(t, ok)
+ require.EqualValues(t, i, v)
+ i++
+ }
+}
+
+func TestUnorderedJobsDontDropAnyJobs(t *testing.T) {
+ acc := &testutil.Accumulator{}
+
+ p := parallel.NewUnordered(acc, jobFunc, 10)
+
+ now := time.Now()
+
+ expectedTotal := 0
+ for i := 0; i < 20000; i++ {
+ expectedTotal += i
+ m, err := metric.New("test",
+ map[string]string{},
+ map[string]interface{}{
+ "val": i,
+ },
+ now,
+ )
+ require.NoError(t, err)
+ now = now.Add(1)
+ p.Enqueue(m)
+ }
+ p.Stop()
+
+ actualTotal := int64(0)
+ require.Len(t, acc.Metrics, 20000, fmt.Sprintf("expected 20k metrics but got %d", len(acc.GetTelegrafMetrics())))
+ for _, m := range acc.GetTelegrafMetrics() {
+ v, ok := m.GetField("val")
+ require.True(t, ok)
+ actualTotal += v.(int64)
+ }
+ require.EqualValues(t, expectedTotal, actualTotal)
+}
+
+func BenchmarkOrdered(b *testing.B) {
+ acc := &testutil.Accumulator{}
+
+ p := parallel.NewOrdered(acc, jobFunc, 10000, 10)
+
+ m, _ := metric.New("test",
+ map[string]string{},
+ map[string]interface{}{
+ "val": 1,
+ },
+ time.Now(),
+ )
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ p.Enqueue(m)
+ }
+ p.Stop()
+}
+
+func BenchmarkUnordered(b *testing.B) {
+ acc := &testutil.Accumulator{}
+
+ p := parallel.NewUnordered(acc, jobFunc, 10)
+
+ m, _ := metric.New("test",
+ map[string]string{},
+ map[string]interface{}{
+ "val": 1,
+ },
+ time.Now(),
+ )
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ p.Enqueue(m)
+ }
+ p.Stop()
+}
+
+func jobFunc(m telegraf.Metric) []telegraf.Metric {
+ return []telegraf.Metric{m}
+}
diff --git a/plugins/processors/reverse_dns/parallel/unordered.go b/plugins/processors/reverse_dns/parallel/unordered.go
new file mode 100644
index 0000000000000..eef6e1f7c0061
--- /dev/null
+++ b/plugins/processors/reverse_dns/parallel/unordered.go
@@ -0,0 +1,60 @@
+package parallel
+
+import (
+ "sync"
+
+ "github.com/influxdata/telegraf"
+)
+
+type Unordered struct {
+ wg sync.WaitGroup
+ acc telegraf.Accumulator
+ fn func(telegraf.Metric) []telegraf.Metric
+ inQueue chan telegraf.Metric
+}
+
+func NewUnordered(
+ acc telegraf.Accumulator,
+ fn func(telegraf.Metric) []telegraf.Metric,
+ workerCount int,
+) *Unordered {
+ p := &Unordered{
+ acc: acc,
+ inQueue: make(chan telegraf.Metric, workerCount),
+ fn: fn,
+ }
+
+ // start workers
+ p.wg.Add(1)
+ go func() {
+ p.startWorkers(workerCount)
+ p.wg.Done()
+ }()
+
+ return p
+}
+
+func (p *Unordered) startWorkers(count int) {
+ wg := sync.WaitGroup{}
+ wg.Add(count)
+ for i := 0; i < count; i++ {
+ go func() {
+ for metric := range p.inQueue {
+ for _, m := range p.fn(metric) {
+ p.acc.AddMetric(m)
+ }
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func (p *Unordered) Stop() {
+ close(p.inQueue)
+ p.wg.Wait()
+}
+
+func (p *Unordered) Enqueue(m telegraf.Metric) {
+ p.inQueue <- m
+}
diff --git a/plugins/processors/reverse_dns/rdnscache.go b/plugins/processors/reverse_dns/rdnscache.go
new file mode 100644
index 0000000000000..1d86b5385d218
--- /dev/null
+++ b/plugins/processors/reverse_dns/rdnscache.go
@@ -0,0 +1,319 @@
+package reverse_dns
+
+import (
+ "context"
+ "errors"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sync/semaphore"
+)
+
+const defaultMaxWorkers = 10
+
+var (
+ ErrTimeout = errors.New("request timed out")
+)
+
+// AnyResolver is for the net.Resolver
+type AnyResolver interface {
+ LookupAddr(ctx context.Context, addr string) (names []string, err error)
+}
+
+// ReverseDNSCache is safe to use across multiple goroutines.
+// if multiple goroutines request the same IP at the same time, one of the
+// requests will trigger the lookup and the rest will wait for its response.
+type ReverseDNSCache struct {
+ Resolver AnyResolver
+ stats RDNSCacheStats
+
+ // settings
+ ttl time.Duration
+ lookupTimeout time.Duration
+ maxWorkers int
+
+ // internal
+ rwLock sync.RWMutex
+ sem *semaphore.Weighted
+ cancelCleanupWorker context.CancelFunc
+
+ cache map[string]*dnslookup
+
+ // keep an ordered list of what needs to be worked on and what is due to expire.
+ // We can use this list for both with a job position marker, and by popping items
+ // off the list as they expire. This avoids iterating over the whole map to find
+ // things to do.
+ // As a bonus, we only have to read the first item to know if anything in the
+ // map has expired.
+ // must lock to get access to this.
+ expireList []*dnslookup
+ expireListLock sync.Mutex
+}
+
+type RDNSCacheStats struct {
+ CacheHit uint64
+ CacheMiss uint64
+ CacheExpire uint64
+ RequestsAbandoned uint64
+ RequestsFilled uint64
+}
+
+func NewReverseDNSCache(ttl, lookupTimeout time.Duration, workerPoolSize int) *ReverseDNSCache {
+ if workerPoolSize <= 0 {
+ workerPoolSize = defaultMaxWorkers
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ d := &ReverseDNSCache{
+ ttl: ttl,
+ lookupTimeout: lookupTimeout,
+ cache: map[string]*dnslookup{},
+ expireList: []*dnslookup{},
+ maxWorkers: workerPoolSize,
+ sem: semaphore.NewWeighted(int64(workerPoolSize)),
+ cancelCleanupWorker: cancel,
+ Resolver: net.DefaultResolver,
+ }
+ d.startCleanupWorker(ctx)
+ return d
+}
+
+// dnslookup represents a lookup request/response. It may or may not be answered yet.
+// interested parties register themselves with existing requests or create new ones
+// to get their dns query answered. Answers will be pushed out to callbacks.
+type dnslookup struct {
+ ip string // keep a copy for the expireList.
+ domains []string
+ expiresAt time.Time
+ completed bool
+ callbacks []callbackChannelType
+}
+
+type lookupResult struct {
+ domains []string
+ err error
+}
+
+type callbackChannelType chan lookupResult
+
+// Lookup takes a string representing a parseable ipv4 or ipv6 IP, and blocks
+// until it has resolved to 0-n results, or until its lookup timeout has elapsed.
+// if the lookup timeout elapses, it returns an empty slice.
+func (d *ReverseDNSCache) Lookup(ip string) ([]string, error) {
+ if len(ip) == 0 {
+ return nil, nil
+ }
+ return d.lookup(ip)
+}
+
+func (d *ReverseDNSCache) lookup(ip string) ([]string, error) {
+ // check if the value is cached
+ d.rwLock.RLock()
+ result, found := d.lockedGetFromCache(ip)
+ if found && result.completed && result.expiresAt.After(time.Now()) {
+ defer d.rwLock.RUnlock()
+ atomic.AddUint64(&d.stats.CacheHit, 1)
+ // cache is valid
+ return result.domains, nil
+ }
+ d.rwLock.RUnlock()
+
+ // if it's not cached, kick off a lookup job and subscribe to the result.
+ lookupChan := d.subscribeTo(ip)
+ timer := time.NewTimer(d.lookupTimeout)
+ defer timer.Stop()
+
+ // timer is still necessary even if doLookup respects timeout due to worker
+ // pool starvation.
+ select {
+ case result := <-lookupChan:
+ return result.domains, result.err
+ case <-timer.C:
+ return nil, ErrTimeout
+ }
+}
+
+func (d *ReverseDNSCache) subscribeTo(ip string) callbackChannelType {
+ callback := make(callbackChannelType, 1)
+
+ d.rwLock.Lock()
+ defer d.rwLock.Unlock()
+
+ // confirm it's still not in the cache. This needs to be done under an active lock.
+ result, found := d.lockedGetFromCache(ip)
+ if found {
+ atomic.AddUint64(&d.stats.CacheHit, 1)
+ // has the request been answered since we last checked?
+ if result.completed {
+ // we can return the answer with the channel.
+ callback <- lookupResult{domains: result.domains}
+ return callback
+ }
+ // there's a request but it hasn't been answered yet;
+ // add yourself to the subscribers and return that.
+ result.callbacks = append(result.callbacks, callback)
+ d.lockedSaveToCache(result)
+ return callback
+ }
+
+ atomic.AddUint64(&d.stats.CacheMiss, 1)
+
+ // otherwise we need to register the request
+ l := &dnslookup{
+ ip: ip,
+ expiresAt: time.Now().Add(d.ttl),
+ callbacks: []callbackChannelType{callback},
+ }
+
+ d.lockedSaveToCache(l)
+ go d.doLookup(l.ip)
+ return callback
+}
+
+// lockedGetFromCache fetches from the correct internal ip cache.
+// you MUST first do a read or write lock before calling it, and keep locks around
+// the dnslookup that is returned until you clone it.
+func (d *ReverseDNSCache) lockedGetFromCache(ip string) (lookup *dnslookup, found bool) {
+ lookup, found = d.cache[ip]
+ if found && lookup.expiresAt.Before(time.Now()) {
+ return nil, false
+ }
+ return lookup, found
+}
+
+// lockedSaveToCache stores a lookup in the correct internal ip cache.
+// you MUST first do a write lock before calling it.
+func (d *ReverseDNSCache) lockedSaveToCache(lookup *dnslookup) {
+ if lookup.expiresAt.Before(time.Now()) {
+ return // don't cache.
+ }
+ d.cache[lookup.ip] = lookup
+}
+
+func (d *ReverseDNSCache) startCleanupWorker(ctx context.Context) {
+ go func() {
+ cleanupTick := time.NewTicker(10 * time.Second)
+ for {
+ select {
+ case <-cleanupTick.C:
+ d.cleanup()
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+}
+
+func (d *ReverseDNSCache) doLookup(ip string) {
+ ctx, cancel := context.WithTimeout(context.Background(), d.lookupTimeout)
+ defer cancel()
+ if err := d.sem.Acquire(ctx, 1); err != nil {
+ // lookup timeout
+ d.abandonLookup(ip, ErrTimeout)
+ return
+ }
+ defer d.sem.Release(1)
+
+ names, err := d.Resolver.LookupAddr(ctx, ip)
+ if err != nil {
+ d.abandonLookup(ip, err)
+ return
+ }
+
+ d.rwLock.Lock()
+ lookup, found := d.lockedGetFromCache(ip)
+ if !found {
+ d.rwLock.Unlock()
+ return
+ }
+
+ lookup.domains = names
+ lookup.completed = true
+ lookup.expiresAt = time.Now().Add(d.ttl) // extend the ttl now that we have a reply.
+ callbacks := lookup.callbacks
+ lookup.callbacks = nil
+
+ d.lockedSaveToCache(lookup)
+ d.rwLock.Unlock()
+
+ d.expireListLock.Lock()
+ // add it to the expireList.
+ d.expireList = append(d.expireList, lookup)
+ d.expireListLock.Unlock()
+
+ atomic.AddUint64(&d.stats.RequestsFilled, uint64(len(callbacks)))
+ for _, cb := range callbacks {
+ cb <- lookupResult{domains: names}
+ close(cb)
+ }
+}
+
+func (d *ReverseDNSCache) abandonLookup(ip string, err error) {
+ d.rwLock.Lock()
+ lookup, found := d.lockedGetFromCache(ip)
+ if !found {
+ d.rwLock.Unlock()
+ return
+ }
+
+ callbacks := lookup.callbacks
+ delete(d.cache, lookup.ip)
+ d.rwLock.Unlock()
+ // resolve the remaining callbacks to free the resources.
+ atomic.AddUint64(&d.stats.RequestsAbandoned, uint64(len(callbacks)))
+ for _, cb := range callbacks {
+ cb <- lookupResult{err: err}
+ close(cb)
+ }
+}
+
+func (d *ReverseDNSCache) cleanup() {
+ now := time.Now()
+ d.expireListLock.Lock()
+ if len(d.expireList) == 0 {
+ d.expireListLock.Unlock()
+ return
+ }
+ ipsToDelete := []string{}
+ for i := 0; i < len(d.expireList); i++ {
+ if d.expireList[i].expiresAt.After(now) {
+ break // done. Nothing after this point is expired.
+ }
+ ipsToDelete = append(ipsToDelete, d.expireList[i].ip)
+ }
+ if len(ipsToDelete) == 0 {
+ d.expireListLock.Unlock()
+ return
+ }
+ d.expireList = d.expireList[len(ipsToDelete):]
+ d.expireListLock.Unlock()
+
+ atomic.AddUint64(&d.stats.CacheExpire, uint64(len(ipsToDelete)))
+
+ d.rwLock.Lock()
+ defer d.rwLock.Unlock()
+ for _, ip := range ipsToDelete {
+ delete(d.cache, ip)
+ }
+}
+
+// blockAllWorkers is a test function that eats up all the worker pool space to
+// make sure workers are done running and there's no room to acquire a new worker.
+func (d *ReverseDNSCache) blockAllWorkers() {
+ d.sem.Acquire(context.Background(), int64(d.maxWorkers))
+}
+
+func (d *ReverseDNSCache) Stats() RDNSCacheStats {
+ stats := RDNSCacheStats{}
+ stats.CacheHit = atomic.LoadUint64(&d.stats.CacheHit)
+ stats.CacheMiss = atomic.LoadUint64(&d.stats.CacheMiss)
+ stats.CacheExpire = atomic.LoadUint64(&d.stats.CacheExpire)
+ stats.RequestsAbandoned = atomic.LoadUint64(&d.stats.RequestsAbandoned)
+ stats.RequestsFilled = atomic.LoadUint64(&d.stats.RequestsFilled)
+ return stats
+}
+
+func (d *ReverseDNSCache) Stop() {
+ d.cancelCleanupWorker()
+}
diff --git a/plugins/processors/reverse_dns/rdnscache_test.go b/plugins/processors/reverse_dns/rdnscache_test.go
new file mode 100644
index 0000000000000..e8466c27fd315
--- /dev/null
+++ b/plugins/processors/reverse_dns/rdnscache_test.go
@@ -0,0 +1,136 @@
+package reverse_dns
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestSimpleReverseDNSLookup(t *testing.T) {
+ d := NewReverseDNSCache(60*time.Second, 1*time.Second, -1)
+ defer d.Stop()
+
+ d.Resolver = &localResolver{}
+ answer, err := d.Lookup("127.0.0.1")
+ require.NoError(t, err)
+ require.Equal(t, []string{"localhost"}, answer)
+ d.blockAllWorkers()
+
+ // do another request with no workers available.
+ // it should read from cache instantly.
+ answer, err = d.Lookup("127.0.0.1")
+ require.NoError(t, err)
+ require.Equal(t, []string{"localhost"}, answer)
+
+ require.Len(t, d.cache, 1)
+ require.Len(t, d.expireList, 1)
+ d.cleanup()
+ require.Len(t, d.expireList, 1) // ttl hasn't hit yet.
+
+ stats := d.Stats()
+
+ require.EqualValues(t, 0, stats.CacheExpire)
+ require.EqualValues(t, 1, stats.CacheMiss)
+ require.EqualValues(t, 1, stats.CacheHit)
+ require.EqualValues(t, 1, stats.RequestsFilled)
+ require.EqualValues(t, 0, stats.RequestsAbandoned)
+}
+
+func TestParallelReverseDNSLookup(t *testing.T) {
+ d := NewReverseDNSCache(1*time.Second, 1*time.Second, -1)
+ defer d.Stop()
+
+ d.Resolver = &localResolver{}
+ var answer1 []string
+ var answer2 []string
+ wg := &sync.WaitGroup{}
+ wg.Add(2)
+ go func() {
+ answer, err := d.Lookup("127.0.0.1")
+ require.NoError(t, err)
+ answer1 = answer
+ wg.Done()
+ }()
+ go func() {
+ answer, err := d.Lookup("127.0.0.1")
+ require.NoError(t, err)
+ answer2 = answer
+ wg.Done()
+ }()
+
+ wg.Wait()
+
+ t.Log(answer1)
+ t.Log(answer2)
+
+ require.Equal(t, []string{"localhost"}, answer1)
+ require.Equal(t, []string{"localhost"}, answer2)
+
+ require.Len(t, d.cache, 1)
+
+ stats := d.Stats()
+
+ require.EqualValues(t, 1, stats.CacheMiss)
+ require.EqualValues(t, 1, stats.CacheHit)
+}
+
+func TestUnavailableDNSServerRespectsTimeout(t *testing.T) {
+ d := NewReverseDNSCache(0, 1, -1)
+ defer d.Stop()
+
+ d.Resolver = &timeoutResolver{}
+
+ result, err := d.Lookup("192.153.33.3")
+ require.Error(t, err)
+ require.Equal(t, ErrTimeout, err)
+
+ require.Nil(t, result)
+}
+
+func TestCleanupHappens(t *testing.T) {
+ ttl := 100 * time.Millisecond
+ d := NewReverseDNSCache(ttl, 1*time.Second, -1)
+ defer d.Stop()
+
+ d.Resolver = &localResolver{}
+ _, err := d.Lookup("127.0.0.1")
+ require.NoError(t, err)
+
+ require.Len(t, d.cache, 1)
+
+ time.Sleep(ttl) // wait for cache entry to expire.
+ d.cleanup()
+ require.Len(t, d.expireList, 0)
+
+ stats := d.Stats()
+
+ require.EqualValues(t, 1, stats.CacheExpire)
+ require.EqualValues(t, 1, stats.CacheMiss)
+ require.EqualValues(t, 0, stats.CacheHit)
+}
+
+func TestLookupTimeout(t *testing.T) {
+ d := NewReverseDNSCache(10*time.Second, 10*time.Second, -1)
+ defer d.Stop()
+
+ d.Resolver = &timeoutResolver{}
+ _, err := d.Lookup("127.0.0.1")
+ require.Error(t, err)
+ require.EqualValues(t, 1, d.Stats().RequestsAbandoned)
+}
+
+type timeoutResolver struct{}
+
+func (r *timeoutResolver) LookupAddr(ctx context.Context, addr string) (names []string, err error) {
+ return nil, errors.New("timeout")
+}
+
+type localResolver struct{}
+
+func (r *localResolver) LookupAddr(ctx context.Context, addr string) (names []string, err error) {
+ return []string{"localhost"}, nil
+}
diff --git a/plugins/processors/reverse_dns/reversedns.go b/plugins/processors/reverse_dns/reversedns.go
new file mode 100644
index 0000000000000..bef79a01c92eb
--- /dev/null
+++ b/plugins/processors/reverse_dns/reversedns.go
@@ -0,0 +1,156 @@
+package reverse_dns
+
+import (
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/plugins/processors"
+ "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel"
+)
+
+const sampleConfig = `
+ ## For optimal performance, you may want to limit which metrics are passed to this
+ ## processor. eg:
+ ## namepass = ["my_metric_*"]
+
+ ## cache_ttl is how long the dns entries should stay cached for.
+ ## generally longer is better, but if you expect a large number of diverse lookups
+ ## you'll want to consider memory use.
+ cache_ttl = "24h"
+
+ ## lookup_timeout is how long should you wait for a single dns request to repsond.
+ ## this is also the maximum acceptable latency for a metric travelling through
+ ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+ ## be passed on unaltered.
+ ## multiple simultaneous resolution requests for the same IP will only make a
+ ## single rDNS request, and they will all wait for the answer for this long.
+ lookup_timeout = "3s"
+
+ ## max_parallel_lookups is the maximum number of dns requests to be in flight
+ ## at the same time. Requesting hitting cached values do not count against this
+ ## total, and neither do mulptiple requests for the same IP.
+ ## It's probably best to keep this number fairly low.
+ max_parallel_lookups = 10
+
+ ## ordered controls whether or not the metrics need to stay in the same order
+ ## this plugin received them in. If false, this plugin will change the order
+ ## with requests hitting cached results moving through immediately and not
+ ## waiting on slower lookups. This may cause issues for you if you are
+ ## depending on the order of metrics staying the same. If so, set this to true.
+ ## keeping the metrics ordered may be slightly slower.
+ ordered = false
+
+ [[processors.reverse_dns.lookup]]
+ ## get the ip from the field "source_ip", and put the result in the field "source_name"
+ field = "source_ip"
+ dest = "source_name"
+
+ [[processors.reverse_dns.lookup]]
+ ## get the ip from the tag "destination_ip", and put the result in the tag
+ ## "destination_name".
+ tag = "destination_ip"
+ dest = "destination_name"
+
+ ## If you would prefer destination_name to be a field instead, you can use a
+ ## processors.converter after this one, specifying the order attribute.
+`
+
+type lookupEntry struct {
+ Tag string `toml:"tag"`
+ Field string `toml:"field"`
+ Dest string `toml:"dest"`
+}
+
+type ReverseDNS struct {
+ reverseDNSCache *ReverseDNSCache
+ acc telegraf.Accumulator
+ parallel parallel.Parallel
+
+ Lookups []lookupEntry `toml:"lookup"`
+ CacheTTL config.Duration `toml:"cache_ttl"`
+ LookupTimeout config.Duration `toml:"lookup_timeout"`
+ MaxParallelLookups int `toml:"max_parallel_lookups"`
+ Ordered bool `toml:"ordered"`
+ Log telegraf.Logger `toml:"-"`
+}
+
+func (r *ReverseDNS) SampleConfig() string {
+ return sampleConfig
+}
+
+func (r *ReverseDNS) Description() string {
+ return "ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name"
+}
+
+func (r *ReverseDNS) Start(acc telegraf.Accumulator) error {
+ r.acc = acc
+ r.reverseDNSCache = NewReverseDNSCache(
+ time.Duration(r.CacheTTL),
+ time.Duration(r.LookupTimeout),
+ r.MaxParallelLookups, // max parallel reverse-dns lookups
+ )
+ if r.Ordered {
+ r.parallel = parallel.NewOrdered(acc, r.asyncAdd, 10000, r.MaxParallelLookups)
+ } else {
+ r.parallel = parallel.NewUnordered(acc, r.asyncAdd, r.MaxParallelLookups)
+ }
+ return nil
+}
+
+func (r *ReverseDNS) Stop() error {
+ r.parallel.Stop()
+ r.reverseDNSCache.Stop()
+ return nil
+}
+
+func (r *ReverseDNS) Add(metric telegraf.Metric, acc telegraf.Accumulator) error {
+ r.parallel.Enqueue(metric)
+ return nil
+}
+
+func (r *ReverseDNS) asyncAdd(metric telegraf.Metric) []telegraf.Metric {
+ for _, lookup := range r.Lookups {
+ if len(lookup.Field) > 0 {
+ if ipField, ok := metric.GetField(lookup.Field); ok {
+ if ip, ok := ipField.(string); ok {
+ result, err := r.reverseDNSCache.Lookup(ip)
+ if err != nil {
+ r.Log.Errorf("lookup error: %v", err)
+ continue
+ }
+ if len(result) > 0 {
+ metric.AddField(lookup.Dest, result[0])
+ }
+ }
+ }
+ }
+ if len(lookup.Tag) > 0 {
+ if ipTag, ok := metric.GetTag(lookup.Tag); ok {
+ result, err := r.reverseDNSCache.Lookup(ipTag)
+ if err != nil {
+ r.Log.Errorf("lookup error: %v", err)
+ continue
+ }
+ if len(result) > 0 {
+ metric.AddTag(lookup.Dest, result[0])
+ }
+ }
+ }
+ }
+ return []telegraf.Metric{metric}
+}
+
+func init() {
+ processors.AddStreaming("reverse_dns", func() telegraf.StreamingProcessor {
+ return newReverseDNS()
+ })
+}
+
+func newReverseDNS() *ReverseDNS {
+ return &ReverseDNS{
+ CacheTTL: config.Duration(24 * time.Hour),
+ LookupTimeout: config.Duration(1 * time.Minute),
+ MaxParallelLookups: 10,
+ }
+}
diff --git a/plugins/processors/reverse_dns/reversedns_test.go b/plugins/processors/reverse_dns/reversedns_test.go
new file mode 100644
index 0000000000000..499dffb77e08b
--- /dev/null
+++ b/plugins/processors/reverse_dns/reversedns_test.go
@@ -0,0 +1,56 @@
+package reverse_dns
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/config"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSimpleReverseLookup(t *testing.T) {
+ now := time.Now()
+ m, _ := metric.New("name", map[string]string{
+ "dest_ip": "8.8.8.8",
+ }, map[string]interface{}{
+ "source_ip": "127.0.0.1",
+ }, now)
+
+ dns := newReverseDNS()
+ dns.Log = &testutil.Logger{}
+ dns.Lookups = []lookupEntry{
+ {
+ Field: "source_ip",
+ Dest: "source_name",
+ },
+ {
+ Tag: "dest_ip",
+ Dest: "dest_name",
+ },
+ }
+ acc := &testutil.Accumulator{}
+ dns.Start(acc)
+ dns.Add(m, acc)
+ dns.Stop()
+ // should be processed now.
+
+ require.Len(t, acc.GetTelegrafMetrics(), 1)
+ processedMetric := acc.GetTelegrafMetrics()[0]
+ f, ok := processedMetric.GetField("source_name")
+ require.True(t, ok)
+ require.EqualValues(t, "localhost", f)
+
+ tag, ok := processedMetric.GetTag("dest_name")
+ require.True(t, ok)
+ require.EqualValues(t, "dns.google.", tag)
+}
+
+func TestLoadingConfig(t *testing.T) {
+ c := config.NewConfig()
+ err := c.LoadConfigData([]byte("[[processors.reverse_dns]]\n" + sampleConfig))
+ require.NoError(t, err)
+
+ require.Len(t, c.Processors, 1)
+}
diff --git a/plugins/processors/s2geo/README.md b/plugins/processors/s2geo/README.md
new file mode 100644
index 0000000000000..d48947fe67c99
--- /dev/null
+++ b/plugins/processors/s2geo/README.md
@@ -0,0 +1,30 @@
+# S2 Geo Processor Plugin
+
+Use the `s2geo` processor to add tag with S2 cell ID token of specified [cell level][cell levels].
+The tag is used in `experimental/geo` Flux package functions.
+The `lat` and `lon` fields values should contain WGS-84 coordinates in decimal degrees.
+
+### Configuration
+
+```toml
+[[processors.s2geo]]
+ ## The name of the lat and lon fields containing WGS-84 latitude and
+ ## longitude in decimal degrees.
+ # lat_field = "lat"
+ # lon_field = "lon"
+
+ ## New tag to create
+ # tag_key = "s2_cell_id"
+
+ ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+ # cell_level = 9
+```
+
+### Example
+
+```diff
+- mta,area=llir,id=GO505_20_2704,status=1 lat=40.878738,lon=-72.517572 1560540094
++ mta,area=llir,id=GO505_20_2704,status=1,s2_cell_id=89e8ed4 lat=40.878738,lon=-72.517572 1560540094
+```
+
+[cell levels]: https://s2geometry.io/resources/s2cell_statistics.html
diff --git a/plugins/processors/s2geo/s2geo.go b/plugins/processors/s2geo/s2geo.go
new file mode 100644
index 0000000000000..5376a6657aa4c
--- /dev/null
+++ b/plugins/processors/s2geo/s2geo.go
@@ -0,0 +1,78 @@
+package geo
+
+import (
+ "fmt"
+
+ "github.com/golang/geo/s2"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+type Geo struct {
+ LatField string `toml:"lat_field"`
+ LonField string `toml:"lon_field"`
+ TagKey string `toml:"tag_key"`
+ CellLevel int `toml:"cell_level"`
+}
+
+var SampleConfig = `
+ ## The name of the lat and lon fields containing WGS-84 latitude and
+ ## longitude in decimal degrees.
+ # lat_field = "lat"
+ # lon_field = "lon"
+
+ ## New tag to create
+ # tag_key = "s2_cell_id"
+
+ ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+ # cell_level = 9
+`
+
+func (g *Geo) SampleConfig() string {
+ return SampleConfig
+}
+
+func (g *Geo) Description() string {
+ return "Add the S2 Cell ID as a tag based on latitude and longitude fields"
+}
+
+func (g *Geo) Init() error {
+ if g.CellLevel < 0 || g.CellLevel > 30 {
+ return fmt.Errorf("invalid cell level %d", g.CellLevel)
+ }
+ return nil
+}
+
+func (g *Geo) Apply(in ...telegraf.Metric) []telegraf.Metric {
+ for _, point := range in {
+ var latOk, lonOk bool
+ var lat, lon float64
+ for _, field := range point.FieldList() {
+ switch field.Key {
+ case g.LatField:
+ lat, latOk = field.Value.(float64)
+ case g.LonField:
+ lon, lonOk = field.Value.(float64)
+ }
+ }
+ if latOk && lonOk {
+ cellID := s2.CellIDFromLatLng(s2.LatLngFromDegrees(lat, lon))
+ if cellID.IsValid() {
+ value := cellID.Parent(g.CellLevel).ToToken()
+ point.AddTag(g.TagKey, value)
+ }
+ }
+ }
+ return in
+}
+
+func init() {
+ processors.Add("s2geo", func() telegraf.Processor {
+ return &Geo{
+ LatField: "lat",
+ LonField: "lon",
+ TagKey: "s2_cell_id",
+ CellLevel: 9,
+ }
+ })
+}
diff --git a/plugins/processors/s2geo/s2geo_test.go b/plugins/processors/s2geo/s2geo_test.go
new file mode 100644
index 0000000000000..b06a1a06d3765
--- /dev/null
+++ b/plugins/processors/s2geo/s2geo_test.go
@@ -0,0 +1,55 @@
+package geo
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGeo(t *testing.T) {
+ plugin := &Geo{
+ LatField: "lat",
+ LonField: "lon",
+ TagKey: "s2_cell_id",
+ CellLevel: 11,
+ }
+
+ pluginMostlyDefault := &Geo{
+ CellLevel: 11,
+ }
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ metric := testutil.MustMetric(
+ "mta",
+ map[string]string{},
+ map[string]interface{}{
+ "lat": 40.878738,
+ "lon": -72.517572,
+ },
+ time.Unix(1578603600, 0),
+ )
+
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "mta",
+ map[string]string{
+ "s2_cell_id": "89e8ed4",
+ },
+ map[string]interface{}{
+ "lat": 40.878738,
+ "lon": -72.517572,
+ },
+ time.Unix(1578603600, 0),
+ ),
+ }
+
+ actual := plugin.Apply(metric)
+ testutil.RequireMetricsEqual(t, expected, actual)
+ actual = pluginMostlyDefault.Apply(metric)
+ testutil.RequireMetricsEqual(t, expected, actual)
+}
diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md
new file mode 100644
index 0000000000000..961151a7fc092
--- /dev/null
+++ b/plugins/processors/starlark/README.md
@@ -0,0 +1,165 @@
+# Starlark Processor
+
+The `starlark` processor calls a Starlark function for each matched metric,
+allowing for custom programmatic metric processing.
+
+The Starlark language is a dialect of Python, and will be familiar to those who
+have experience with the Python language. However, there are major [differences](#python-differences).
+Existing Python code is unlikely to work unmodified. The execution environment
+is sandboxed, and it is not possible to do I/O operations such as reading from
+files or sockets.
+
+The **[Starlark specification][]** has details about the syntax and available
+functions.
+
+Telegraf minimum version: Telegraf 1.15.0
+
+### Configuration
+
+```toml
+[[processors.starlark]]
+ ## The Starlark source can be set as a string in this configuration file, or
+ ## by referencing a file containing the script. Only one source or script
+ ## should be set at once.
+
+ ## Source of the Starlark script.
+ source = '''
+def apply(metric):
+ return metric
+'''
+
+ ## File containing a Starlark script.
+ # script = "/usr/local/bin/myscript.star"
+```
+
+### Usage
+
+The Starlark code should contain a function called `apply` that takes a metric as
+its single argument. The function will be called with each metric, and can
+return `None`, a single metric, or a list of metrics.
+
+```python
+def apply(metric):
+ return metric
+```
+
+For a list of available types and functions that can be used in the code, see
+the [Starlark specification][].
+
+In addition to these, the following InfluxDB-specific
+types and functions are exposed to the script.
+
+- **Metric(*name*)**:
+Create a new metric with the given measurement name. The metric will have no
+tags or fields and defaults to the current time.
+
+- **name**:
+The name is a [string][] containing the metric measurement name.
+
+- **tags**:
+A [dict-like][dict] object containing the metric's tags.
+
+- **fields**:
+A [dict-like][dict] object containing the metric's fields. The values may be
+of type int, float, string, or bool.
+
+- **time**:
+The timestamp of the metric as an integer in nanoseconds since the Unix
+epoch.
+
+- **deepcopy(*metric*)**: Make a copy of an existing metric.
+
+### Python Differences
+
+While Starlark is similar to Python, there are important differences to note:
+
+- Starlark has limited support for error handling and no exceptions. If an
+ error occurs the script will immediately end and Telegraf will drop the
+ metric. Check the Telegraf logfile for details about the error.
+
+- It is not possible to import other packages and the Python standard library
+ is not available.
+
+- It is not possible to open files or sockets.
+
+- These common keywords are **not supported** in the Starlark grammar:
+ ```
+ as finally nonlocal
+ assert from raise
+ class global try
+ del import with
+ except is yield
+ ```
+
+### Common Questions
+
+**How can I drop/delete a metric?**
+
+If you don't return the metric it will be deleted. Usually this means the
+function should `return None`.
+
+**How should I make a copy of a metric?**
+
+Use `deepcopy(metric)` to create a copy of the metric.
+
+**How can I return multiple metrics?**
+
+You can return a list of metrics:
+
+```python
+def apply(metric):
+ m2 = deepcopy(metric)
+ return [metric, m2]
+```
+
+**What happens to a tracking metric if an error occurs in the script?**
+
+The metric is marked as undelivered.
+
+**How do I create a new metric?**
+
+Use the `Metric(name)` function and set at least one field.
+
+**What is the fastest way to iterate over tags/fields?**
+
+The fastest way to iterate is to use a for-loop on the tags or fields attribute:
+
+```python
+def apply(metric):
+ for k in metric.tags:
+ pass
+ return metric
+```
+
+When you use this form, it is not possible to modify the tags inside the loop,
+if this is needed you should use one of the `.keys()`, `.values()`, or `.items()` methods:
+
+```python
+def apply(metric):
+ for k, v in metric.tags.items():
+ pass
+ return metric
+```
+
+**How can I save values across multiple calls to the script?**
+
+Telegraf freezes the global scope, which prevents it from being modified.
+Attempting to modify the global scope will fail with an error.
+
+
+### Examples
+
+- [ratio](/plugins/processors/starlark/testdata/ratio.star) - Compute the ratio of two integer fields
+- [rename](/plugins/processors/starlark/testdata/rename.star) - Rename tags or fields using a name mapping.
+- [scale](/plugins/processors/starlark/testdata/scale.star) - Multiply any field by a number
+- [number logic](/plugins/processors/starlark/testdata/number_logic.star) - transform a numerical value to another numerical value
+- [pivot](/plugins/processors/starlark/testdata/pivot.star) - Pivots a key's value to be the key for another key.
+- [value filter](/plugins/processors/starlark/testdata/value_filter.star) - remove a metric based on a field value.
+
+[All examples](/plugins/processors/starlark/testdata) are in the testdata folder.
+
+Open a Pull Request to add any other useful Starlark examples.
+
+[Starlark specification]: https://github.com/google/starlark-go/blob/master/doc/spec.md
+[string]: https://github.com/google/starlark-go/blob/master/doc/spec.md#strings
+[dict]: https://github.com/google/starlark-go/blob/master/doc/spec.md#dictionaries
diff --git a/plugins/processors/starlark/builtins.go b/plugins/processors/starlark/builtins.go
new file mode 100644
index 0000000000000..4eda39b7d8d12
--- /dev/null
+++ b/plugins/processors/starlark/builtins.go
@@ -0,0 +1,261 @@
+package starlark
+
+import (
+ "fmt"
+ "sort"
+ "time"
+
+ "github.com/influxdata/telegraf/metric"
+ "go.starlark.net/starlark"
+)
+
+func newMetric(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ var name starlark.String
+ if err := starlark.UnpackPositionalArgs("Metric", args, kwargs, 1, &name); err != nil {
+ return nil, err
+ }
+
+ m, err := metric.New(string(name), nil, nil, time.Now())
+ if err != nil {
+ return nil, err
+ }
+
+ return &Metric{metric: m}, nil
+}
+
+func deepcopy(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ var sm *Metric
+ if err := starlark.UnpackPositionalArgs("deepcopy", args, kwargs, 1, &sm); err != nil {
+ return nil, err
+ }
+
+ dup := sm.metric.Copy()
+ dup.Drop()
+ return &Metric{metric: dup}, nil
+}
+
+type builtinMethod func(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error)
+
+func builtinAttr(recv starlark.Value, name string, methods map[string]builtinMethod) (starlark.Value, error) {
+ method := methods[name]
+ if method == nil {
+ return starlark.None, fmt.Errorf("no such method '%s'", name)
+ }
+
+ // Allocate a closure over 'method'.
+ impl := func(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ return method(b, args, kwargs)
+ }
+ return starlark.NewBuiltin(name, impl).BindReceiver(recv), nil
+}
+
+func builtinAttrNames(methods map[string]builtinMethod) []string {
+ names := make([]string, 0, len(methods))
+ for name := range methods {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ return names
+}
+
+// nameErr returns an error message of the form "name: msg"
+// where name is b.Name() and msg is a string or error.
+func nameErr(b *starlark.Builtin, msg interface{}) error {
+ return fmt.Errorf("%s: %v", b.Name(), msg)
+}
+
+// --- dictionary methods ---
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear
+func dict_clear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+
+ type HasClear interface {
+ Clear() error
+ }
+ return starlark.None, b.Receiver().(HasClear).Clear()
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop
+func dict_pop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ var k, d starlark.Value
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+
+ type HasDelete interface {
+ Delete(k starlark.Value) (starlark.Value, bool, error)
+ }
+ if v, found, err := b.Receiver().(HasDelete).Delete(k); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) // dict is frozen or key is unhashable
+ } else if found {
+ return v, nil
+ } else if d != nil {
+ return d, nil
+ }
+ return starlark.None, fmt.Errorf("%s: missing key", b.Name())
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·popitem
+func dict_popitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+
+ type HasPopItem interface {
+ PopItem() (starlark.Value, error)
+ }
+ return b.Receiver().(HasPopItem).PopItem()
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get
+func dict_get(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ var key, dflt starlark.Value
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+ if v, ok, err := b.Receiver().(starlark.Mapping).Get(key); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ } else if ok {
+ return v, nil
+ } else if dflt != nil {
+ return dflt, nil
+ }
+ return starlark.None, nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault
+func dict_setdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ var key, dflt starlark.Value = nil, starlark.None
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+
+ recv := b.Receiver().(starlark.HasSetKey)
+ v, found, err := recv.Get(key)
+ if err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+ if !found {
+ v = dflt
+ if err := recv.SetKey(key, dflt); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+ }
+ return v, nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update
+func dict_update(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ // Unpack the arguments
+ if len(args) > 1 {
+ return nil, fmt.Errorf("update: got %d arguments, want at most 1", len(args))
+ }
+
+ // Get the target
+ dict := b.Receiver().(starlark.HasSetKey)
+
+ if len(args) == 1 {
+ switch updates := args[0].(type) {
+ case starlark.IterableMapping:
+ // Iterate over dict's key/value pairs, not just keys.
+ for _, item := range updates.Items() {
+ if err := dict.SetKey(item[0], item[1]); err != nil {
+ return nil, err // dict is frozen
+ }
+ }
+ default:
+ // all other sequences
+ iter := starlark.Iterate(updates)
+ if iter == nil {
+ return nil, fmt.Errorf("got %s, want iterable", updates.Type())
+ }
+ defer iter.Done()
+ var pair starlark.Value
+ for i := 0; iter.Next(&pair); i++ {
+ iter2 := starlark.Iterate(pair)
+ if iter2 == nil {
+ return nil, fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type())
+
+ }
+ defer iter2.Done()
+ len := starlark.Len(pair)
+ if len < 0 {
+ return nil, fmt.Errorf("dictionary update sequence element #%d has unknown length (%s)", i, pair.Type())
+ } else if len != 2 {
+ return nil, fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, len)
+ }
+ var k, v starlark.Value
+ iter2.Next(&k)
+ iter2.Next(&v)
+ if err := dict.SetKey(k, v); err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ // Then add the kwargs.
+ before := starlark.Len(dict)
+ for _, pair := range kwargs {
+ if err := dict.SetKey(pair[0], pair[1]); err != nil {
+ return nil, err // dict is frozen
+ }
+ }
+ // In the common case, each kwarg will add another dict entry.
+ // If that's not so, check whether it is because there was a duplicate kwarg.
+ if starlark.Len(dict) < before+len(kwargs) {
+ keys := make(map[starlark.String]bool, len(kwargs))
+ for _, kv := range kwargs {
+ k := kv[0].(starlark.String)
+ if keys[k] {
+ return nil, fmt.Errorf("duplicate keyword arg: %v", k)
+ }
+ keys[k] = true
+ }
+ }
+
+ return starlark.None, nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items
+func dict_items(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+ items := b.Receiver().(starlark.IterableMapping).Items()
+ res := make([]starlark.Value, len(items))
+ for i, item := range items {
+ res[i] = item // convert [2]starlark.Value to starlark.Value
+ }
+ return starlark.NewList(res), nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys
+func dict_keys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+
+ items := b.Receiver().(starlark.IterableMapping).Items()
+ res := make([]starlark.Value, len(items))
+ for i, item := range items {
+ res[i] = item[0]
+ }
+ return starlark.NewList(res), nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update
+func dict_values(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {
+ return starlark.None, fmt.Errorf("%s: %v", b.Name(), err)
+ }
+ items := b.Receiver().(starlark.IterableMapping).Items()
+ res := make([]starlark.Value, len(items))
+ for i, item := range items {
+ res[i] = item[1]
+ }
+ return starlark.NewList(res), nil
+}
diff --git a/plugins/processors/starlark/field_dict.go b/plugins/processors/starlark/field_dict.go
new file mode 100644
index 0000000000000..e0c0349b617a1
--- /dev/null
+++ b/plugins/processors/starlark/field_dict.go
@@ -0,0 +1,247 @@
+package starlark
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/influxdata/telegraf"
+ "go.starlark.net/starlark"
+)
+
+// FieldDict is a starlark.Value for the metric fields. It is heavily based on the
+// starlark.Dict.
+type FieldDict struct {
+ *Metric
+}
+
+func (d FieldDict) String() string {
+ buf := new(strings.Builder)
+ buf.WriteString("{")
+ sep := ""
+ for _, item := range d.Items() {
+ k, v := item[0], item[1]
+ buf.WriteString(sep)
+ buf.WriteString(k.String())
+ buf.WriteString(": ")
+ buf.WriteString(v.String())
+ sep = ", "
+ }
+ buf.WriteString("}")
+ return buf.String()
+}
+
+func (d FieldDict) Type() string {
+ return "Fields"
+}
+
+func (d FieldDict) Freeze() {
+ d.frozen = true
+}
+
+func (d FieldDict) Truth() starlark.Bool {
+ return len(d.metric.FieldList()) != 0
+}
+
+func (d FieldDict) Hash() (uint32, error) {
+ return 0, errors.New("not hashable")
+}
+
+// AttrNames implements the starlark.HasAttrs interface.
+func (d FieldDict) AttrNames() []string {
+ return builtinAttrNames(FieldDictMethods)
+}
+
+// Attr implements the starlark.HasAttrs interface.
+func (d FieldDict) Attr(name string) (starlark.Value, error) {
+ return builtinAttr(d, name, FieldDictMethods)
+}
+
+var FieldDictMethods = map[string]builtinMethod{
+ "clear": dict_clear,
+ "get": dict_get,
+ "items": dict_items,
+ "keys": dict_keys,
+ "pop": dict_pop,
+ "popitem": dict_popitem,
+ "setdefault": dict_setdefault,
+ "update": dict_update,
+ "values": dict_values,
+}
+
+// Get implements the starlark.Mapping interface.
+func (d FieldDict) Get(key starlark.Value) (v starlark.Value, found bool, err error) {
+ if k, ok := key.(starlark.String); ok {
+ gv, found := d.metric.GetField(k.GoString())
+ if !found {
+ return starlark.None, false, nil
+ }
+
+ v, err := asStarlarkValue(gv)
+ if err != nil {
+ return starlark.None, false, err
+ }
+ return v, true, nil
+ }
+
+ return starlark.None, false, errors.New("key must be of type 'str'")
+}
+
+// SetKey implements the starlark.HasSetKey interface to support map update
+// using x[k]=v syntax, like a dictionary.
+func (d FieldDict) SetKey(k, v starlark.Value) error {
+ if d.fieldIterCount > 0 {
+ return fmt.Errorf("cannot insert during iteration")
+ }
+
+ key, ok := k.(starlark.String)
+ if !ok {
+ return errors.New("field key must be of type 'str'")
+ }
+
+ gv, err := asGoValue(v)
+ if err != nil {
+ return err
+ }
+
+ d.metric.AddField(key.GoString(), gv)
+ return nil
+}
+
+// Items implements the starlark.IterableMapping interface.
+func (d FieldDict) Items() []starlark.Tuple {
+ items := make([]starlark.Tuple, 0, len(d.metric.FieldList()))
+ for _, field := range d.metric.FieldList() {
+ key := starlark.String(field.Key)
+ sv, err := asStarlarkValue(field.Value)
+ if err != nil {
+ continue
+ }
+ pair := starlark.Tuple{key, sv}
+ items = append(items, pair)
+ }
+ return items
+}
+
+func (d FieldDict) Clear() error {
+ if d.fieldIterCount > 0 {
+ return fmt.Errorf("cannot delete during iteration")
+ }
+
+ keys := make([]string, 0, len(d.metric.FieldList()))
+ for _, field := range d.metric.FieldList() {
+ keys = append(keys, field.Key)
+ }
+
+ for _, key := range keys {
+ d.metric.RemoveField(key)
+ }
+ return nil
+}
+
+func (d FieldDict) PopItem() (v starlark.Value, err error) {
+ if d.fieldIterCount > 0 {
+ return nil, fmt.Errorf("cannot delete during iteration")
+ }
+
+ for _, field := range d.metric.FieldList() {
+ k := field.Key
+ v := field.Value
+
+ d.metric.RemoveField(k)
+
+ sk := starlark.String(k)
+ sv, err := asStarlarkValue(v)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert to starlark value")
+ }
+
+ return starlark.Tuple{sk, sv}, nil
+ }
+
+ return nil, errors.New("popitem(): field dictionary is empty")
+}
+
+func (d FieldDict) Delete(k starlark.Value) (v starlark.Value, found bool, err error) {
+ if d.fieldIterCount > 0 {
+ return nil, false, fmt.Errorf("cannot delete during iteration")
+ }
+
+ if key, ok := k.(starlark.String); ok {
+ value, ok := d.metric.GetField(key.GoString())
+ if ok {
+ d.metric.RemoveField(key.GoString())
+ sv, err := asStarlarkValue(value)
+ return sv, ok, err
+ }
+ }
+
+ return starlark.None, false, errors.New("key must be of type 'str'")
+}
+
+// Items implements the starlark.Mapping interface.
+func (d FieldDict) Iterate() starlark.Iterator {
+ d.fieldIterCount++
+ return &FieldIterator{Metric: d.Metric, fields: d.metric.FieldList()}
+}
+
+type FieldIterator struct {
+ *Metric
+ fields []*telegraf.Field
+}
+
+// Next implements the starlark.Iterator interface.
+func (i *FieldIterator) Next(p *starlark.Value) bool {
+ if len(i.fields) == 0 {
+ return false
+ }
+
+ field := i.fields[0]
+ i.fields = i.fields[1:]
+ *p = starlark.String(field.Key)
+
+ return true
+}
+
+// Done implements the starlark.Iterator interface.
+func (i *FieldIterator) Done() {
+ i.fieldIterCount--
+}
+
+// AsStarlarkValue converts a field value to a starlark.Value.
+func asStarlarkValue(value interface{}) (starlark.Value, error) {
+ switch v := value.(type) {
+ case float64:
+ return starlark.Float(v), nil
+ case int64:
+ return starlark.MakeInt64(v), nil
+ case uint64:
+ return starlark.MakeUint64(v), nil
+ case string:
+ return starlark.String(v), nil
+ case bool:
+ return starlark.Bool(v), nil
+ }
+
+ return starlark.None, errors.New("invalid type")
+}
+
+// AsGoValue converts a starlark.Value to a field value.
+func asGoValue(value interface{}) (interface{}, error) {
+ switch v := value.(type) {
+ case starlark.Float:
+ return float64(v), nil
+ case starlark.Int:
+ n, ok := v.Int64()
+ if !ok {
+ return nil, errors.New("cannot represent integer as int64")
+ }
+ return n, nil
+ case starlark.String:
+ return string(v), nil
+ case starlark.Bool:
+ return bool(v), nil
+ }
+
+ return nil, errors.New("invalid starlark type")
+}
diff --git a/plugins/processors/starlark/metric.go b/plugins/processors/starlark/metric.go
new file mode 100644
index 0000000000000..031d24ad69635
--- /dev/null
+++ b/plugins/processors/starlark/metric.go
@@ -0,0 +1,148 @@
+package starlark
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "go.starlark.net/starlark"
+)
+
+type Metric struct {
+ metric telegraf.Metric
+ tagIterCount int
+ fieldIterCount int
+ frozen bool
+}
+
+// Wrap updates the starlark.Metric to wrap a new telegraf.Metric.
+func (m *Metric) Wrap(metric telegraf.Metric) {
+ m.metric = metric
+ m.tagIterCount = 0
+ m.fieldIterCount = 0
+ m.frozen = false
+}
+
+// Unwrap removes the telegraf.Metric from the startlark.Metric.
+func (m *Metric) Unwrap() telegraf.Metric {
+ return m.metric
+}
+
+// String returns the starlark representation of the Metric.
+//
+// The String function is called by both the repr() and str() functions, and so
+// it behaves more like the repr function would in Python.
+func (m *Metric) String() string {
+ buf := new(strings.Builder)
+ buf.WriteString("Metric(")
+ buf.WriteString(m.Name().String())
+ buf.WriteString(", tags=")
+ buf.WriteString(m.Tags().String())
+ buf.WriteString(", fields=")
+ buf.WriteString(m.Fields().String())
+ buf.WriteString(", time=")
+ buf.WriteString(m.Time().String())
+ buf.WriteString(")")
+ return buf.String()
+}
+
+func (m *Metric) Type() string {
+ return "Metric"
+}
+
+func (m *Metric) Freeze() {
+ m.frozen = true
+}
+
+func (m *Metric) Truth() starlark.Bool {
+ return true
+}
+
+func (m *Metric) Hash() (uint32, error) {
+ return 0, errors.New("not hashable")
+}
+
+// AttrNames implements the starlark.HasAttrs interface.
+func (m *Metric) AttrNames() []string {
+ return []string{"name", "tags", "fields", "time"}
+}
+
+// Attr implements the starlark.HasAttrs interface.
+func (m *Metric) Attr(name string) (starlark.Value, error) {
+ switch name {
+ case "name":
+ return m.Name(), nil
+ case "tags":
+ return m.Tags(), nil
+ case "fields":
+ return m.Fields(), nil
+ case "time":
+ return m.Time(), nil
+ default:
+ // Returning nil, nil indicates "no such field or method"
+ return nil, nil
+ }
+}
+
+// SetField implements the starlark.HasSetField interface.
+func (m *Metric) SetField(name string, value starlark.Value) error {
+ if m.frozen {
+ return fmt.Errorf("cannot modify frozen metric")
+ }
+
+ switch name {
+ case "name":
+ return m.SetName(value)
+ case "time":
+ return m.SetTime(value)
+ case "tags":
+ return errors.New("cannot set tags")
+ case "fields":
+ return errors.New("cannot set fields")
+ default:
+ return starlark.NoSuchAttrError(
+ fmt.Sprintf("cannot assign to field '%s'", name))
+ }
+}
+
+func (m *Metric) Name() starlark.String {
+ return starlark.String(m.metric.Name())
+}
+
+func (m *Metric) SetName(value starlark.Value) error {
+ if str, ok := value.(starlark.String); ok {
+ m.metric.SetName(str.GoString())
+ return nil
+ }
+
+ return errors.New("type error")
+}
+
+func (m *Metric) Tags() TagDict {
+ return TagDict{m}
+}
+
+func (m *Metric) Fields() FieldDict {
+ return FieldDict{m}
+}
+
+func (m *Metric) Time() starlark.Int {
+ return starlark.MakeInt64(m.metric.Time().UnixNano())
+}
+
+func (m *Metric) SetTime(value starlark.Value) error {
+ switch v := value.(type) {
+ case starlark.Int:
+ ns, ok := v.Int64()
+ if !ok {
+ return errors.New("type error: unrepresentable time")
+ }
+ tm := time.Unix(0, ns)
+ m.metric.SetTime(tm)
+ return nil
+ default:
+ return errors.New("type error")
+ }
+}
diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go
new file mode 100644
index 0000000000000..e2002a146aa68
--- /dev/null
+++ b/plugins/processors/starlark/starlark.go
@@ -0,0 +1,215 @@
+package starlark
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+ "go.starlark.net/resolve"
+ "go.starlark.net/starlark"
+)
+
+const (
+ description = "Process metrics using a Starlark script"
+ sampleConfig = `
+ ## The Starlark source can be set as a string in this configuration file, or
+ ## by referencing a file containing the script. Only one source or script
+ ## should be set at once.
+ ##
+ ## Source of the Starlark script.
+ source = '''
+def apply(metric):
+ return metric
+'''
+
+ ## File containing a Starlark script.
+ # script = "/usr/local/bin/myscript.star"
+`
+)
+
+type Starlark struct {
+ Source string `toml:"source"`
+ Script string `toml:"script"`
+
+ Log telegraf.Logger `toml:"-"`
+
+ thread *starlark.Thread
+ applyFunc *starlark.Function
+ args starlark.Tuple
+ results []telegraf.Metric
+}
+
+func (s *Starlark) Init() error {
+ if s.Source == "" && s.Script == "" {
+ return errors.New("one of source or script must be set")
+ }
+ if s.Source != "" && s.Script != "" {
+ return errors.New("both source or script cannot be set")
+ }
+
+ s.thread = &starlark.Thread{
+ Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) },
+ }
+
+ builtins := starlark.StringDict{}
+ builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric)
+ builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy)
+
+ program, err := s.sourceProgram(builtins)
+ if err != nil {
+ return err
+ }
+
+ // Execute source
+ globals, err := program.Init(s.thread, builtins)
+ if err != nil {
+ return err
+ }
+
+ // Freeze the global state. This prevents modifications to the processor
+ // state and prevents scripts from containing errors storing tracking
+ // metrics. Tasks that require global state will not be possible due to
+ // this, so maybe we should relax this in the future.
+ globals.Freeze()
+
+ // The source should define an apply function.
+ apply := globals["apply"]
+
+ if apply == nil {
+ return errors.New("apply is not defined")
+ }
+
+ var ok bool
+ if s.applyFunc, ok = apply.(*starlark.Function); !ok {
+ return errors.New("apply is not a function")
+ }
+
+ if s.applyFunc.NumParams() != 1 {
+ return errors.New("apply function must take one parameter")
+ }
+
+ // Reusing the same metric wrapper to skip an allocation. This will cause
+ // any saved references to point to the new metric, but due to freezing the
+ // globals none should exist.
+ s.args = make(starlark.Tuple, 1)
+ s.args[0] = &Metric{}
+
+ // Preallocate a slice for return values.
+ s.results = make([]telegraf.Metric, 0, 10)
+
+ return nil
+}
+
+func (s *Starlark) sourceProgram(builtins starlark.StringDict) (*starlark.Program, error) {
+ if s.Source != "" {
+ _, program, err := starlark.SourceProgram("processor.starlark", s.Source, builtins.Has)
+ return program, err
+ }
+ _, program, err := starlark.SourceProgram(s.Script, nil, builtins.Has)
+ return program, err
+}
+
+func (s *Starlark) SampleConfig() string {
+ return sampleConfig
+}
+
+func (s *Starlark) Description() string {
+ return description
+}
+
+func (s *Starlark) Start(acc telegraf.Accumulator) error {
+ return nil
+}
+
+func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error {
+ s.args[0].(*Metric).Wrap(metric)
+
+ rv, err := starlark.Call(s.thread, s.applyFunc, s.args, nil)
+ if err != nil {
+ if err, ok := err.(*starlark.EvalError); ok {
+ for _, line := range strings.Split(err.Backtrace(), "\n") {
+ s.Log.Error(line)
+ }
+ }
+ metric.Reject()
+ return err
+ }
+
+ switch rv := rv.(type) {
+ case *starlark.List:
+ iter := rv.Iterate()
+ defer iter.Done()
+ var v starlark.Value
+ for iter.Next(&v) {
+ switch v := v.(type) {
+ case *Metric:
+ m := v.Unwrap()
+ if containsMetric(s.results, m) {
+ s.Log.Errorf("Duplicate metric reference detected")
+ continue
+ }
+ s.results = append(s.results, m)
+ acc.AddMetric(m)
+ default:
+ s.Log.Errorf("Invalid type returned in list: %s", v.Type())
+ }
+ }
+
+ // If the script didn't return the original metrics, mark it as
+ // successfully handled.
+ if !containsMetric(s.results, metric) {
+ metric.Accept()
+ }
+
+ // clear results
+ for i := range s.results {
+ s.results[i] = nil
+ }
+ s.results = s.results[:0]
+ case *Metric:
+ m := rv.Unwrap()
+
+ // If the script returned a different metric, mark this metric as
+ // successfully handled.
+ if m != metric {
+ metric.Accept()
+ }
+ acc.AddMetric(m)
+ case starlark.NoneType:
+ metric.Drop()
+ default:
+ return fmt.Errorf("Invalid type returned: %T", rv)
+ }
+ return nil
+}
+
+func (s *Starlark) Stop() error {
+ return nil
+}
+
+func containsMetric(metrics []telegraf.Metric, metric telegraf.Metric) bool {
+ for _, m := range metrics {
+ if m == metric {
+ return true
+ }
+ }
+ return false
+}
+
+func init() {
+ // https://github.com/bazelbuild/starlark/issues/20
+ resolve.AllowNestedDef = true
+ resolve.AllowLambda = true
+ resolve.AllowFloat = true
+ resolve.AllowSet = true
+ resolve.AllowGlobalReassign = true
+ resolve.AllowRecursion = true
+}
+
+func init() {
+ processors.AddStreaming("starlark", func() telegraf.StreamingProcessor {
+ return &Starlark{}
+ })
+}
diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go
new file mode 100644
index 0000000000000..ce0b1803c959c
--- /dev/null
+++ b/plugins/processors/starlark/starlark_test.go
@@ -0,0 +1,2865 @@
+package starlark
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/parsers"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+// Tests for runtime errors in the processors Init function.
+func TestInitError(t *testing.T) {
+ tests := []struct {
+ name string
+ plugin *Starlark
+ }{
+ {
+ name: "source must define apply",
+ plugin: &Starlark{
+ Source: "",
+ Log: testutil.Logger{},
+ },
+ },
+ {
+ name: "apply must be a function",
+ plugin: &Starlark{
+ Source: `
+apply = 42
+`,
+ Log: testutil.Logger{},
+ },
+ },
+ {
+ name: "apply function must take one arg",
+ plugin: &Starlark{
+ Source: `
+def apply():
+ pass
+`,
+ Log: testutil.Logger{},
+ },
+ },
+ {
+ name: "package scope must have valid syntax",
+ plugin: &Starlark{
+ Source: `
+for
+`,
+ Log: testutil.Logger{},
+ },
+ },
+ {
+ name: "no source no script",
+ plugin: &Starlark{
+ Log: testutil.Logger{},
+ },
+ },
+ {
+ name: "source and script",
+ plugin: &Starlark{
+ Source: `
+def apply():
+ pass
+`,
+ Script: "testdata/ratio.star",
+ Log: testutil.Logger{},
+ },
+ },
+ {
+ name: "script file not found",
+ plugin: &Starlark{
+ Script: "testdata/file_not_found.star",
+ Log: testutil.Logger{},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.plugin.Init()
+ require.Error(t, err)
+ })
+ }
+}
+
+func TestApply(t *testing.T) {
+ // Tests for the behavior of the processors Apply function.
+ var applyTests = []struct {
+ name string
+ source string
+ input []telegraf.Metric
+ expected []telegraf.Metric
+ expectedErrorStr string
+ }{
+ {
+ name: "drop metric",
+ source: `
+def apply(metric):
+ return None
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ },
+ {
+ name: "passthrough",
+ source: `
+def apply(metric):
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "read value from global scope",
+ source: `
+names = {
+ 'cpu': 'cpu2',
+ 'mem': 'mem2',
+}
+
+def apply(metric):
+ metric.name = names[metric.name]
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu2",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "cannot write to frozen global scope",
+ source: `
+cache = []
+
+def apply(metric):
+ cache.append(deepcopy(metric))
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 1.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "append: cannot append to frozen list",
+ },
+ {
+ name: "cannot return multiple references to same metric",
+ source: `
+def apply(metric):
+ # Should be return [metric, deepcopy(metric)]
+ return [metric, metric]
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+
+ for _, tt := range applyTests {
+ t.Run(tt.name, func(t *testing.T) {
+ plugin := &Starlark{
+ Source: tt.source,
+ Log: testutil.Logger{},
+ }
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ for _, m := range tt.input {
+ err = plugin.Add(m, &acc)
+ if tt.expectedErrorStr != "" {
+ require.EqualError(t, err, tt.expectedErrorStr)
+ } else {
+ require.NoError(t, err)
+ }
+ }
+
+ err = plugin.Stop()
+ require.NoError(t, err)
+
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
+ })
+ }
+}
+
+// Tests for the behavior of the Metric type.
+func TestMetric(t *testing.T) {
+ var tests = []struct {
+ name string
+ source string
+ input []telegraf.Metric
+ expected []telegraf.Metric
+ expectedErrorStr string
+ }{
+ {
+ name: "create new metric",
+ source: `
+def apply(metric):
+ m = Metric('cpu')
+ m.fields['time_guest'] = 2.0
+ m.time = 0
+ return m
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 2.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "deepcopy",
+ source: `
+def apply(metric):
+ return [metric, deepcopy(metric)]
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set name",
+ source: `
+def apply(metric):
+ metric.name = "howdy"
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("howdy",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set name wrong type",
+ source: `
+def apply(metric):
+ metric.name = 42
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "type error",
+ },
+ {
+ name: "get name",
+ source: `
+def apply(metric):
+ metric.tags['measurement'] = metric.name
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "measurement": "cpu",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "getattr tags",
+ source: `
+def apply(metric):
+ metric.tags
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "setattr tags is not allowed",
+ source: `
+def apply(metric):
+ metric.tags = {}
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "cannot set tags",
+ },
+ {
+ name: "empty tags are false",
+ source: `
+def apply(metric):
+ if not metric.tags:
+ return metric
+ return None
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "non-empty tags are true",
+ source: `
+def apply(metric):
+ if metric.tags:
+ return metric
+ return None
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "tags in operator",
+ source: `
+def apply(metric):
+ if 'host' not in metric.tags:
+ return
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "lookup tag",
+ source: `
+def apply(metric):
+ metric.tags['result'] = metric.tags['host']
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "result": "example.org",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "lookup tag not set",
+ source: `
+def apply(metric):
+ metric.tags['foo']
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: `key "foo" not in Tags`,
+ },
+ {
+ name: "get tag",
+ source: `
+def apply(metric):
+ metric.tags['result'] = metric.tags.get('host')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "result": "example.org",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "get tag default",
+ source: `
+def apply(metric):
+ metric.tags['result'] = metric.tags.get('foo', 'example.org')
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "result": "example.org",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "get tag not set returns none",
+ source: `
+def apply(metric):
+ if metric.tags.get('foo') != None:
+ return
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set tag",
+ source: `
+def apply(metric):
+ metric.tags['host'] = 'example.org'
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set tag type error",
+ source: `
+def apply(metric):
+ metric.tags['host'] = 42
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "tag value must be of type 'str'",
+ },
+ {
+ name: "pop tag",
+ source: `
+def apply(metric):
+ metric.tags['host2'] = metric.tags.pop('host')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host2": "example.org",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "popitem tags",
+ source: `
+def apply(metric):
+ metric.tags['result'] = '='.join(metric.tags.popitem())
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "result": "host=example.org",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "popitem tags empty dict",
+ source: `
+def apply(metric):
+ metric.tags.popitem()
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "popitem(): tag dictionary is empty",
+ },
+ {
+ name: "tags setdefault key not set",
+ source: `
+def apply(metric):
+ metric.tags.setdefault('a', 'b')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "tags setdefault key already set",
+ source: `
+def apply(metric):
+ metric.tags.setdefault('a', 'c')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "tags update list of tuple",
+ source: `
+def apply(metric):
+ metric.tags.update([('b', 'y'), ('c', 'z')])
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "x",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "x",
+ "b": "y",
+ "c": "z",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "tags update kwargs",
+ source: `
+def apply(metric):
+ metric.tags.update(b='y', c='z')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "x",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "x",
+ "b": "y",
+ "c": "z",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "tags update dict",
+ source: `
+def apply(metric):
+ metric.tags.update({'b': 'y', 'c': 'z'})
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "x",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "x",
+ "b": "y",
+ "c": "z",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "tags update list tuple and kwargs",
+ source: `
+def apply(metric):
+ metric.tags.update([('b', 'y'), ('c', 'z')], d='zz')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "x",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "x",
+ "b": "y",
+ "c": "z",
+ "d": "zz",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tags",
+ source: `
+def apply(metric):
+ for k in metric.tags:
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ "foo": "bar",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ "foo": "bar",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tags and copy to fields",
+ source: `
+def apply(metric):
+ for k in metric.tags:
+ metric.fields[k] = k
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "host": "host",
+ "cpu": "cpu",
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tag keys",
+ source: `
+def apply(metric):
+ for k in metric.tags.keys():
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ "foo": "bar",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ "foo": "bar",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tag keys and copy to fields",
+ source: `
+def apply(metric):
+ for k in metric.tags.keys():
+ metric.fields[k] = k
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "host": "host",
+ "cpu": "cpu",
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tag items",
+ source: `
+def apply(metric):
+ for k, v in metric.tags.items():
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tag items and copy to fields",
+ source: `
+def apply(metric):
+ for k, v in metric.tags.items():
+ metric.fields[k] = v
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tag values",
+ source: `
+def apply(metric):
+ for v in metric.tags.values():
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tag values and copy to fields",
+ source: `
+def apply(metric):
+ for v in metric.tags.values():
+ metric.fields[v] = v
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ "example.org": "example.org",
+ "cpu0": "cpu0",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "clear tags",
+ source: `
+def apply(metric):
+ metric.tags.clear()
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "tags cannot pop while iterating",
+ source: `
+def apply(metric):
+ for k in metric.tags:
+ metric.tags.pop(k)
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "pop: cannot delete during iteration",
+ },
+ {
+ name: "tags cannot popitem while iterating",
+ source: `
+def apply(metric):
+ for k in metric.tags:
+ metric.tags.popitem()
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "cannot delete during iteration",
+ },
+ {
+ name: "tags cannot clear while iterating",
+ source: `
+def apply(metric):
+ for k in metric.tags:
+ metric.tags.clear()
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "cannot delete during iteration",
+ },
+ {
+ name: "tags cannot insert while iterating",
+ source: `
+def apply(metric):
+ for k in metric.tags:
+ metric.tags['i'] = 'j'
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "cannot insert during iteration",
+ },
+ {
+ name: "tags can be cleared after iterating",
+ source: `
+def apply(metric):
+ for k in metric.tags:
+ pass
+ metric.tags.clear()
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ },
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "getattr fields",
+ source: `
+def apply(metric):
+ metric.fields
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "setattr fields is not allowed",
+ source: `
+def apply(metric):
+ metric.fields = {}
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "cannot set fields",
+ },
+ {
+ name: "empty fields are false",
+ source: `
+def apply(metric):
+ if not metric.fields:
+ metric.fields["time_idle"] = 42
+ return metric
+ return None
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "non-empty fields are true",
+ source: `
+def apply(metric):
+ if metric.fields:
+ return metric
+ return None
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "fields in operator",
+ source: `
+def apply(metric):
+ if 'time_idle' not in metric.fields:
+ return
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "lookup string field",
+ source: `
+def apply(metric):
+ value = metric.fields['value']
+ if value != "xyzzy" and type(value) != "str":
+ return
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": "xyzzy"},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": "xyzzy"},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "lookup integer field",
+ source: `
+def apply(metric):
+ value = metric.fields['value']
+ if value != 42 and type(value) != "int":
+ return
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "lookup unsigned field",
+ source: `
+def apply(metric):
+ value = metric.fields['value']
+ if value != 42 and type(value) != "int":
+ return
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": uint64(42)},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": uint64(42)},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "lookup bool field",
+ source: `
+def apply(metric):
+ value = metric.fields['value']
+ if value != True and type(value) != "bool":
+ return
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": true},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": true},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "lookup float field",
+ source: `
+def apply(metric):
+ value = metric.fields['value']
+ if value != 42.0 and type(value) != "float":
+ return
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"value": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "lookup field not set",
+ source: `
+def apply(metric):
+ metric.fields['foo']
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: `key "foo" not in Fields`,
+ },
+ {
+ name: "get field",
+ source: `
+def apply(metric):
+ metric.fields['result'] = metric.fields.get('time_idle')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ "result": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "get field default",
+ source: `
+def apply(metric):
+ metric.fields['result'] = metric.fields.get('foo', 'example.org')
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ "result": "example.org",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "get field not set returns none",
+ source: `
+def apply(metric):
+ if metric.fields.get('foo') != None:
+ return
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set string field",
+ source: `
+def apply(metric):
+ metric.fields['host'] = 'example.org'
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "host": "example.org",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set integer field",
+ source: `
+def apply(metric):
+ metric.fields['time_idle'] = 42
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set float field",
+ source: `
+def apply(metric):
+ metric.fields['time_idle'] = 42.0
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set bool field",
+ source: `
+def apply(metric):
+ metric.fields['time_idle'] = True
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": true,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set field type error",
+ source: `
+def apply(metric):
+ metric.fields['time_idle'] = {}
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "invalid starlark type",
+ },
+ {
+ name: "pop field",
+ source: `
+def apply(metric):
+ time_idle = metric.fields.pop('time_idle')
+ if time_idle != 0:
+ return
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 0,
+ "time_guest": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_guest": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "popitem field",
+ source: `
+def apply(metric):
+ item = metric.fields.popitem()
+ if item != ("time_idle", 0):
+ return
+ metric.fields['time_guest'] = 0
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_guest": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "popitem fields empty dict",
+ source: `
+def apply(metric):
+ metric.fields.popitem()
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "popitem(): field dictionary is empty",
+ },
+ {
+ name: "fields setdefault key not set",
+ source: `
+def apply(metric):
+ metric.fields.setdefault('a', 'b')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"a": "b"},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "fields setdefault key already set",
+ source: `
+def apply(metric):
+ metric.fields.setdefault('a', 'c')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"a": "b"},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"a": "b"},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "fields update list of tuple",
+ source: `
+def apply(metric):
+ metric.fields.update([('a', 'b'), ('c', 'd')])
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "b",
+ "c": "d",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "fields update kwargs",
+ source: `
+def apply(metric):
+ metric.fields.update(a='b', c='d')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "b",
+ "c": "d",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "fields update dict",
+ source: `
+def apply(metric):
+ metric.fields.update({'a': 'b', 'c': 'd'})
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "b",
+ "c": "d",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "fields update list tuple and kwargs",
+ source: `
+def apply(metric):
+ metric.fields.update([('a', 'b'), ('c', 'd')], e='f')
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate fields",
+ source: `
+def apply(metric):
+ for k in metric.fields:
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 1.0,
+ "time_idle": 2.0,
+ "time_system": 3.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 1.0,
+ "time_idle": 2.0,
+ "time_system": 3.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate field keys",
+ source: `
+def apply(metric):
+ for k in metric.fields.keys():
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 1.0,
+ "time_idle": 2.0,
+ "time_system": 3.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 1.0,
+ "time_idle": 2.0,
+ "time_system": 3.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate field keys and copy to tags",
+ source: `
+def apply(metric):
+ for k in metric.fields.keys():
+ metric.tags[k] = k
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 1.0,
+ "time_idle": 2.0,
+ "time_system": 3.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "time_guest": "time_guest",
+ "time_idle": "time_idle",
+ "time_system": "time_system",
+ },
+ map[string]interface{}{
+ "time_guest": 1.0,
+ "time_idle": 2.0,
+ "time_system": 3.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate field items",
+ source: `
+def apply(metric):
+ for k, v in metric.fields.items():
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 1.0,
+ "time_idle": 2.0,
+ "time_system": 3.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 1.0,
+ "time_idle": 2.0,
+ "time_system": 3.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate field items and copy to tags",
+ source: `
+def apply(metric):
+ for k, v in metric.fields.items():
+ metric.tags[k] = str(v)
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 1.1,
+ "time_idle": 2.1,
+ "time_system": 3.1,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "time_guest": "1.1",
+ "time_idle": "2.1",
+ "time_system": "3.1",
+ },
+ map[string]interface{}{
+ "time_guest": 1.1,
+ "time_idle": 2.1,
+ "time_system": 3.1,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate field values",
+ source: `
+def apply(metric):
+ for v in metric.fields.values():
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate field values and copy to tags",
+ source: `
+def apply(metric):
+ for v in metric.fields.values():
+ metric.tags[str(v)] = str(v)
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "b": "b",
+ "d": "d",
+ "f": "f",
+ },
+ map[string]interface{}{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "clear fields",
+ source: `
+def apply(metric):
+ metric.fields.clear()
+ metric.fields['notempty'] = 0
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 0,
+ "time_guest": 0,
+ "time_system": 0,
+ "time_user": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "notempty": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "fields cannot pop while iterating",
+ source: `
+def apply(metric):
+ for k in metric.fields:
+ metric.fields.pop(k)
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "pop: cannot delete during iteration",
+ },
+ {
+ name: "fields cannot popitem while iterating",
+ source: `
+def apply(metric):
+ for k in metric.fields:
+ metric.fields.popitem()
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "cannot delete during iteration",
+ },
+ {
+ name: "fields cannot clear while iterating",
+ source: `
+def apply(metric):
+ for k in metric.fields:
+ metric.fields.clear()
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "cannot delete during iteration",
+ },
+ {
+ name: "fields cannot insert while iterating",
+ source: `
+def apply(metric):
+ for k in metric.fields:
+ metric.fields['time_guest'] = 0
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "cannot insert during iteration",
+ },
+ {
+ name: "fields can be cleared after iterating",
+ source: `
+def apply(metric):
+ for k in metric.fields:
+ pass
+ metric.fields.clear()
+ metric.fields['notempty'] = 0
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "notempty": 0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set time",
+ source: `
+def apply(metric):
+ metric.time = 42
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0).UTC(),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 42).UTC(),
+ ),
+ },
+ },
+ {
+ name: "set time wrong type",
+ source: `
+def apply(metric):
+ metric.time = 'howdy'
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0).UTC(),
+ ),
+ },
+ expected: []telegraf.Metric{},
+ expectedErrorStr: "type error",
+ },
+ {
+ name: "get time",
+ source: `
+def apply(metric):
+ metric.time -= metric.time % 100000000
+ return metric
+ `,
+ input: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(42, 11).UTC(),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(42, 0).UTC(),
+ ),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ plugin := &Starlark{
+ Source: tt.source,
+ Log: testutil.Logger{},
+ }
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+
+ err = plugin.Start(&acc)
+ require.NoError(t, err)
+
+ for _, m := range tt.input {
+ err = plugin.Add(m, &acc)
+ if tt.expectedErrorStr != "" {
+ require.EqualError(t, err, tt.expectedErrorStr)
+ } else {
+ require.NoError(t, err)
+ }
+ }
+
+ err = plugin.Stop()
+ require.NoError(t, err)
+
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
+ })
+ }
+}
+
+func TestScript(t *testing.T) {
+ var tests = []struct {
+ name string
+ plugin *Starlark
+ input []telegraf.Metric
+ expected []telegraf.Metric
+ expectedErrorStr string
+ }{
+ {
+ name: "rename",
+ plugin: &Starlark{
+ Script: "testdata/rename.star",
+ Log: testutil.Logger{},
+ },
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "lower": "0",
+ "upper": "10",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "min": "0",
+ "max": "10",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "scale",
+ plugin: &Starlark{
+ Script: "testdata/scale.star",
+ Log: testutil.Logger{},
+ },
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 10.0},
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 100.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "ratio",
+ plugin: &Starlark{
+ Script: "testdata/ratio.star",
+ Log: testutil.Logger{},
+ },
+ input: []telegraf.Metric{
+ testutil.MustMetric("mem",
+ map[string]string{},
+ map[string]interface{}{
+ "used": 2,
+ "total": 10,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("mem",
+ map[string]string{},
+ map[string]interface{}{
+ "used": 2,
+ "total": 10,
+ "usage": 20.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.plugin.Init()
+ require.NoError(t, err)
+
+ var acc testutil.Accumulator
+
+ err = tt.plugin.Start(&acc)
+ require.NoError(t, err)
+
+ for _, m := range tt.input {
+ err = tt.plugin.Add(m, &acc)
+ if tt.expectedErrorStr != "" {
+ require.EqualError(t, err, tt.expectedErrorStr)
+ } else {
+ require.NoError(t, err)
+ }
+ }
+
+ err = tt.plugin.Stop()
+ require.NoError(t, err)
+
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
+ })
+ }
+}
+
+// Benchmarks modify the metric in place, so the scripts shouldn't modify the
+// metric.
+func Benchmark(b *testing.B) {
+ var tests = []struct {
+ name string
+ source string
+ input []telegraf.Metric
+ }{
+ {
+ name: "passthrough",
+ source: `
+def apply(metric):
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "create new metric",
+ source: `
+def apply(metric):
+ m = Metric('cpu')
+ m.fields['time_guest'] = 2.0
+ m.time = 0
+ return m
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set name",
+ source: `
+def apply(metric):
+ metric.name = "cpu"
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set tag",
+ source: `
+def apply(metric):
+ metric.tags['host'] = 'example.org'
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "tag in operator",
+ source: `
+def apply(metric):
+ if 'c' in metric.tags:
+ return metric
+ return None
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tags",
+ source: `
+def apply(metric):
+ for k in metric.tags:
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ },
+ map[string]interface{}{"time_idle": 42.0},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ // This should be faster than calling items()
+ name: "iterate tags and get values",
+ source: `
+def apply(metric):
+ for k in metric.tags:
+ v = metric.tags[k]
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate tag items",
+ source: `
+def apply(metric):
+ for k, v in metric.tags.items():
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ },
+ map[string]interface{}{"time_idle": 42},
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "set string field",
+ source: `
+def apply(metric):
+ metric.fields['host'] = 'example.org'
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "host": "example.org",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate fields",
+ source: `
+def apply(metric):
+ for k in metric.fields:
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ "time_user": 42.0,
+ "time_guest": 42.0,
+ "time_system": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ // This should be faster than calling items()
+ name: "iterate fields and get values",
+ source: `
+def apply(metric):
+ for k in metric.fields:
+ v = metric.fields[k]
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ "time_user": 42.0,
+ "time_guest": 42.0,
+ "time_system": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "iterate field items",
+ source: `
+def apply(metric):
+ for k, v in metric.fields.items():
+ pass
+ return metric
+`,
+ input: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ b.Run(tt.name, func(b *testing.B) {
+ plugin := &Starlark{
+ Source: tt.source,
+ Log: testutil.Logger{},
+ }
+
+ err := plugin.Init()
+ require.NoError(b, err)
+
+ var acc testutil.NopAccumulator
+
+ err = plugin.Start(&acc)
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ for _, m := range tt.input {
+ plugin.Add(m, &acc)
+ }
+ }
+
+ err = plugin.Stop()
+ require.NoError(b, err)
+ })
+ }
+}
+
+func TestAllScriptTestData(t *testing.T) {
+ // can be run from multiple folders
+ paths := []string{"testdata", "plugins/processors/starlark/testdata"}
+ for _, testdataPath := range paths {
+ filepath.Walk(testdataPath, func(path string, info os.FileInfo, err error) error {
+ if info == nil || info.IsDir() {
+ return nil
+ }
+ fn := path
+ t.Run(fn, func(t *testing.T) {
+ b, err := ioutil.ReadFile(fn)
+ require.NoError(t, err)
+ lines := strings.Split(string(b), "\n")
+ inputMetrics := parseMetricsFrom(t, lines, "Example Input:")
+ outputMetrics := parseMetricsFrom(t, lines, "Example Output:")
+ plugin := &Starlark{
+ Script: fn,
+ Log: testutil.Logger{},
+ }
+ require.NoError(t, plugin.Init())
+
+ acc := &testutil.Accumulator{}
+
+ err = plugin.Start(acc)
+ require.NoError(t, err)
+
+ for _, m := range inputMetrics {
+ err = plugin.Add(m, acc)
+ require.NoError(t, err)
+ }
+
+ err = plugin.Stop()
+ require.NoError(t, err)
+
+ testutil.RequireMetricsEqual(t, outputMetrics, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime())
+ })
+ return nil
+ })
+ }
+}
+
+var parser, _ = parsers.NewInfluxParser() // literally never returns errors.
+
+// parses metric lines out of line protocol following a header, with a trailing blank line
+func parseMetricsFrom(t *testing.T, lines []string, header string) (metrics []telegraf.Metric) {
+ require.NotZero(t, len(lines), "Expected some lines to parse from .star file, found none")
+ startIdx := -1
+ endIdx := len(lines)
+ for i := range lines {
+ if strings.TrimLeft(lines[i], "# ") == header {
+ startIdx = i + 1
+ break
+ }
+ }
+ require.NotEqual(t, -1, startIdx, fmt.Sprintf("Header %q must exist in file", header))
+ for i := startIdx; i < len(lines); i++ {
+ line := strings.TrimLeft(lines[i], "# ")
+ if line == "" || line == "'''" {
+ endIdx = i
+ break
+ }
+ }
+ for i := startIdx; i < endIdx; i++ {
+ m, err := parser.ParseLine(strings.TrimLeft(lines[i], "# "))
+ require.NoError(t, err, fmt.Sprintf("Expected to be able to parse %q metric, but found error", header))
+ metrics = append(metrics, m)
+ }
+ return metrics
+}
diff --git a/plugins/processors/starlark/tag_dict.go b/plugins/processors/starlark/tag_dict.go
new file mode 100644
index 0000000000000..3d95264382db5
--- /dev/null
+++ b/plugins/processors/starlark/tag_dict.go
@@ -0,0 +1,197 @@
+package starlark
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/influxdata/telegraf"
+ "go.starlark.net/starlark"
+)
+
+// TagDict is a starlark.Value for the metric tags. It is heavily based on the
+// starlark.Dict.
+type TagDict struct {
+ *Metric
+}
+
+func (d TagDict) String() string {
+ buf := new(strings.Builder)
+ buf.WriteString("{")
+ sep := ""
+ for _, item := range d.Items() {
+ k, v := item[0], item[1]
+ buf.WriteString(sep)
+ buf.WriteString(k.String())
+ buf.WriteString(": ")
+ buf.WriteString(v.String())
+ sep = ", "
+ }
+ buf.WriteString("}")
+ return buf.String()
+}
+
+func (d TagDict) Type() string {
+ return "Tags"
+}
+
+func (d TagDict) Freeze() {
+ d.frozen = true
+}
+
+func (d TagDict) Truth() starlark.Bool {
+ return len(d.metric.TagList()) != 0
+}
+
+func (d TagDict) Hash() (uint32, error) {
+ return 0, errors.New("not hashable")
+}
+
+// AttrNames implements the starlark.HasAttrs interface.
+func (d TagDict) AttrNames() []string {
+ return builtinAttrNames(TagDictMethods)
+}
+
+// Attr implements the starlark.HasAttrs interface.
+func (d TagDict) Attr(name string) (starlark.Value, error) {
+ return builtinAttr(d, name, TagDictMethods)
+}
+
+var TagDictMethods = map[string]builtinMethod{
+ "clear": dict_clear,
+ "get": dict_get,
+ "items": dict_items,
+ "keys": dict_keys,
+ "pop": dict_pop,
+ "popitem": dict_popitem,
+ "setdefault": dict_setdefault,
+ "update": dict_update,
+ "values": dict_values,
+}
+
+// Get implements the starlark.Mapping interface.
+func (d TagDict) Get(key starlark.Value) (v starlark.Value, found bool, err error) {
+ if k, ok := key.(starlark.String); ok {
+ gv, found := d.metric.GetTag(k.GoString())
+ if !found {
+ return starlark.None, false, nil
+ }
+ return starlark.String(gv), true, err
+ }
+
+ return starlark.None, false, errors.New("key must be of type 'str'")
+}
+
+// SetKey implements the starlark.HasSetKey interface to support map update
+// using x[k]=v syntax, like a dictionary.
+func (d TagDict) SetKey(k, v starlark.Value) error {
+ if d.tagIterCount > 0 {
+ return fmt.Errorf("cannot insert during iteration")
+ }
+
+ key, ok := k.(starlark.String)
+ if !ok {
+ return errors.New("tag key must be of type 'str'")
+ }
+
+ value, ok := v.(starlark.String)
+ if !ok {
+ return errors.New("tag value must be of type 'str'")
+ }
+
+ d.metric.AddTag(key.GoString(), value.GoString())
+ return nil
+}
+
+// Items implements the starlark.IterableMapping interface.
+func (d TagDict) Items() []starlark.Tuple {
+ items := make([]starlark.Tuple, 0, len(d.metric.TagList()))
+ for _, tag := range d.metric.TagList() {
+ key := starlark.String(tag.Key)
+ value := starlark.String(tag.Value)
+ pair := starlark.Tuple{key, value}
+ items = append(items, pair)
+ }
+ return items
+}
+
+func (d TagDict) Clear() error {
+ if d.tagIterCount > 0 {
+ return fmt.Errorf("cannot delete during iteration")
+ }
+
+ keys := make([]string, 0, len(d.metric.TagList()))
+ for _, tag := range d.metric.TagList() {
+ keys = append(keys, tag.Key)
+ }
+
+ for _, key := range keys {
+ d.metric.RemoveTag(key)
+ }
+ return nil
+}
+
+func (d TagDict) PopItem() (v starlark.Value, err error) {
+ if d.tagIterCount > 0 {
+ return nil, fmt.Errorf("cannot delete during iteration")
+ }
+
+ for _, tag := range d.metric.TagList() {
+ k := tag.Key
+ v := tag.Value
+
+ d.metric.RemoveTag(k)
+
+ sk := starlark.String(k)
+ sv := starlark.String(v)
+ return starlark.Tuple{sk, sv}, nil
+ }
+
+ return nil, errors.New("popitem(): tag dictionary is empty")
+}
+
+func (d TagDict) Delete(k starlark.Value) (v starlark.Value, found bool, err error) {
+ if d.tagIterCount > 0 {
+ return nil, false, fmt.Errorf("cannot delete during iteration")
+ }
+
+ if key, ok := k.(starlark.String); ok {
+ value, ok := d.metric.GetTag(key.GoString())
+ if ok {
+ d.metric.RemoveTag(key.GoString())
+ v := starlark.String(value)
+ return v, ok, err
+ }
+ }
+
+ return starlark.None, false, errors.New("key must be of type 'str'")
+}
+
+// Items implements the starlark.Mapping interface.
+func (d TagDict) Iterate() starlark.Iterator {
+ d.tagIterCount++
+ return &TagIterator{Metric: d.Metric, tags: d.metric.TagList()}
+}
+
+type TagIterator struct {
+ *Metric
+ tags []*telegraf.Tag
+}
+
+// Next implements the starlark.Iterator interface.
+func (i *TagIterator) Next(p *starlark.Value) bool {
+ if len(i.tags) == 0 {
+ return false
+ }
+
+ tag := i.tags[0]
+ i.tags = i.tags[1:]
+ *p = starlark.String(tag.Key)
+
+ return true
+}
+
+// Done implements the starlark.Iterator interface.
+func (i *TagIterator) Done() {
+ i.tagIterCount--
+}
diff --git a/plugins/processors/starlark/testdata/number_logic.star b/plugins/processors/starlark/testdata/number_logic.star
new file mode 100644
index 0000000000000..fced8c76ddc2c
--- /dev/null
+++ b/plugins/processors/starlark/testdata/number_logic.star
@@ -0,0 +1,17 @@
+# Set a logic function to transform a numerical value to another numerical value
+# Example: Set any 'status' field between 1 and 6 to a value of 0
+#
+# Example Input:
+# lb,http_method=GET status=5i 1465839830100400201
+#
+# Example Output:
+# lb,http_method=GET status=0i 1465839830100400201
+
+
+def apply(metric):
+ v = metric.fields.get('status')
+ if v == None:
+ return metric
+ if 1 < v and v < 6:
+ metric.fields['status'] = 0
+ return metric
diff --git a/plugins/processors/starlark/testdata/pivot.star b/plugins/processors/starlark/testdata/pivot.star
new file mode 100644
index 0000000000000..f32ebf45d9763
--- /dev/null
+++ b/plugins/processors/starlark/testdata/pivot.star
@@ -0,0 +1,17 @@
+'''
+Pivots a key's value to be the key for another key.
+In this example it pivots the value of key `sensor`
+to be the key of the value in key `value`
+
+Example Input:
+temperature sensor="001A0",value=111.48
+
+Example Output:
+temperature 001A0=111.48
+'''
+
+def apply(metric):
+ metric.fields[str(metric.fields['sensor'])] = metric.fields['value']
+ metric.fields.pop('value',None)
+ metric.fields.pop('sensor',None)
+ return metric
diff --git a/plugins/processors/starlark/testdata/ratio.star b/plugins/processors/starlark/testdata/ratio.star
new file mode 100644
index 0000000000000..60dcedaf54517
--- /dev/null
+++ b/plugins/processors/starlark/testdata/ratio.star
@@ -0,0 +1,15 @@
+# Compute the ratio of two integer fields.
+#
+# Example: A new field 'usage' from an existing fields 'used' and 'total'
+#
+# Example Input:
+# memory,host=hostname used=11038756864.4948,total=17179869184.1221 1597255082000000000
+#
+# Example Output:
+# memory,host=hostname used=11038756864.4948,total=17179869184.1221,usage=64.25402164701573 1597255082000000000
+
+def apply(metric):
+ used = float(metric.fields['used'])
+ total = float(metric.fields['total'])
+ metric.fields['usage'] = (used / total) * 100
+ return metric
diff --git a/plugins/processors/starlark/testdata/rename.star b/plugins/processors/starlark/testdata/rename.star
new file mode 100644
index 0000000000000..cf5d118ddf767
--- /dev/null
+++ b/plugins/processors/starlark/testdata/rename.star
@@ -0,0 +1,23 @@
+# Rename any tags using the mapping in the renames dict.
+#
+# Example Input:
+# measurement,host=hostname lower=0,upper=100 1597255410000000000
+#
+# Example Output:
+# measurement,host=hostname min=0,max=100 1597255410000000000
+
+renames = {
+ 'lower': 'min',
+ 'upper': 'max',
+}
+
+def apply(metric):
+ for k, v in metric.tags.items():
+ if k in renames:
+ metric.tags[renames[k]] = v
+ metric.tags.pop(k)
+ for k, v in metric.fields.items():
+ if k in renames:
+ metric.fields[renames[k]] = v
+ metric.fields.pop(k)
+ return metric
diff --git a/plugins/processors/starlark/testdata/scale.star b/plugins/processors/starlark/testdata/scale.star
new file mode 100644
index 0000000000000..efba6042b7e82
--- /dev/null
+++ b/plugins/processors/starlark/testdata/scale.star
@@ -0,0 +1,13 @@
+# Multiply any float fields by 10
+#
+# Example Input:
+# modbus,host=hostname Current=1.22,Energy=0,Frequency=60i,Power=0,Voltage=123.9000015258789 1554079521000000000
+#
+# Example Output:
+# modbus,host=hostname Current=12.2,Energy=0,Frequency=60i,Power=0,Voltage=1239.000015258789 1554079521000000000
+
+def apply(metric):
+ for k, v in metric.fields.items():
+ if type(v) == "float":
+ metric.fields[k] = v * 10
+ return metric
diff --git a/plugins/processors/starlark/testdata/value_filter.star b/plugins/processors/starlark/testdata/value_filter.star
new file mode 100644
index 0000000000000..eeb2432f6679f
--- /dev/null
+++ b/plugins/processors/starlark/testdata/value_filter.star
@@ -0,0 +1,18 @@
+# Filter metrics by value
+'''
+In this example we look at the `value` field of the metric.
+If the value is zeor, we delete all the fields, effectively dropping the metric.
+
+Example Input:
+temperature sensor="001A0",value=111.48
+temperature sensor="001B0",value=0.0
+
+Example Output:
+temperature sensor="001A0",value=111.48
+'''
+
+def apply(metric):
+ if metric.fields["value"] == 0.0:
+ # removing all fields deletes a metric
+ metric.fields.clear()
+ return metric
diff --git a/plugins/processors/streamingprocessor.go b/plugins/processors/streamingprocessor.go
new file mode 100644
index 0000000000000..95b2e0748d771
--- /dev/null
+++ b/plugins/processors/streamingprocessor.go
@@ -0,0 +1,63 @@
+package processors
+
+import (
+ "github.com/influxdata/telegraf"
+)
+
+// NewStreamingProcessorFromProcessor is a converter that turns a standard
+// processor into a streaming processor
+func NewStreamingProcessorFromProcessor(p telegraf.Processor) telegraf.StreamingProcessor {
+ sp := &streamingProcessor{
+ processor: p,
+ }
+ return sp
+}
+
+type streamingProcessor struct {
+ processor telegraf.Processor
+ acc telegraf.Accumulator
+}
+
+func (sp *streamingProcessor) SampleConfig() string {
+ return sp.processor.SampleConfig()
+}
+
+func (sp *streamingProcessor) Description() string {
+ return sp.processor.Description()
+}
+
+func (sp *streamingProcessor) Start(acc telegraf.Accumulator) error {
+ sp.acc = acc
+ return nil
+}
+
+func (sp *streamingProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
+ for _, m := range sp.processor.Apply(m) {
+ acc.AddMetric(m)
+ }
+ return nil
+}
+
+func (sp *streamingProcessor) Stop() error {
+ return nil
+}
+
+// Make the streamingProcessor of type Initializer to be able
+// to call the Init method of the wrapped processor if
+// needed
+func (sp *streamingProcessor) Init() error {
+ if p, ok := sp.processor.(telegraf.Initializer); ok {
+ err := p.Init()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Unwrap lets you retrieve the original telegraf.Processor from the
+// StreamingProcessor. This is necessary because the toml Unmarshaller won't
+// look inside composed types.
+func (sp *streamingProcessor) Unwrap() telegraf.Processor {
+ return sp.processor
+}
diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md
index 06bffaee8ef0a..a7aa0e2a585bd 100644
--- a/plugins/processors/strings/README.md
+++ b/plugins/processors/strings/README.md
@@ -5,54 +5,79 @@ The `strings` plugin maps certain go string functions onto measurement, tag, and
Implemented functions are:
- lowercase
- uppercase
+- titlecase
- trim
- trim_left
- trim_right
- trim_prefix
- trim_suffix
- replace
+- left
+- base64decode
Please note that in this implementation these are processed in the order that they appear above.
-Specify the `measurement`, `tag` or `field` that you want processed in each section and optionally a `dest` if you want the result stored in a new tag or field. You can specify lots of transformations on data with a single strings processor.
+Specify the `measurement`, `tag`, `tag_key`, `field`, or `field_key` that you want processed in each section and optionally a `dest` if you want the result stored in a new tag or field. You can specify lots of transformations on data with a single strings processor.
-If you'd like to apply the change to every `tag`, `field`, or `measurement`, use the value "*" for each respective field. Note that the `dest` field will be ignored if "*" is used
+If you'd like to apply the change to every `tag`, `tag_key`, `field`, `field_key`, or `measurement`, use the value `"*"` for each respective field. Note that the `dest` field will be ignored if `"*"` is used.
+
+If you'd like to apply multiple processings to the same `tag_key` or `field_key`, note the process order stated above. See [Example 2]() for an example.
### Configuration:
```toml
[[processors.strings]]
- # [[processors.strings.uppercase]]
- # tag = "method"
-
+ ## Convert a field value to lowercase and store in a new field
# [[processors.strings.lowercase]]
# field = "uri_stem"
# dest = "uri_stem_normalised"
- ## Convert a tag value to lowercase
+ ## Convert a tag value to uppercase
+ # [[processors.strings.uppercase]]
+ # tag = "method"
+
+ ## Convert a field value to titlecase
+ # [[processors.strings.titlecase]]
+ # field = "status"
+
+ ## Trim leading and trailing whitespace using the default cutset
# [[processors.strings.trim]]
# field = "message"
+ ## Trim leading characters in cutset
# [[processors.strings.trim_left]]
# field = "message"
# cutset = "\t"
+ ## Trim trailing characters in cutset
# [[processors.strings.trim_right]]
# field = "message"
# cutset = "\r\n"
+ ## Trim the given prefix from the field
# [[processors.strings.trim_prefix]]
# field = "my_value"
# prefix = "my_"
+ ## Trim the given suffix from the field
# [[processors.strings.trim_suffix]]
# field = "read_count"
# suffix = "_count"
+ ## Replace all non-overlapping instances of old with new
# [[processors.strings.replace]]
# measurement = "*"
# old = ":"
# new = "_"
+
+ ## Trims strings based on width
+ # [[processors.strings.left]]
+ # field = "message"
+ # width = 10
+
+ ## Decode a base64 encoded utf-8 string
+ # [[processors.strings.base64decode]]
+ # field = "message"
```
#### Trim, TrimLeft, TrimRight
@@ -79,10 +104,10 @@ the operation and keep the old name.
```toml
[[processors.strings]]
[[processors.strings.lowercase]]
- field = "uri-stem"
+ tag = "uri_stem"
[[processors.strings.trim_prefix]]
- field = "uri_stem"
+ tag = "uri_stem"
prefix = "/api/"
[[processors.strings.uppercase]]
@@ -92,10 +117,33 @@ the operation and keep the old name.
**Input**
```
-iis_log,method=get,uri_stem=/API/HealthCheck cs-host="MIXEDCASE_host",referrer="-",ident="-",http_version=1.1,agent="UserAgent",resp_bytes=270i 1519652321000000000
+iis_log,method=get,uri_stem=/API/HealthCheck cs-host="MIXEDCASE_host",http_version=1.1 1519652321000000000
+```
+
+**Output**
+```
+iis_log,method=get,uri_stem=healthcheck cs-host="MIXEDCASE_host",http_version=1.1,cs-host_normalised="MIXEDCASE_HOST" 1519652321000000000
+```
+
+### Example 2
+**Config**
+```toml
+[[processors.strings]]
+ [[processors.strings.lowercase]]
+ tag_key = "URI-Stem"
+
+ [[processors.strings.replace]]
+ tag_key = "uri-stem"
+ old = "-"
+ new = "_"
+```
+
+**Input**
+```
+iis_log,URI-Stem=/API/HealthCheck http_version=1.1 1519652321000000000
```
**Output**
```
-iis_log,method=get,uri_stem=healthcheck cs-host="MIXEDCASE_host",cs-host_normalised="MIXEDCASE_HOST",referrer="-",ident="-",http_version=1.1,agent="UserAgent",resp_bytes=270i 1519652321000000000
+iis_log,uri_stem=/API/HealthCheck http_version=1.1 1519652321000000000
```
diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go
index 00c7d99b118c9..1ac6c61019c6f 100644
--- a/plugins/processors/strings/strings.go
+++ b/plugins/processors/strings/strings.go
@@ -1,22 +1,27 @@
package strings
import (
+ "encoding/base64"
"strings"
"unicode"
+ "unicode/utf8"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
)
type Strings struct {
- Lowercase []converter `toml:"lowercase"`
- Uppercase []converter `toml:"uppercase"`
- Trim []converter `toml:"trim"`
- TrimLeft []converter `toml:"trim_left"`
- TrimRight []converter `toml:"trim_right"`
- TrimPrefix []converter `toml:"trim_prefix"`
- TrimSuffix []converter `toml:"trim_suffix"`
- Replace []converter `toml:"replace"`
+ Lowercase []converter `toml:"lowercase"`
+ Uppercase []converter `toml:"uppercase"`
+ Titlecase []converter `toml:"titlecase"`
+ Trim []converter `toml:"trim"`
+ TrimLeft []converter `toml:"trim_left"`
+ TrimRight []converter `toml:"trim_right"`
+ TrimPrefix []converter `toml:"trim_prefix"`
+ TrimSuffix []converter `toml:"trim_suffix"`
+ Replace []converter `toml:"replace"`
+ Left []converter `toml:"left"`
+ Base64Decode []converter `toml:"base64decode"`
converters []converter
init bool
@@ -26,7 +31,9 @@ type ConvertFunc func(s string) string
type converter struct {
Field string
+ FieldKey string
Tag string
+ TagKey string
Measurement string
Dest string
Cutset string
@@ -34,6 +41,7 @@ type converter struct {
Prefix string
Old string
New string
+ Width int
fn ConvertFunc
}
@@ -48,6 +56,10 @@ const sampleConfig = `
# field = "uri_stem"
# dest = "uri_stem_normalised"
+ ## Convert a field value to titlecase
+ # [[processors.strings.titlecase]]
+ # field = "status"
+
## Trim leading and trailing whitespace using the default cutset
# [[processors.strings.trim]]
# field = "message"
@@ -77,6 +89,15 @@ const sampleConfig = `
# measurement = "*"
# old = ":"
# new = "_"
+
+ ## Trims strings based on width
+ # [[processors.strings.left]]
+ # field = "message"
+ # width = 10
+
+ ## Decode a base64 encoded utf-8 string
+ # [[processors.strings.base64decode]]
+ # field = "message"
`
func (s *Strings) SampleConfig() string {
@@ -109,6 +130,27 @@ func (c *converter) convertTag(metric telegraf.Metric) {
}
}
+func (c *converter) convertTagKey(metric telegraf.Metric) {
+ var tags map[string]string
+ if c.TagKey == "*" {
+ tags = metric.Tags()
+ } else {
+ tags = make(map[string]string)
+ tv, ok := metric.GetTag(c.TagKey)
+ if !ok {
+ return
+ }
+ tags[c.TagKey] = tv
+ }
+
+ for key, value := range tags {
+ if k := c.fn(key); k != "" {
+ metric.RemoveTag(key)
+ metric.AddTag(k, value)
+ }
+ }
+}
+
func (c *converter) convertField(metric telegraf.Metric) {
var fields map[string]interface{}
if c.Field == "*" {
@@ -133,6 +175,27 @@ func (c *converter) convertField(metric telegraf.Metric) {
}
}
+func (c *converter) convertFieldKey(metric telegraf.Metric) {
+ var fields map[string]interface{}
+ if c.FieldKey == "*" {
+ fields = metric.Fields()
+ } else {
+ fields = make(map[string]interface{})
+ fv, ok := metric.GetField(c.FieldKey)
+ if !ok {
+ return
+ }
+ fields[c.FieldKey] = fv
+ }
+
+ for key, value := range fields {
+ if k := c.fn(key); k != "" {
+ metric.RemoveField(key)
+ metric.AddField(k, value)
+ }
+ }
+}
+
func (c *converter) convertMeasurement(metric telegraf.Metric) {
if metric.Name() != c.Measurement && c.Measurement != "*" {
return
@@ -146,10 +209,18 @@ func (c *converter) convert(metric telegraf.Metric) {
c.convertField(metric)
}
+ if c.FieldKey != "" {
+ c.convertFieldKey(metric)
+ }
+
if c.Tag != "" {
c.convertTag(metric)
}
+ if c.TagKey != "" {
+ c.convertTagKey(metric)
+ }
+
if c.Measurement != "" {
c.convertMeasurement(metric)
}
@@ -169,6 +240,10 @@ func (s *Strings) initOnce() {
c.fn = strings.ToUpper
s.converters = append(s.converters, c)
}
+ for _, c := range s.Titlecase {
+ c.fn = strings.Title
+ s.converters = append(s.converters, c)
+ }
for _, c := range s.Trim {
c := c
if c.Cutset != "" {
@@ -218,6 +293,31 @@ func (s *Strings) initOnce() {
}
s.converters = append(s.converters, c)
}
+ for _, c := range s.Left {
+ c := c
+ c.fn = func(s string) string {
+ if len(s) < c.Width {
+ return s
+ } else {
+ return s[:c.Width]
+ }
+ }
+ s.converters = append(s.converters, c)
+ }
+ for _, c := range s.Base64Decode {
+ c := c
+ c.fn = func(s string) string {
+ data, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return s
+ }
+ if utf8.Valid(data) {
+ return string(data)
+ }
+ return s
+ }
+ s.converters = append(s.converters, c)
+ }
s.init = true
}
diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go
index e108c04f72374..2c1be510ef9b6 100644
--- a/plugins/processors/strings/strings_test.go
+++ b/plugins/processors/strings/strings_test.go
@@ -6,6 +6,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -25,6 +26,22 @@ func newM1() telegraf.Metric {
return m1
}
+func newM2() telegraf.Metric {
+ m1, _ := metric.New("IIS_log",
+ map[string]string{
+ "verb": "GET",
+ "S-ComputerName": "MIXEDCASE_hostname",
+ },
+ map[string]interface{}{
+ "Request": "/mixed/CASE/paTH/?from=-1D&to=now",
+ "req/sec": 5,
+ " whitespace ": " whitespace\t",
+ },
+ time.Now(),
+ )
+ return m1
+}
+
func TestFieldConversions(t *testing.T) {
tests := []struct {
name string
@@ -61,6 +78,21 @@ func TestFieldConversions(t *testing.T) {
require.Equal(t, "/MIXED/CASE/PATH/?FROM=-1D&TO=NOW", fv)
},
},
+ {
+ name: "Should change existing field to titlecase",
+ plugin: &Strings{
+ Titlecase: []converter{
+ {
+ Field: "request",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("request")
+ require.True(t, ok)
+ require.Equal(t, "/Mixed/CASE/PaTH/?From=-1D&To=Now", fv)
+ },
+ },
{
name: "Should add new lowercase field",
plugin: &Strings{
@@ -253,6 +285,259 @@ func TestFieldConversions(t *testing.T) {
}
}
+func TestFieldKeyConversions(t *testing.T) {
+ tests := []struct {
+ name string
+ plugin *Strings
+ check func(t *testing.T, actual telegraf.Metric)
+ }{
+ {
+ name: "Should change existing field key to lowercase",
+ plugin: &Strings{
+ Lowercase: []converter{
+ {
+ FieldKey: "Request",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("request")
+ require.True(t, ok)
+ require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv)
+ },
+ },
+ {
+ name: "Should change existing field key to uppercase",
+ plugin: &Strings{
+ Uppercase: []converter{
+ {
+ FieldKey: "Request",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("Request")
+ require.False(t, ok)
+
+ fv, ok = actual.GetField("REQUEST")
+ require.True(t, ok)
+ require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv)
+ },
+ },
+ {
+ name: "Should trim from both sides",
+ plugin: &Strings{
+ Trim: []converter{
+ {
+ FieldKey: "Request",
+ Cutset: "eR",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("quest")
+ require.True(t, ok)
+ require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv)
+ },
+ },
+ {
+ name: "Should trim from both sides but not make lowercase",
+ plugin: &Strings{
+ // Tag/field key multiple executions occur in the following order: (initOnce)
+ // Lowercase
+ // Uppercase
+ // Titlecase
+ // Trim
+ // TrimLeft
+ // TrimRight
+ // TrimPrefix
+ // TrimSuffix
+ // Replace
+ Lowercase: []converter{
+ {
+ FieldKey: "Request",
+ },
+ },
+ Trim: []converter{
+ {
+ FieldKey: "request",
+ Cutset: "tse",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("requ")
+ require.True(t, ok)
+ require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv)
+ },
+ },
+ {
+ name: "Should trim from left side",
+ plugin: &Strings{
+ TrimLeft: []converter{
+ {
+ FieldKey: "req/sec",
+ Cutset: "req/",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("sec")
+ require.True(t, ok)
+ require.Equal(t, int64(5), fv)
+ },
+ },
+ {
+ name: "Should trim from right side",
+ plugin: &Strings{
+ TrimRight: []converter{
+ {
+ FieldKey: "req/sec",
+ Cutset: "req/",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("req/sec")
+ require.True(t, ok)
+ require.Equal(t, int64(5), fv)
+ },
+ },
+ {
+ name: "Should trim prefix 'req/'",
+ plugin: &Strings{
+ TrimPrefix: []converter{
+ {
+ FieldKey: "req/sec",
+ Prefix: "req/",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("sec")
+ require.True(t, ok)
+ require.Equal(t, int64(5), fv)
+ },
+ },
+ {
+ name: "Should trim suffix '/sec'",
+ plugin: &Strings{
+ TrimSuffix: []converter{
+ {
+ FieldKey: "req/sec",
+ Suffix: "/sec",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("req")
+ require.True(t, ok)
+ require.Equal(t, int64(5), fv)
+ },
+ },
+ {
+ name: "Trim without cutset removes whitespace",
+ plugin: &Strings{
+ Trim: []converter{
+ {
+ FieldKey: " whitespace ",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("whitespace")
+ require.True(t, ok)
+ require.Equal(t, " whitespace\t", fv)
+ },
+ },
+ {
+ name: "Trim left without cutset removes whitespace",
+ plugin: &Strings{
+ TrimLeft: []converter{
+ {
+ FieldKey: " whitespace ",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("whitespace ")
+ require.True(t, ok)
+ require.Equal(t, " whitespace\t", fv)
+ },
+ },
+ {
+ name: "Trim right without cutset removes whitespace",
+ plugin: &Strings{
+ TrimRight: []converter{
+ {
+ FieldKey: " whitespace ",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField(" whitespace")
+ require.True(t, ok)
+ require.Equal(t, " whitespace\t", fv)
+ },
+ },
+ {
+ name: "No change if field missing",
+ plugin: &Strings{
+ Lowercase: []converter{
+ {
+ FieldKey: "xyzzy",
+ Suffix: "-1D&to=now",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("Request")
+ require.True(t, ok)
+ require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv)
+ },
+ },
+ {
+ name: "Should trim the existing field to 6 characters",
+ plugin: &Strings{
+ Left: []converter{
+ {
+ Field: "Request",
+ Width: 6,
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("Request")
+ require.True(t, ok)
+ require.Equal(t, "/mixed", fv)
+ },
+ },
+ {
+ name: "Should do nothing to the string",
+ plugin: &Strings{
+ Left: []converter{
+ {
+ Field: "Request",
+ Width: 600,
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ fv, ok := actual.GetField("Request")
+ require.True(t, ok)
+ require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv)
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ metrics := tt.plugin.Apply(newM2())
+ require.Len(t, metrics, 1)
+ tt.check(t, metrics[0])
+ })
+ }
+}
+
func TestTagConversions(t *testing.T) {
tests := []struct {
name string
@@ -326,6 +611,30 @@ func TestTagConversions(t *testing.T) {
require.Equal(t, "MIXEDCASE_HOSTNAME", tv)
},
},
+ {
+ name: "Should add new titlecase tag",
+ plugin: &Strings{
+ Titlecase: []converter{
+ {
+ Tag: "s-computername",
+ Dest: "s-computername_titlecase",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ tv, ok := actual.GetTag("verb")
+ require.True(t, ok)
+ require.Equal(t, "GET", tv)
+
+ tv, ok = actual.GetTag("s-computername")
+ require.True(t, ok)
+ require.Equal(t, "MIXEDCASE_hostname", tv)
+
+ tv, ok = actual.GetTag("s-computername_titlecase")
+ require.True(t, ok)
+ require.Equal(t, "MIXEDCASE_hostname", tv)
+ },
+ },
}
for _, tt := range tests {
@@ -337,6 +646,87 @@ func TestTagConversions(t *testing.T) {
}
}
+func TestTagKeyConversions(t *testing.T) {
+ tests := []struct {
+ name string
+ plugin *Strings
+ check func(t *testing.T, actual telegraf.Metric)
+ }{
+ {
+ name: "Should change existing tag key to lowercase",
+ plugin: &Strings{
+ Lowercase: []converter{
+ {
+ Tag: "S-ComputerName",
+ TagKey: "S-ComputerName",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ tv, ok := actual.GetTag("verb")
+ require.True(t, ok)
+ require.Equal(t, "GET", tv)
+
+ tv, ok = actual.GetTag("s-computername")
+ require.True(t, ok)
+ require.Equal(t, "mixedcase_hostname", tv)
+ },
+ },
+ {
+ name: "Should add new lowercase tag key",
+ plugin: &Strings{
+ Lowercase: []converter{
+ {
+ TagKey: "S-ComputerName",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ tv, ok := actual.GetTag("verb")
+ require.True(t, ok)
+ require.Equal(t, "GET", tv)
+
+ tv, ok = actual.GetTag("S-ComputerName")
+ require.False(t, ok)
+
+ tv, ok = actual.GetTag("s-computername")
+ require.True(t, ok)
+ require.Equal(t, "MIXEDCASE_hostname", tv)
+ },
+ },
+ {
+ name: "Should add new uppercase tag key",
+ plugin: &Strings{
+ Uppercase: []converter{
+ {
+ TagKey: "S-ComputerName",
+ },
+ },
+ },
+ check: func(t *testing.T, actual telegraf.Metric) {
+ tv, ok := actual.GetTag("verb")
+ require.True(t, ok)
+ require.Equal(t, "GET", tv)
+
+ tv, ok = actual.GetTag("S-ComputerName")
+ require.False(t, ok)
+
+ tv, ok = actual.GetTag("S-COMPUTERNAME")
+ require.True(t, ok)
+ require.Equal(t, "MIXEDCASE_hostname", tv)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ metrics := tt.plugin.Apply(newM2())
+ require.Len(t, metrics, 1)
+ tt.check(t, metrics[0])
+ })
+ }
+}
+
func TestMeasurementConversions(t *testing.T) {
tests := []struct {
name string
@@ -386,6 +776,11 @@ func TestMultipleConversions(t *testing.T) {
Tag: "verb",
},
},
+ Titlecase: []converter{
+ {
+ Field: "status",
+ },
+ },
Replace: []converter{
{
Tag: "foo",
@@ -413,6 +808,7 @@ func TestMultipleConversions(t *testing.T) {
"cs-host": "AAAbbb",
"ignore_number": int64(200),
"ignore_bool": true,
+ "status": "green",
},
time.Now(),
)
@@ -425,6 +821,7 @@ func TestMultipleConversions(t *testing.T) {
"ignore_bool": true,
"cs-host": "AAAbbb",
"cs-host_lowercase": "aaabbb",
+ "status": "Green",
}
expectedTags := map[string]string{
"verb": "GET",
@@ -543,3 +940,110 @@ func TestMeasurementCharDeletion(t *testing.T) {
assert.Equal(t, "foofoofoo", results[1].Name(), "Should have refused to delete the whole string")
assert.Equal(t, "barbarbar", results[2].Name(), "Should not have changed the input")
}
+
+func TestBase64Decode(t *testing.T) {
+ tests := []struct {
+ name string
+ plugin *Strings
+ metric []telegraf.Metric
+ expected []telegraf.Metric
+ }{
+ {
+ name: "base64decode success",
+ plugin: &Strings{
+ Base64Decode: []converter{
+ {
+ Field: "message",
+ },
+ },
+ },
+ metric: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "message": "aG93ZHk=",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "message": "howdy",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "base64decode not valid base64 returns original string",
+ plugin: &Strings{
+ Base64Decode: []converter{
+ {
+ Field: "message",
+ },
+ },
+ },
+ metric: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "message": "_not_base64_",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "message": "_not_base64_",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ {
+ name: "base64decode not valid utf-8 returns original string",
+ plugin: &Strings{
+ Base64Decode: []converter{
+ {
+ Field: "message",
+ },
+ },
+ },
+ metric: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "message": "//5oAG8AdwBkAHkA",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "message": "//5oAG8AdwBkAHkA",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := tt.plugin.Apply(tt.metric...)
+ testutil.RequireMetricsEqual(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/plugins/processors/tag_limit/README.md b/plugins/processors/tag_limit/README.md
new file mode 100644
index 0000000000000..b287f0f8d4df3
--- /dev/null
+++ b/plugins/processors/tag_limit/README.md
@@ -0,0 +1,27 @@
+# Tag Limit Processor Plugin
+
+Use the `tag_limit` processor to ensure that only a certain number of tags are
+preserved for any given metric, and to choose the tags to preserve when the
+number of tags appended by the data source is over the limit.
+
+This can be useful when dealing with output systems (e.g. Stackdriver) that
+impose hard limits on the number of tags/labels per metric or where high
+levels of cardinality are computationally and/or financially expensive.
+
+### Configuration
+
+```toml
+[[processors.tag_limit]]
+ ## Maximum number of tags to preserve
+ limit = 3
+
+ ## List of tags to preferentially preserve
+ keep = ["environment", "region"]
+```
+
+### Example
+
+```diff
++ throughput month=Jun,environment=qa,region=us-east1,lower=10i,upper=1000i,mean=500i 1560540094000000000
++ throughput environment=qa,region=us-east1,lower=10i 1560540094000000000
+```
diff --git a/plugins/processors/tag_limit/tag_limit.go b/plugins/processors/tag_limit/tag_limit.go
new file mode 100644
index 0000000000000..41353a8f863c4
--- /dev/null
+++ b/plugins/processors/tag_limit/tag_limit.go
@@ -0,0 +1,86 @@
+package taglimit
+
+import (
+ "fmt"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+ "log"
+)
+
+const sampleConfig = `
+ ## Maximum number of tags to preserve
+ limit = 10
+
+ ## List of tags to preferentially preserve
+ keep = ["foo", "bar", "baz"]
+`
+
+type TagLimit struct {
+ Limit int `toml:"limit"`
+ Keep []string `toml:"keep"`
+ init bool
+ keepTags map[string]string
+}
+
+func (d *TagLimit) SampleConfig() string {
+ return sampleConfig
+}
+
+func (d *TagLimit) Description() string {
+ return "Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit."
+}
+
+func (d *TagLimit) initOnce() error {
+ if d.init {
+ return nil
+ }
+ if len(d.Keep) > d.Limit {
+ return fmt.Errorf("%d keep tags is greater than %d total tag limit", len(d.Keep), d.Limit)
+ }
+ d.keepTags = make(map[string]string)
+ // convert list of tags-to-keep to a map so we can do constant-time lookups
+ for _, tag_key := range d.Keep {
+ d.keepTags[tag_key] = ""
+ }
+ d.init = true
+ return nil
+}
+
+func (d *TagLimit) Apply(in ...telegraf.Metric) []telegraf.Metric {
+ err := d.initOnce()
+ if err != nil {
+ log.Printf("E! [processors.tag_limit] could not create tag_limit processor: %v", err)
+ return in
+ }
+ for _, point := range in {
+ pointOriginalTags := point.TagList()
+ lenPointTags := len(pointOriginalTags)
+ if lenPointTags <= d.Limit {
+ continue
+ }
+ tagsToRemove := make([]string, lenPointTags-d.Limit)
+ removeIdx := 0
+ // remove extraneous tags, stop once we're at the limit
+ for _, t := range pointOriginalTags {
+ if _, ok := d.keepTags[t.Key]; !ok {
+ tagsToRemove[removeIdx] = t.Key
+ removeIdx++
+ lenPointTags--
+ }
+ if lenPointTags <= d.Limit {
+ break
+ }
+ }
+ for _, t := range tagsToRemove {
+ point.RemoveTag(t)
+ }
+ }
+
+ return in
+}
+
+func init() {
+ processors.Add("tag_limit", func() telegraf.Processor {
+ return &TagLimit{}
+ })
+}
diff --git a/plugins/processors/tag_limit/tag_limit_test.go b/plugins/processors/tag_limit/tag_limit_test.go
new file mode 100644
index 0000000000000..9412d866b78e8
--- /dev/null
+++ b/plugins/processors/tag_limit/tag_limit_test.go
@@ -0,0 +1,86 @@
+package taglimit
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+ "github.com/stretchr/testify/assert"
+)
+
+func MustMetric(name string, tags map[string]string, fields map[string]interface{}, metricTime time.Time) telegraf.Metric {
+ if tags == nil {
+ tags = map[string]string{}
+ }
+ if fields == nil {
+ fields = map[string]interface{}{}
+ }
+ m, _ := metric.New(name, tags, fields, metricTime)
+ return m
+}
+
+func TestUnderLimit(t *testing.T) {
+ currentTime := time.Now()
+
+ oneTags := make(map[string]string)
+ oneTags["foo"] = "bar"
+
+ tenTags := make(map[string]string)
+ tenTags["a"] = "bar"
+ tenTags["b"] = "bar"
+ tenTags["c"] = "bar"
+ tenTags["d"] = "bar"
+ tenTags["e"] = "bar"
+ tenTags["f"] = "bar"
+ tenTags["g"] = "bar"
+ tenTags["h"] = "bar"
+ tenTags["i"] = "bar"
+ tenTags["j"] = "bar"
+
+ tagLimitConfig := TagLimit{
+ Limit: 10,
+ Keep: []string{"foo", "bar"},
+ }
+
+ m1 := MustMetric("foo", oneTags, nil, currentTime)
+ m2 := MustMetric("bar", tenTags, nil, currentTime)
+ limitApply := tagLimitConfig.Apply(m1, m2)
+ assert.Equal(t, oneTags, limitApply[0].Tags(), "one tag")
+ assert.Equal(t, tenTags, limitApply[1].Tags(), "ten tags")
+}
+
+func TestTrim(t *testing.T) {
+ currentTime := time.Now()
+
+ threeTags := make(map[string]string)
+ threeTags["a"] = "foo"
+ threeTags["b"] = "bar"
+ threeTags["z"] = "baz"
+
+ tenTags := make(map[string]string)
+ tenTags["a"] = "foo"
+ tenTags["b"] = "bar"
+ tenTags["c"] = "baz"
+ tenTags["d"] = "abc"
+ tenTags["e"] = "def"
+ tenTags["f"] = "ghi"
+ tenTags["g"] = "jkl"
+ tenTags["h"] = "mno"
+ tenTags["i"] = "pqr"
+ tenTags["j"] = "stu"
+
+ tagLimitConfig := TagLimit{
+ Limit: 3,
+ Keep: []string{"a", "b"},
+ }
+
+ m1 := MustMetric("foo", threeTags, nil, currentTime)
+ m2 := MustMetric("bar", tenTags, nil, currentTime)
+ limitApply := tagLimitConfig.Apply(m1, m2)
+ assert.Equal(t, threeTags, limitApply[0].Tags(), "three tags")
+ trimmedTags := limitApply[1].Tags()
+ assert.Equal(t, 3, len(trimmedTags), "ten tags")
+ assert.Equal(t, "foo", trimmedTags["a"], "preserved: a")
+ assert.Equal(t, "bar", trimmedTags["b"], "preserved: b")
+}
diff --git a/plugins/processors/template/README.md b/plugins/processors/template/README.md
new file mode 100644
index 0000000000000..348dae096b0c2
--- /dev/null
+++ b/plugins/processors/template/README.md
@@ -0,0 +1,59 @@
+# Template Processor
+
+The `template` processor applies a Go template to metrics to generate a new
+tag. The primary use case of this plugin is to create a tag that can be used
+for dynamic routing to multiple output plugins or using an output specific
+routing option.
+
+The template has access to each metric's measurement name, tags, fields, and
+timestamp using the [interface in `/template_metric.go`](template_metric.go).
+
+Read the full [Go Template Documentation][].
+
+### Configuration
+
+```toml
+[[processors.template]]
+ ## Tag to set with the output of the template.
+ tag = "topic"
+
+ ## Go template used to create the tag value. In order to ease TOML
+ ## escaping requirements, you may wish to use single quotes around the
+ ## template string.
+ template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+```
+
+### Example
+
+Combine multiple tags to create a single tag:
+```toml
+[[processors.template]]
+ tag = "topic"
+ template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+```
+
+```diff
+- cpu,level=debug,hostname=localhost time_idle=42
++ cpu,level=debug,hostname=localhost,topic=localhost.debug time_idle=42
+```
+
+Add measurement name as a tag:
+```toml
+[[processors.template]]
+ tag = "measurement"
+ template = '{{ .Name }}'
+```
+
+```diff
+- cpu,hostname=localhost time_idle=42
++ cpu,hostname=localhost,measurement=cpu time_idle=42
+```
+
+Add the year as a tag, similar to the date processor:
+```toml
+[[processors.template]]
+ tag = "year"
+ template = '{{.Time.UTC.Year}}'
+```
+
+[Go Template Documentation]: https://golang.org/pkg/text/template/
diff --git a/plugins/processors/template/template.go b/plugins/processors/template/template.go
new file mode 100644
index 0000000000000..f4470a07c51a3
--- /dev/null
+++ b/plugins/processors/template/template.go
@@ -0,0 +1,66 @@
+package template
+
+import (
+ "strings"
+ "text/template"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+type TemplateProcessor struct {
+ Tag string `toml:"tag"`
+ Template string `toml:"template"`
+ Log telegraf.Logger `toml:"-"`
+ tmpl *template.Template
+}
+
+const sampleConfig = `
+ ## Tag to set with the output of the template.
+ tag = "topic"
+
+ ## Go template used to create the tag value. In order to ease TOML
+ ## escaping requirements, you may wish to use single quotes around the
+ ## template string.
+ template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+`
+
+func (r *TemplateProcessor) SampleConfig() string {
+ return sampleConfig
+}
+
+func (r *TemplateProcessor) Description() string {
+ return "Uses a Go template to create a new tag"
+}
+
+func (r *TemplateProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
+ // for each metric in "in" array
+ for _, metric := range in {
+ var b strings.Builder
+ newM := TemplateMetric{metric}
+
+ // supply TemplateMetric and Template from configuration to Template.Execute
+ err := r.tmpl.Execute(&b, &newM)
+ if err != nil {
+ r.Log.Errorf("failed to execute template: %v", err)
+ continue
+ }
+
+ metric.AddTag(r.Tag, b.String())
+ }
+ return in
+}
+
+func (r *TemplateProcessor) Init() error {
+ // create template
+ t, err := template.New("configured_template").Parse(r.Template)
+
+ r.tmpl = t
+ return err
+}
+
+func init() {
+ processors.Add("template", func() telegraf.Processor {
+ return &TemplateProcessor{}
+ })
+}
diff --git a/plugins/processors/template/template_metric.go b/plugins/processors/template/template_metric.go
new file mode 100644
index 0000000000000..e4a81bd1c4779
--- /dev/null
+++ b/plugins/processors/template/template_metric.go
@@ -0,0 +1,29 @@
+package template
+
+import (
+ "time"
+
+ "github.com/influxdata/telegraf"
+)
+
+type TemplateMetric struct {
+ metric telegraf.Metric
+}
+
+func (m *TemplateMetric) Name() string {
+ return m.metric.Name()
+}
+
+func (m *TemplateMetric) Tag(key string) string {
+ tagString, _ := m.metric.GetTag(key)
+ return tagString
+}
+
+func (m *TemplateMetric) Field(key string) interface{} {
+ field, _ := m.metric.GetField(key)
+ return field
+}
+
+func (m *TemplateMetric) Time() time.Time {
+ return m.metric.Time()
+}
diff --git a/plugins/processors/template/template_test.go b/plugins/processors/template/template_test.go
new file mode 100644
index 0000000000000..f43d697956193
--- /dev/null
+++ b/plugins/processors/template/template_test.go
@@ -0,0 +1,117 @@
+package template
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestName(t *testing.T) {
+ plugin := TemplateProcessor{
+ Tag: "measurement",
+ Template: "{{ .Name }}",
+ }
+
+ err := plugin.Init()
+ require.NoError(t, err)
+
+ input := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ actual := plugin.Apply(input...)
+ expected := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "measurement": "cpu",
+ },
+ map[string]interface{}{
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+ testutil.RequireMetricsEqual(t, expected, actual)
+}
+
+func TestTagTemplateConcatenate(t *testing.T) {
+ now := time.Now()
+
+ // Create Template processor
+ tmp := TemplateProcessor{Tag: "topic", Template: `{{.Tag "hostname"}}.{{ .Tag "level" }}`}
+ // manually init
+ err := tmp.Init()
+
+ if err != nil {
+ panic(err)
+ }
+
+ // create metric for testing
+ input := []telegraf.Metric{testutil.MustMetric("Tags", map[string]string{"hostname": "localhost", "level": "debug"}, nil, now)}
+
+ // act
+ actual := tmp.Apply(input[0])
+
+ // assert
+ expected := []telegraf.Metric{testutil.MustMetric("Tags", map[string]string{"hostname": "localhost", "level": "debug", "topic": "localhost.debug"}, nil, now)}
+ testutil.RequireMetricsEqual(t, expected, actual)
+}
+
+func TestMetricMissingTagsIsNotLost(t *testing.T) {
+ now := time.Now()
+
+ // Create Template processor
+ tmp := TemplateProcessor{Tag: "topic", Template: `{{.Tag "hostname"}}.{{ .Tag "level" }}`}
+ // manually init
+ err := tmp.Init()
+
+ if err != nil {
+ panic(err)
+ }
+
+ // create metrics for testing
+ m1 := testutil.MustMetric("Works", map[string]string{"hostname": "localhost", "level": "debug"}, nil, now)
+ m2 := testutil.MustMetric("Fails", map[string]string{"hostname": "localhost"}, nil, now)
+
+ // act
+ actual := tmp.Apply(m1, m2)
+
+ // assert
+ // make sure no metrics are lost when a template process fails
+ assert.Equal(t, 2, len(actual), "Number of metrics input should equal number of metrics output")
+}
+
+func TestTagAndFieldConcatenate(t *testing.T) {
+ now := time.Now()
+
+ // Create Template processor
+ tmp := TemplateProcessor{Tag: "LocalTemp", Template: `{{.Tag "location"}} is {{ .Field "temperature" }}`}
+ // manually init
+ err := tmp.Init()
+
+ if err != nil {
+ panic(err)
+ }
+
+ // create metric for testing
+ m1 := testutil.MustMetric("weather", map[string]string{"location": "us-midwest"}, map[string]interface{}{"temperature": "too warm"}, now)
+
+ // act
+ actual := tmp.Apply(m1)
+
+ // assert
+ expected := []telegraf.Metric{testutil.MustMetric("weather", map[string]string{"location": "us-midwest", "LocalTemp": "us-midwest is too warm"}, map[string]interface{}{"temperature": "too warm"}, now)}
+ testutil.RequireMetricsEqual(t, expected, actual)
+}
diff --git a/plugins/processors/topk/README.md b/plugins/processors/topk/README.md
index 15046991d7ff1..308d4f9f85f05 100644
--- a/plugins/processors/topk/README.md
+++ b/plugins/processors/topk/README.md
@@ -53,7 +53,7 @@ Note that depending on the amount of metrics on each computed bucket, more than
# add_rank_fields = []
## These settings provide a way to know what values the plugin is generating
- ## when aggregating metrics. The 'add_agregate_field' setting allows to
+ ## when aggregating metrics. The 'add_aggregate_field' setting allows to
## specify for which fields the final aggregation value is required. If the
## list is non empty, then a field will be added to each every metric for
## each field present in this setting. This field will contain
diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go
index c2244c6e32dad..907ec1cc41fc6 100644
--- a/plugins/processors/topk/topk.go
+++ b/plugins/processors/topk/topk.go
@@ -90,7 +90,7 @@ var sampleConfig = `
# add_rank_fields = []
## These settings provide a way to know what values the plugin is generating
- ## when aggregating metrics. The 'add_agregate_field' setting allows to
+ ## when aggregating metrics. The 'add_aggregate_field' setting allows to
## specify for which fields the final aggregation value is required. If the
## list is non empty, then a field will be added to each every metric for
## each field present in this setting. This field will contain
diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go
index ff0eb4d8b1844..928111b29d7da 100644
--- a/plugins/processors/topk/topk_test.go
+++ b/plugins/processors/topk/topk_test.go
@@ -35,7 +35,7 @@ type metricChange struct {
newTags []tag // Tags that should be added to the metric
runHash bool // Sometimes the metrics' HashID must be run so reflect.DeepEqual works
- // This happens because telegraf.Metric mantains an internal cache of
+ // This happens because telegraf.Metric maintains an internal cache of
// its hash value that is set when HashID() is called for the first time
}
@@ -149,7 +149,7 @@ func TestTopkAggregatorsSmokeTests(t *testing.T) {
aggregators := []string{"mean", "sum", "max", "min"}
- //The answer is equal to the original set for these particual scenarios
+ //The answer is equal to the original set for these particular scenarios
input := MetricsSet1
answer := MetricsSet1
diff --git a/plugins/processors/unpivot/README.md b/plugins/processors/unpivot/README.md
new file mode 100644
index 0000000000000..beee6c276608a
--- /dev/null
+++ b/plugins/processors/unpivot/README.md
@@ -0,0 +1,26 @@
+# Unpivot Processor
+
+You can use the `unpivot` processor to rotate a multi field series into single valued metrics. This transformation often results in data that is more easy to aggregate across fields.
+
+To perform the reverse operation use the [pivot] processor.
+
+### Configuration
+
+```toml
+[[processors.unpivot]]
+ ## Tag to use for the name.
+ tag_key = "name"
+ ## Field to use for the name of the value.
+ value_key = "value"
+```
+
+### Example
+
+```diff
+- cpu,cpu=cpu0 time_idle=42i,time_user=43i
++ cpu,cpu=cpu0,name=time_idle value=42i
++ cpu,cpu=cpu0,name=time_user value=43i
+```
+
+[pivot]: /plugins/processors/pivot/README.md
+
diff --git a/plugins/processors/unpivot/unpivot.go b/plugins/processors/unpivot/unpivot.go
new file mode 100644
index 0000000000000..4a081a428d403
--- /dev/null
+++ b/plugins/processors/unpivot/unpivot.go
@@ -0,0 +1,71 @@
+package unpivot
+
+import (
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/processors"
+)
+
+const (
+ description = "Rotate multi field metric into several single field metrics"
+ sampleConfig = `
+ ## Tag to use for the name.
+ tag_key = "name"
+ ## Field to use for the name of the value.
+ value_key = "value"
+`
+)
+
+type Unpivot struct {
+ TagKey string `toml:"tag_key"`
+ ValueKey string `toml:"value_key"`
+}
+
+func (p *Unpivot) SampleConfig() string {
+ return sampleConfig
+}
+
+func (p *Unpivot) Description() string {
+ return description
+}
+
+func copyWithoutFields(metric telegraf.Metric) telegraf.Metric {
+ m := metric.Copy()
+
+ fieldKeys := make([]string, 0, len(m.FieldList()))
+ for _, field := range m.FieldList() {
+ fieldKeys = append(fieldKeys, field.Key)
+ }
+
+ for _, fk := range fieldKeys {
+ m.RemoveField(fk)
+ }
+
+ return m
+}
+
+func (p *Unpivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
+ fieldCount := 0
+ for _, m := range metrics {
+ fieldCount += len(m.FieldList())
+ }
+
+ results := make([]telegraf.Metric, 0, fieldCount)
+
+ for _, m := range metrics {
+ base := copyWithoutFields(m)
+ for _, field := range m.FieldList() {
+ newMetric := base.Copy()
+ newMetric.AddField(p.ValueKey, field.Value)
+ newMetric.AddTag(p.TagKey, field.Key)
+ results = append(results, newMetric)
+ }
+ m.Accept()
+ }
+ return results
+}
+
+func init() {
+ processors.Add("unpivot", func() telegraf.Processor {
+ return &Unpivot{}
+ })
+}
diff --git a/plugins/processors/unpivot/unpivot_test.go b/plugins/processors/unpivot/unpivot_test.go
new file mode 100644
index 0000000000000..a3a5385031661
--- /dev/null
+++ b/plugins/processors/unpivot/unpivot_test.go
@@ -0,0 +1,90 @@
+package unpivot
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func TestUnpivot(t *testing.T) {
+ now := time.Now()
+ tests := []struct {
+ name string
+ unpivot *Unpivot
+ metrics []telegraf.Metric
+ expected []telegraf.Metric
+ }{
+ {
+ name: "simple",
+ unpivot: &Unpivot{
+ TagKey: "name",
+ ValueKey: "value",
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "idle_time": int64(42),
+ },
+ now,
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "name": "idle_time",
+ },
+ map[string]interface{}{
+ "value": int64(42),
+ },
+ now,
+ ),
+ },
+ },
+ {
+ name: "multi fields",
+ unpivot: &Unpivot{
+ TagKey: "name",
+ ValueKey: "value",
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "idle_time": int64(42),
+ "idle_user": int64(43),
+ },
+ now,
+ ),
+ },
+ expected: []telegraf.Metric{
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "name": "idle_time",
+ },
+ map[string]interface{}{
+ "value": int64(42),
+ },
+ now,
+ ),
+ testutil.MustMetric("cpu",
+ map[string]string{
+ "name": "idle_user",
+ },
+ map[string]interface{}{
+ "value": int64(43),
+ },
+ now,
+ ),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := tt.unpivot.Apply(tt.metrics...)
+ testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.SortMetrics())
+ })
+ }
+}
diff --git a/plugins/serializers/carbon2/README.md b/plugins/serializers/carbon2/README.md
index e88b18cf079d6..e32a420aec0af 100644
--- a/plugins/serializers/carbon2/README.md
+++ b/plugins/serializers/carbon2/README.md
@@ -2,7 +2,7 @@
The `carbon2` serializer translates the Telegraf metric format to the [Carbon2 format](http://metrics20.org/implementations/).
-### Configuration
+## Configuration
```toml
[[outputs.file]]
@@ -14,20 +14,51 @@ The `carbon2` serializer translates the Telegraf metric format to the [Carbon2 f
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "carbon2"
+
+ ## Optionally configure metrics format, whether to merge metric name and field name.
+ ## Possible options:
+ ## * "field_separate"
+ ## * "metric_includes_field"
+ ## * "" - defaults to "field_separate"
+ # carbon2_format = "field_separate"
```
Standard form:
+
```
metric=name field=field_1 host=foo 30 1234567890
metric=name field=field_2 host=foo 4 1234567890
metric=name field=field_N host=foo 59 1234567890
```
-### Metrics
+### Metrics format
+
+`Carbon2` serializer has a configuration option - `carbon2_format` - to change how
+metrics names are being constructed.
-The serializer converts the metrics by creating `intrinsic_tags` using the combination of metric name and fields. So, if one Telegraf metric has 4 fields, the `carbon2` output will be 4 separate metrics. There will be a `metric` tag that represents the name of the metric and a `field` tag to represent the field.
+By default `metric` will only inclue the metric name and a separate field `field`
+will contain the field name.
+This is the behavior of `carbon2_format = "field_separate"` which is the default
+behavior (even if unspecified).
-### Example
+Optionally user can opt in to change this to make the metric inclue the field name
+after the `_`.
+This is the behavior of `carbon2_format = "metric_includes_field"` which would
+make the above example look like:
+
+```
+metric=name_field_1 host=foo 30 1234567890
+metric=name_field_2 host=foo 4 1234567890
+metric=name_field_N host=foo 59 1234567890
+```
+
+## Metrics
+
+The serializer converts the metrics by creating `intrinsic_tags` using the combination of metric name and fields.
+So, if one Telegraf metric has 4 fields, the `carbon2` output will be 4 separate metrics.
+There will be a `metric` tag that represents the name of the metric and a `field` tag to represent the field.
+
+## Example
If we take the following InfluxDB Line Protocol:
@@ -42,8 +73,10 @@ metric=weather field=temperature location=us-midwest season=summer 82 123456789
metric=weather field=wind location=us-midwest season=summer 100 1234567890
```
-### Fields and Tags with spaces
+## Fields and Tags with spaces
+
When a field key or tag key/value have spaces, spaces will be replaced with `_`.
-### Tags with empty values
+## Tags with empty values
+
When a tag's value is empty, it will be replaced with `null`
diff --git a/plugins/serializers/carbon2/carbon2.go b/plugins/serializers/carbon2/carbon2.go
index fc11de0624760..10611815b8a7e 100644
--- a/plugins/serializers/carbon2/carbon2.go
+++ b/plugins/serializers/carbon2/carbon2.go
@@ -3,24 +3,52 @@ package carbon2
import (
"bytes"
"fmt"
- "github.com/influxdata/telegraf"
"strconv"
"strings"
+
+ "github.com/influxdata/telegraf"
)
-type serializer struct {
+type format string
+
+const (
+ Carbon2FormatFieldSeparate string = "field_separate"
+ Carbon2FormatMetricIncludesField string = "metric_includes_field"
+
+ formatFieldSeparate = format(Carbon2FormatFieldSeparate)
+ formatMetricIncludesField = format(Carbon2FormatMetricIncludesField)
+)
+
+var formats = map[string]format{
+ // Field separate is the default when no format specified.
+ "": formatFieldSeparate,
+ Carbon2FormatFieldSeparate: formatFieldSeparate,
+ Carbon2FormatMetricIncludesField: formatMetricIncludesField,
+}
+
+type Serializer struct {
+ metricsFormat format
}
-func NewSerializer() (*serializer, error) {
- s := &serializer{}
- return s, nil
+func NewSerializer(f string) (*Serializer, error) {
+ var (
+ ok bool
+ metricsFormat format
+ )
+ if metricsFormat, ok = formats[f]; !ok {
+ return nil, fmt.Errorf("unknown carbon2 format: %s", f)
+ }
+
+ return &Serializer{
+ metricsFormat: metricsFormat,
+ }, nil
}
-func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
+func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
return s.createObject(metric), nil
}
-func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
+func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
var batch bytes.Buffer
for _, metric := range metrics {
batch.Write(s.createObject(metric))
@@ -28,35 +56,57 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
return batch.Bytes(), nil
}
-func (s *serializer) createObject(metric telegraf.Metric) []byte {
+func (s *Serializer) createObject(metric telegraf.Metric) []byte {
var m bytes.Buffer
for fieldName, fieldValue := range metric.Fields() {
- if isNumeric(fieldValue) {
- m.WriteString("metric=")
- m.WriteString(strings.Replace(metric.Name(), " ", "_", -1))
- m.WriteString(" field=")
- m.WriteString(strings.Replace(fieldName, " ", "_", -1))
- m.WriteString(" ")
- for _, tag := range metric.TagList() {
- m.WriteString(strings.Replace(tag.Key, " ", "_", -1))
- m.WriteString("=")
- value := tag.Value
- if len(value) == 0 {
- value = "null"
- }
- m.WriteString(strings.Replace(value, " ", "_", -1))
- m.WriteString(" ")
+ if !isNumeric(fieldValue) {
+ continue
+ }
+
+ switch s.metricsFormat {
+ case formatFieldSeparate:
+ m.WriteString(serializeMetricFieldSeparate(
+ metric.Name(), fieldName,
+ ))
+ case formatMetricIncludesField:
+ m.WriteString(serializeMetricIncludeField(
+ metric.Name(), fieldName,
+ ))
+ }
+
+ for _, tag := range metric.TagList() {
+ m.WriteString(strings.Replace(tag.Key, " ", "_", -1))
+ m.WriteString("=")
+ value := tag.Value
+ if len(value) == 0 {
+ value = "null"
}
+ m.WriteString(strings.Replace(value, " ", "_", -1))
m.WriteString(" ")
- m.WriteString(fmt.Sprintf("%v", fieldValue))
- m.WriteString(" ")
- m.WriteString(strconv.FormatInt(metric.Time().Unix(), 10))
- m.WriteString("\n")
}
+ m.WriteString(" ")
+ m.WriteString(fmt.Sprintf("%v", fieldValue))
+ m.WriteString(" ")
+ m.WriteString(strconv.FormatInt(metric.Time().Unix(), 10))
+ m.WriteString("\n")
}
return m.Bytes()
}
+func serializeMetricFieldSeparate(name, fieldName string) string {
+ return fmt.Sprintf("metric=%s field=%s ",
+ strings.Replace(name, " ", "_", -1),
+ strings.Replace(fieldName, " ", "_", -1),
+ )
+}
+
+func serializeMetricIncludeField(name, fieldName string) string {
+ return fmt.Sprintf("metric=%s_%s ",
+ strings.Replace(name, " ", "_", -1),
+ strings.Replace(fieldName, " ", "_", -1),
+ )
+}
+
func isNumeric(v interface{}) bool {
switch v.(type) {
case string:
diff --git a/plugins/serializers/carbon2/carbon2_test.go b/plugins/serializers/carbon2/carbon2_test.go
index f335342d54b18..aadc55f7ede96 100644
--- a/plugins/serializers/carbon2/carbon2_test.go
+++ b/plugins/serializers/carbon2/carbon2_test.go
@@ -2,13 +2,14 @@ package carbon2
import (
"fmt"
- "github.com/stretchr/testify/require"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
- "github.com/stretchr/testify/assert"
)
func MustMetric(v telegraf.Metric, err error) telegraf.Metric {
@@ -27,14 +28,33 @@ func TestSerializeMetricFloat(t *testing.T) {
"usage_idle": float64(91.5),
}
m, err := metric.New("cpu", tags, fields, now)
- assert.NoError(t, err)
+ require.NoError(t, err)
- s, _ := NewSerializer()
- var buf []byte
- buf, err = s.Serialize(m)
- assert.NoError(t, err)
- expS := []byte(fmt.Sprintf(`metric=cpu field=usage_idle cpu=cpu0 91.5 %d`, now.Unix()) + "\n")
- assert.Equal(t, string(expS), string(buf))
+ testcases := []struct {
+ format string
+ expected string
+ }{
+ {
+ format: Carbon2FormatFieldSeparate,
+ expected: fmt.Sprintf("metric=cpu field=usage_idle cpu=cpu0 91.5 %d\n", now.Unix()),
+ },
+ {
+ format: Carbon2FormatMetricIncludesField,
+ expected: fmt.Sprintf("metric=cpu_usage_idle cpu=cpu0 91.5 %d\n", now.Unix()),
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.format, func(t *testing.T) {
+ s, err := NewSerializer(tc.format)
+ require.NoError(t, err)
+
+ buf, err := s.Serialize(m)
+ require.NoError(t, err)
+
+ assert.Equal(t, tc.expected, string(buf))
+ })
+ }
}
func TestSerializeMetricWithEmptyStringTag(t *testing.T) {
@@ -46,14 +66,33 @@ func TestSerializeMetricWithEmptyStringTag(t *testing.T) {
"usage_idle": float64(91.5),
}
m, err := metric.New("cpu", tags, fields, now)
- assert.NoError(t, err)
+ require.NoError(t, err)
- s, _ := NewSerializer()
- var buf []byte
- buf, err = s.Serialize(m)
- assert.NoError(t, err)
- expS := []byte(fmt.Sprintf(`metric=cpu field=usage_idle cpu=null 91.5 %d`, now.Unix()) + "\n")
- assert.Equal(t, string(expS), string(buf))
+ testcases := []struct {
+ format string
+ expected string
+ }{
+ {
+ format: Carbon2FormatFieldSeparate,
+ expected: fmt.Sprintf("metric=cpu field=usage_idle cpu=null 91.5 %d\n", now.Unix()),
+ },
+ {
+ format: Carbon2FormatMetricIncludesField,
+ expected: fmt.Sprintf("metric=cpu_usage_idle cpu=null 91.5 %d\n", now.Unix()),
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.format, func(t *testing.T) {
+ s, err := NewSerializer(tc.format)
+ require.NoError(t, err)
+
+ buf, err := s.Serialize(m)
+ require.NoError(t, err)
+
+ assert.Equal(t, tc.expected, string(buf))
+ })
+ }
}
func TestSerializeWithSpaces(t *testing.T) {
@@ -65,14 +104,33 @@ func TestSerializeWithSpaces(t *testing.T) {
"usage_idle 1": float64(91.5),
}
m, err := metric.New("cpu metric", tags, fields, now)
- assert.NoError(t, err)
+ require.NoError(t, err)
- s, _ := NewSerializer()
- var buf []byte
- buf, err = s.Serialize(m)
- assert.NoError(t, err)
- expS := []byte(fmt.Sprintf(`metric=cpu_metric field=usage_idle_1 cpu_0=cpu_0 91.5 %d`, now.Unix()) + "\n")
- assert.Equal(t, string(expS), string(buf))
+ testcases := []struct {
+ format string
+ expected string
+ }{
+ {
+ format: Carbon2FormatFieldSeparate,
+ expected: fmt.Sprintf("metric=cpu_metric field=usage_idle_1 cpu_0=cpu_0 91.5 %d\n", now.Unix()),
+ },
+ {
+ format: Carbon2FormatMetricIncludesField,
+ expected: fmt.Sprintf("metric=cpu_metric_usage_idle_1 cpu_0=cpu_0 91.5 %d\n", now.Unix()),
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.format, func(t *testing.T) {
+ s, err := NewSerializer(tc.format)
+ require.NoError(t, err)
+
+ buf, err := s.Serialize(m)
+ require.NoError(t, err)
+
+ assert.Equal(t, tc.expected, string(buf))
+ })
+ }
}
func TestSerializeMetricInt(t *testing.T) {
@@ -84,15 +142,33 @@ func TestSerializeMetricInt(t *testing.T) {
"usage_idle": int64(90),
}
m, err := metric.New("cpu", tags, fields, now)
- assert.NoError(t, err)
+ require.NoError(t, err)
- s, _ := NewSerializer()
- var buf []byte
- buf, err = s.Serialize(m)
- assert.NoError(t, err)
+ testcases := []struct {
+ format string
+ expected string
+ }{
+ {
+ format: Carbon2FormatFieldSeparate,
+ expected: fmt.Sprintf("metric=cpu field=usage_idle cpu=cpu0 90 %d\n", now.Unix()),
+ },
+ {
+ format: Carbon2FormatMetricIncludesField,
+ expected: fmt.Sprintf("metric=cpu_usage_idle cpu=cpu0 90 %d\n", now.Unix()),
+ },
+ }
- expS := []byte(fmt.Sprintf(`metric=cpu field=usage_idle cpu=cpu0 90 %d`, now.Unix()) + "\n")
- assert.Equal(t, string(expS), string(buf))
+ for _, tc := range testcases {
+ t.Run(tc.format, func(t *testing.T) {
+ s, err := NewSerializer(tc.format)
+ require.NoError(t, err)
+
+ buf, err := s.Serialize(m)
+ require.NoError(t, err)
+
+ assert.Equal(t, tc.expected, string(buf))
+ })
+ }
}
func TestSerializeMetricString(t *testing.T) {
@@ -106,13 +182,31 @@ func TestSerializeMetricString(t *testing.T) {
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
- s, _ := NewSerializer()
- var buf []byte
- buf, err = s.Serialize(m)
- assert.NoError(t, err)
+ testcases := []struct {
+ format string
+ expected string
+ }{
+ {
+ format: Carbon2FormatFieldSeparate,
+ expected: "",
+ },
+ {
+ format: Carbon2FormatMetricIncludesField,
+ expected: "",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.format, func(t *testing.T) {
+ s, err := NewSerializer(tc.format)
+ require.NoError(t, err)
- expS := []byte("")
- assert.Equal(t, string(expS), string(buf))
+ buf, err := s.Serialize(m)
+ require.NoError(t, err)
+
+ assert.Equal(t, tc.expected, string(buf))
+ })
+ }
}
func TestSerializeBatch(t *testing.T) {
@@ -128,11 +222,34 @@ func TestSerializeBatch(t *testing.T) {
)
metrics := []telegraf.Metric{m, m}
- s, _ := NewSerializer()
- buf, err := s.SerializeBatch(metrics)
- require.NoError(t, err)
- expS := []byte(`metric=cpu field=value 42 0
+
+ testcases := []struct {
+ format string
+ expected string
+ }{
+ {
+ format: Carbon2FormatFieldSeparate,
+ expected: `metric=cpu field=value 42 0
metric=cpu field=value 42 0
-`)
- assert.Equal(t, string(expS), string(buf))
+`,
+ },
+ {
+ format: Carbon2FormatMetricIncludesField,
+ expected: `metric=cpu_value 42 0
+metric=cpu_value 42 0
+`,
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.format, func(t *testing.T) {
+ s, err := NewSerializer(tc.format)
+ require.NoError(t, err)
+
+ buf, err := s.SerializeBatch(metrics)
+ require.NoError(t, err)
+
+ assert.Equal(t, tc.expected, string(buf))
+ })
+ }
}
diff --git a/plugins/serializers/graphite/README.md b/plugins/serializers/graphite/README.md
index 6cff2cbe516fc..f6fd0c2ccd9bd 100644
--- a/plugins/serializers/graphite/README.md
+++ b/plugins/serializers/graphite/README.md
@@ -23,8 +23,20 @@ method is used, otherwise the [Template Pattern](templates) is used.
## Graphite template pattern
template = "host.tags.measurement.field"
+ ## Graphite templates patterns
+ ## 1. Template for cpu
+ ## 2. Template for disk*
+ ## 3. Default template
+ # templates = [
+ # "cpu tags.measurement.host.field",
+ # "disk* measurement.field",
+ # "host.measurement.tags.field"
+ #]
+
## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
# graphite_tag_support = false
+ ## Character for separating metric name and field for Graphite tags
+ # graphite_separator = "."
```
#### graphite_tag_support
@@ -44,5 +56,12 @@ cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 145532
cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690
cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
```
+With set option `graphite_separator` to "_"
+```
+cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
+=>
+cpu_usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690
+cpu_usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
+```
[templates]: /docs/TEMPLATE_PATTERN.md
diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go
index d02b0e26bda85..e580409fe2b9f 100644
--- a/plugins/serializers/graphite/graphite.go
+++ b/plugins/serializers/graphite/graphite.go
@@ -10,13 +10,14 @@ import (
"strings"
"github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/filter"
)
const DEFAULT_TEMPLATE = "host.tags.measurement.field"
var (
allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`)
- hypenChars = strings.NewReplacer(
+ hyphenChars = strings.NewReplacer(
"/", "-",
"@", "-",
"*", "-",
@@ -29,10 +30,17 @@ var (
fieldDeleter = strings.NewReplacer(".FIELDNAME", "", "FIELDNAME.", "")
)
+type GraphiteTemplate struct {
+ Filter filter.Filter
+ Value string
+}
+
type GraphiteSerializer struct {
Prefix string
Template string
TagSupport bool
+ Separator string
+ Templates []*GraphiteTemplate
}
func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
@@ -48,7 +56,7 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
if fieldValue == "" {
continue
}
- bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, fieldName)
+ bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, s.Separator, fieldName)
metricString := fmt.Sprintf("%s %s %d\n",
// insert "field" section of template
bucket,
@@ -59,7 +67,15 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
out = append(out, point...)
}
default:
- bucket := SerializeBucketName(metric.Name(), metric.Tags(), s.Template, s.Prefix)
+ template := s.Template
+ for _, graphiteTemplate := range s.Templates {
+ if graphiteTemplate.Filter.Match(metric.Name()) {
+ template = graphiteTemplate.Value
+ break
+ }
+ }
+
+ bucket := SerializeBucketName(metric.Name(), metric.Tags(), template, s.Prefix)
if bucket == "" {
return out, nil
}
@@ -185,6 +201,45 @@ func SerializeBucketName(
return prefix + "." + strings.Join(out, ".")
}
+func InitGraphiteTemplates(templates []string) ([]*GraphiteTemplate, string, error) {
+ var graphiteTemplates []*GraphiteTemplate
+ defaultTemplate := ""
+
+ for i, t := range templates {
+ parts := strings.Fields(t)
+
+ if len(parts) == 0 {
+ return nil, "", fmt.Errorf("missing template at position: %d", i)
+ }
+ if len(parts) == 1 {
+ if parts[0] == "" {
+ return nil, "", fmt.Errorf("missing template at position: %d", i)
+ } else {
+ // Override default template
+ defaultTemplate = t
+ continue
+ }
+ }
+
+ if len(parts) > 2 {
+ return nil, "", fmt.Errorf("invalid template format: '%s'", t)
+ }
+
+ tFilter, err := filter.Compile([]string{parts[0]})
+
+ if err != nil {
+ return nil, "", err
+ }
+
+ graphiteTemplates = append(graphiteTemplates, &GraphiteTemplate{
+ Filter: tFilter,
+ Value: parts[1],
+ })
+ }
+
+ return graphiteTemplates, defaultTemplate, nil
+}
+
// SerializeBucketNameWithTags will take the given measurement name and tags and
// produce a graphite bucket. It will use the Graphite11Serializer.
// http://graphite.readthedocs.io/en/latest/tags.html
@@ -192,6 +247,7 @@ func SerializeBucketNameWithTags(
measurement string,
tags map[string]string,
prefix string,
+ separator string,
field string,
) string {
var out string
@@ -205,13 +261,13 @@ func SerializeBucketNameWithTags(
sort.Strings(tagsCopy)
if prefix != "" {
- out = prefix + "."
+ out = prefix + separator
}
out += measurement
if field != "value" {
- out += "." + field
+ out += separator + field
}
out = sanitize(out)
@@ -254,8 +310,8 @@ func buildTags(tags map[string]string) string {
}
func sanitize(value string) string {
- // Apply special hypenation rules to preserve backwards compatibility
- value = hypenChars.Replace(value)
+ // Apply special hyphenation rules to preserve backwards compatibility
+ value = hyphenChars.Replace(value)
// Apply rule to drop some chars to preserve backwards compatibility
value = dropChars.Replace(value)
// Replace any remaining illegal chars
diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go
index e72ed7a306bf9..b6fcad696dc2e 100644
--- a/plugins/serializers/graphite/graphite_test.go
+++ b/plugins/serializers/graphite/graphite_test.go
@@ -102,6 +102,7 @@ func TestSerializeMetricNoHostWithTagSupport(t *testing.T) {
s := GraphiteSerializer{
TagSupport: true,
+ Separator: ".",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
@@ -144,6 +145,97 @@ func TestSerializeMetricHost(t *testing.T) {
assert.Equal(t, expS, mS)
}
+func TestSerializeMetricHostWithMultipleTemplates(t *testing.T) {
+ now := time.Now()
+ tags := map[string]string{
+ "host": "localhost",
+ "cpu": "cpu0",
+ "datacenter": "us-west-2",
+ }
+ fields := map[string]interface{}{
+ "usage_idle": float64(91.5),
+ "usage_busy": float64(8.5),
+ }
+ m1, err := metric.New("cpu", tags, fields, now)
+ m2, err := metric.New("new_cpu", tags, fields, now)
+ assert.NoError(t, err)
+
+ templates, defaultTemplate, err := InitGraphiteTemplates([]string{
+ "cp* tags.measurement.host.field",
+ "new_cpu tags.host.measurement.field",
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, defaultTemplate, "")
+
+ s := GraphiteSerializer{
+ Templates: templates,
+ }
+
+ buf, _ := s.Serialize(m1)
+ buf2, _ := s.Serialize(m2)
+
+ buf = append(buf, buf2...)
+
+ mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
+ assert.NoError(t, err)
+
+ expS := []string{
+ fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_idle 91.5 %d", now.Unix()),
+ fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_busy 8.5 %d", now.Unix()),
+ fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_idle 91.5 %d", now.Unix()),
+ fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_busy 8.5 %d", now.Unix()),
+ }
+ sort.Strings(mS)
+ sort.Strings(expS)
+ assert.Equal(t, expS, mS)
+}
+
+func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) {
+ now := time.Now()
+ tags := map[string]string{
+ "host": "localhost",
+ "cpu": "cpu0",
+ "datacenter": "us-west-2",
+ }
+ fields := map[string]interface{}{
+ "usage_idle": float64(91.5),
+ "usage_busy": float64(8.5),
+ }
+ m1, err := metric.New("cpu", tags, fields, now)
+ m2, err := metric.New("new_cpu", tags, fields, now)
+ assert.NoError(t, err)
+
+ templates, defaultTemplate, err := InitGraphiteTemplates([]string{
+ "cp* tags.measurement.host.field",
+ "tags.host.measurement.field",
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, defaultTemplate, "tags.host.measurement.field")
+
+ s := GraphiteSerializer{
+ Templates: templates,
+ Template: defaultTemplate,
+ }
+
+ buf, _ := s.Serialize(m1)
+ buf2, _ := s.Serialize(m2)
+
+ buf = append(buf, buf2...)
+
+ mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
+ assert.NoError(t, err)
+
+ expS := []string{
+ fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_idle 91.5 %d", now.Unix()),
+ fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_busy 8.5 %d", now.Unix()),
+ fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_idle 91.5 %d", now.Unix()),
+ fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_busy 8.5 %d", now.Unix()),
+ }
+ sort.Strings(mS)
+ sort.Strings(expS)
+ assert.Equal(t, expS, mS)
+}
+
func TestSerializeMetricHostWithTagSupport(t *testing.T) {
now := time.Now()
tags := map[string]string{
@@ -160,6 +252,7 @@ func TestSerializeMetricHostWithTagSupport(t *testing.T) {
s := GraphiteSerializer{
TagSupport: true,
+ Separator: ".",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
@@ -214,6 +307,7 @@ func TestSerializeValueFieldWithTagSupport(t *testing.T) {
s := GraphiteSerializer{
TagSupport: true,
+ Separator: ".",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
@@ -289,6 +383,7 @@ func TestSerializeValueStringWithTagSupport(t *testing.T) {
s := GraphiteSerializer{
TagSupport: true,
+ Separator: ".",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
@@ -342,6 +437,7 @@ func TestSerializeValueBooleanWithTagSupport(t *testing.T) {
s := GraphiteSerializer{
TagSupport: true,
+ Separator: ".",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
@@ -414,6 +510,7 @@ func TestSerializeFieldWithSpacesWithTagSupport(t *testing.T) {
s := GraphiteSerializer{
TagSupport: true,
+ Separator: ".",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
@@ -467,6 +564,7 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) {
s := GraphiteSerializer{
TagSupport: true,
+ Separator: ".",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
@@ -577,6 +675,7 @@ func TestSerializeMetricPrefixWithTagSupport(t *testing.T) {
s := GraphiteSerializer{
Prefix: "prefix",
TagSupport: true,
+ Separator: ".",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
@@ -882,6 +981,7 @@ func TestCleanWithTagsSupport(t *testing.T) {
s := GraphiteSerializer{
TagSupport: true,
+ Separator: ".",
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -942,6 +1042,7 @@ func TestSerializeBatchWithTagsSupport(t *testing.T) {
s := GraphiteSerializer{
TagSupport: true,
+ Separator: ".",
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
diff --git a/plugins/serializers/influx/README.md b/plugins/serializers/influx/README.md
index d97fd42c83f6b..d21ead8758f38 100644
--- a/plugins/serializers/influx/README.md
+++ b/plugins/serializers/influx/README.md
@@ -5,6 +5,7 @@ protocol]. This is the recommended format unless another format is required
for interoperability.
### Configuration
+
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
@@ -31,4 +32,13 @@ for interoperability.
influx_uint_support = false
```
+### Metrics
+
+Conversion is direct taking into account some limitations of the Line Protocol
+format:
+- Float fields that are `NaN` or `Inf` are skipped.
+- Trailing backslash `\` characters are removed from tag keys and values.
+- Tags with a key or value that is the empty string are skipped.
+- When not using `influx_uint_support`, unsigned integers are capped at the max int64.
+
[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/
diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go
index e7063cbd2f62a..aa76b8accb8e1 100644
--- a/plugins/serializers/influx/influx.go
+++ b/plugins/serializers/influx/influx.go
@@ -8,6 +8,7 @@ import (
"math"
"sort"
"strconv"
+ "strings"
"github.com/influxdata/telegraf"
)
@@ -113,6 +114,9 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
for _, m := range metrics {
_, err := s.Write(&s.buf, m)
if err != nil {
+ if _, ok := err.(*MetricError); ok {
+ continue
+ }
return nil, err
}
}
@@ -151,8 +155,16 @@ func (s *Serializer) buildHeader(m telegraf.Metric) error {
key := escape(tag.Key)
value := escape(tag.Value)
- // Some keys and values are not encodeable as line protocol, such as
- // those with a trailing '\' or empty strings.
+ // Tag keys and values that end with a backslash cannot be encoded by
+ // line protocol.
+ if strings.HasSuffix(key, `\`) {
+ key = strings.TrimRight(key, `\`)
+ }
+ if strings.HasSuffix(value, `\`) {
+ value = strings.TrimRight(value, `\`)
+ }
+
+ // Tag keys and values must not be the empty string.
if key == "" || value == "" {
continue
}
diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go
index 8102bd973702b..a86215d94bf4b 100644
--- a/plugins/serializers/influx/influx_test.go
+++ b/plugins/serializers/influx/influx_test.go
@@ -323,6 +323,102 @@ var tests = []struct {
),
output: []byte("cpu,host=x\\ny value=42i 0\n"),
},
+ {
+ name: "empty tag value is removed",
+ input: MustMetric(
+ metric.New(
+ "cpu",
+ map[string]string{
+ "host": "",
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ ),
+ output: []byte("cpu value=42i 0\n"),
+ },
+ {
+ name: "empty tag key is removed",
+ input: MustMetric(
+ metric.New(
+ "cpu",
+ map[string]string{
+ "": "example.org",
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ ),
+ output: []byte("cpu value=42i 0\n"),
+ },
+ {
+ name: "tag value ends with backslash is trimmed",
+ input: MustMetric(
+ metric.New(
+ "disk",
+ map[string]string{
+ "path": `C:\`,
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ ),
+ output: []byte("disk,path=C: value=42i 0\n"),
+ },
+ {
+ name: "tag key ends with backslash is trimmed",
+ input: MustMetric(
+ metric.New(
+ "disk",
+ map[string]string{
+ `path\`: "/",
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ ),
+ output: []byte("disk,path=/ value=42i 0\n"),
+ },
+ {
+ name: "tag key backslash is trimmed and removed",
+ input: MustMetric(
+ metric.New(
+ "disk",
+ map[string]string{
+ `\`: "example.org",
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ ),
+ output: []byte("disk value=42i 0\n"),
+ },
+ {
+ name: "tag value backslash is trimmed and removed",
+ input: MustMetric(
+ metric.New(
+ "disk",
+ map[string]string{
+ "host": `\`,
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ ),
+ output: []byte("disk value=42i 0\n"),
+ },
{
name: "string newline",
input: MustMetric(
@@ -385,7 +481,6 @@ var tests = []struct {
"cpu_time_nice": float64(0),
"cpu_time_soft_irq": float64(0),
"cpu_time_steal": float64(0),
- "cpu_time_stolen": float64(0),
"cpu_time_system": float64(0),
"cpu_time_user": float64(0.02),
"cpu_usage": float64(0),
@@ -433,7 +528,7 @@ var tests = []struct {
time.Unix(0, 1517620624000000000),
),
),
- output: []byte("procstat,exe=bash,process_name=bash cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_stolen=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"),
+ output: []byte("procstat,exe=bash,process_name=bash cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"),
},
}
diff --git a/plugins/serializers/influx/reader.go b/plugins/serializers/influx/reader.go
index d0dad8eebb984..55b6c2b4130ec 100644
--- a/plugins/serializers/influx/reader.go
+++ b/plugins/serializers/influx/reader.go
@@ -53,12 +53,13 @@ func (r *reader) Read(p []byte) (int, error) {
r.offset += 1
if err != nil {
r.buf.Reset()
- if err != nil {
- // Since we are serializing multiple metrics, don't fail the
- // the entire batch just because of one unserializable metric.
- log.Printf("E! [serializers.influx] could not serialize metric: %v; discarding metric", err)
+ if _, ok := err.(*MetricError); ok {
continue
}
+ // Since we are serializing multiple metrics, don't fail the
+ // the entire batch just because of one unserializable metric.
+ log.Printf("E! [serializers.influx] could not serialize metric: %v; discarding metric", err)
+ continue
}
break
}
diff --git a/plugins/serializers/influx/reader_test.go b/plugins/serializers/influx/reader_test.go
index 642b71b1cfc3e..7aaf3fccf41e9 100644
--- a/plugins/serializers/influx/reader_test.go
+++ b/plugins/serializers/influx/reader_test.go
@@ -189,3 +189,92 @@ func TestZeroLengthBufferNoError(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, n)
}
+
+func BenchmarkReader(b *testing.B) {
+ m := MustMetric(
+ metric.New(
+ "procstat",
+ map[string]string{
+ "exe": "bash",
+ "process_name": "bash",
+ },
+ map[string]interface{}{
+ "cpu_time": 0,
+ "cpu_time_guest": float64(0),
+ "cpu_time_guest_nice": float64(0),
+ "cpu_time_idle": float64(0),
+ "cpu_time_iowait": float64(0),
+ "cpu_time_irq": float64(0),
+ "cpu_time_nice": float64(0),
+ "cpu_time_soft_irq": float64(0),
+ "cpu_time_steal": float64(0),
+ "cpu_time_system": float64(0),
+ "cpu_time_user": float64(0.02),
+ "cpu_usage": float64(0),
+ "involuntary_context_switches": 2,
+ "memory_data": 1576960,
+ "memory_locked": 0,
+ "memory_rss": 5103616,
+ "memory_stack": 139264,
+ "memory_swap": 0,
+ "memory_vms": 21659648,
+ "nice_priority": 20,
+ "num_fds": 4,
+ "num_threads": 1,
+ "pid": 29417,
+ "read_bytes": 0,
+ "read_count": 259,
+ "realtime_priority": 0,
+ "rlimit_cpu_time_hard": 2147483647,
+ "rlimit_cpu_time_soft": 2147483647,
+ "rlimit_file_locks_hard": 2147483647,
+ "rlimit_file_locks_soft": 2147483647,
+ "rlimit_memory_data_hard": 2147483647,
+ "rlimit_memory_data_soft": 2147483647,
+ "rlimit_memory_locked_hard": 65536,
+ "rlimit_memory_locked_soft": 65536,
+ "rlimit_memory_rss_hard": 2147483647,
+ "rlimit_memory_rss_soft": 2147483647,
+ "rlimit_memory_stack_hard": 2147483647,
+ "rlimit_memory_stack_soft": 8388608,
+ "rlimit_memory_vms_hard": 2147483647,
+ "rlimit_memory_vms_soft": 2147483647,
+ "rlimit_nice_priority_hard": 0,
+ "rlimit_nice_priority_soft": 0,
+ "rlimit_num_fds_hard": 4096,
+ "rlimit_num_fds_soft": 1024,
+ "rlimit_realtime_priority_hard": 0,
+ "rlimit_realtime_priority_soft": 0,
+ "rlimit_signals_pending_hard": 78994,
+ "rlimit_signals_pending_soft": 78994,
+ "signals_pending": 0,
+ "voluntary_context_switches": 42,
+ "write_bytes": 106496,
+ "write_count": 35,
+ },
+ time.Unix(0, 1517620624000000000),
+ ),
+ )
+
+ metrics := make([]telegraf.Metric, 1000, 1000)
+ for i := range metrics {
+ metrics[i] = m
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ readbuf := make([]byte, 4096, 4096)
+ serializer := NewSerializer()
+ reader := NewReader(metrics, serializer)
+ for {
+ _, err := reader.Read(readbuf)
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ panic(err.Error())
+ }
+ }
+ }
+}
diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go
index bfb84f9a72b7f..e2d7af3305117 100644
--- a/plugins/serializers/json/json.go
+++ b/plugins/serializers/json/json.go
@@ -2,6 +2,7 @@ package json
import (
"encoding/json"
+ "math"
"time"
"github.com/influxdata/telegraf"
@@ -49,8 +50,26 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
func (s *serializer) createObject(metric telegraf.Metric) map[string]interface{} {
m := make(map[string]interface{}, 4)
- m["tags"] = metric.Tags()
- m["fields"] = metric.Fields()
+
+ tags := make(map[string]string, len(metric.TagList()))
+ for _, tag := range metric.TagList() {
+ tags[tag.Key] = tag.Value
+ }
+ m["tags"] = tags
+
+ fields := make(map[string]interface{}, len(metric.FieldList()))
+ for _, field := range metric.FieldList() {
+ switch fv := field.Value.(type) {
+ case float64:
+ // JSON does not support these special values
+ if math.IsNaN(fv) || math.IsInf(fv, 0) {
+ continue
+ }
+ }
+ fields[field.Key] = field.Value
+ }
+ m["fields"] = fields
+
m["name"] = metric.Name()
m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits)
return m
diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go
index 82990b74743b8..9ea304c88eedb 100644
--- a/plugins/serializers/json/json_test.go
+++ b/plugins/serializers/json/json_test.go
@@ -2,14 +2,15 @@ package json
import (
"fmt"
+ "math"
"testing"
"time"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func MustMetric(v telegraf.Metric, err error) telegraf.Metric {
@@ -193,3 +194,42 @@ func TestSerializeBatch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, []byte(`{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`), buf)
}
+
+func TestSerializeBatchSkipInf(t *testing.T) {
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "inf": math.Inf(1),
+ "time_idle": 42,
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ s, err := NewSerializer(0)
+ require.NoError(t, err)
+ buf, err := s.SerializeBatch(metrics)
+ require.NoError(t, err)
+ require.Equal(t, []byte(`{"metrics":[{"fields":{"time_idle":42},"name":"cpu","tags":{},"timestamp":0}]}`), buf)
+}
+
+func TestSerializeBatchSkipInfAllFields(t *testing.T) {
+ metrics := []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "inf": math.Inf(1),
+ },
+ time.Unix(0, 0),
+ ),
+ }
+
+ s, err := NewSerializer(0)
+ require.NoError(t, err)
+ buf, err := s.SerializeBatch(metrics)
+ require.NoError(t, err)
+ require.Equal(t, []byte(`{"metrics":[{"fields":{},"name":"cpu","tags":{},"timestamp":0}]}`), buf)
+}
diff --git a/plugins/serializers/prometheus/README.md b/plugins/serializers/prometheus/README.md
new file mode 100644
index 0000000000000..19c869ffbccb3
--- /dev/null
+++ b/plugins/serializers/prometheus/README.md
@@ -0,0 +1,81 @@
+# Prometheus
+
+The `prometheus` data format converts metrics into the Prometheus text
+exposition format. When used with the `prometheus` input, the input should be
+use the `metric_version = 2` option in order to properly round trip metrics.
+
+**Warning**: When generating histogram and summary types, output may
+not be correct if the metric spans multiple batches. This issue can be
+somewhat, but not fully, mitigated by using outputs that support writing in
+"batch format". When using histogram and summary types, it is recommended to
+use only the `prometheus_client` output.
+
+### Configuration
+
+```toml
+[[outputs.file]]
+ files = ["stdout"]
+ use_batch_format = true
+
+ ## Include the metric timestamp on each sample.
+ prometheus_export_timestamp = false
+
+ ## Sort prometheus metric families and metric samples. Useful for
+ ## debugging.
+ prometheus_sort_metrics = false
+
+ ## Output string fields as metric labels; when false string fields are
+ ## discarded.
+ prometheus_string_as_label = false
+
+ ## Data format to output.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "prometheus"
+```
+
+### Metrics
+
+A Prometheus metric is created for each integer, float, boolean or unsigned
+field. Boolean values are converted to *1.0* for true and *0.0* for false.
+
+The Prometheus metric names are produced by joining the measurement name with
+the field key. In the special case where the measurement name is `prometheus`
+it is not included in the final metric name.
+
+Prometheus labels are produced for each tag.
+
+**Note:** String fields are ignored and do not produce Prometheus metrics.
+
+### Example
+
+**Example Input**
+```
+cpu,cpu=cpu0 time_guest=8022.6,time_system=26145.98,time_user=92512.89 1574317740000000000
+cpu,cpu=cpu1 time_guest=8097.88,time_system=25223.35,time_user=96519.58 1574317740000000000
+cpu,cpu=cpu2 time_guest=7386.28,time_system=24870.37,time_user=95631.59 1574317740000000000
+cpu,cpu=cpu3 time_guest=7434.19,time_system=24843.71,time_user=93753.88 1574317740000000000
+```
+
+**Example Output**
+```
+# HELP cpu_time_guest Telegraf collected metric
+# TYPE cpu_time_guest counter
+cpu_time_guest{cpu="cpu0"} 9582.54
+cpu_time_guest{cpu="cpu1"} 9660.88
+cpu_time_guest{cpu="cpu2"} 8946.45
+cpu_time_guest{cpu="cpu3"} 9002.31
+# HELP cpu_time_system Telegraf collected metric
+# TYPE cpu_time_system counter
+cpu_time_system{cpu="cpu0"} 28675.47
+cpu_time_system{cpu="cpu1"} 27779.34
+cpu_time_system{cpu="cpu2"} 27406.18
+cpu_time_system{cpu="cpu3"} 27404.97
+# HELP cpu_time_user Telegraf collected metric
+# TYPE cpu_time_user counter
+cpu_time_user{cpu="cpu0"} 99551.84
+cpu_time_user{cpu="cpu1"} 103468.52
+cpu_time_user{cpu="cpu2"} 102591.45
+cpu_time_user{cpu="cpu3"} 100717.05
+```
diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go
new file mode 100644
index 0000000000000..d37ecaaaf2f1d
--- /dev/null
+++ b/plugins/serializers/prometheus/collection.go
@@ -0,0 +1,481 @@
+package prometheus
+
+import (
+ "hash/fnv"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ "github.com/influxdata/telegraf"
+ dto "github.com/prometheus/client_model/go"
+)
+
+const helpString = "Telegraf collected metric"
+
+type TimeFunc func() time.Time
+
+type MetricFamily struct {
+ Name string
+ Type telegraf.ValueType
+}
+
+type Metric struct {
+ Labels []LabelPair
+ Time time.Time
+ AddTime time.Time
+ Scaler *Scaler
+ Histogram *Histogram
+ Summary *Summary
+}
+
+type LabelPair struct {
+ Name string
+ Value string
+}
+
+type Scaler struct {
+ Value float64
+}
+
+type Bucket struct {
+ Bound float64
+ Count uint64
+}
+
+type Quantile struct {
+ Quantile float64
+ Value float64
+}
+
+type Histogram struct {
+ Buckets []Bucket
+ Count uint64
+ Sum float64
+}
+
+func (h *Histogram) merge(b Bucket) {
+ for i := range h.Buckets {
+ if h.Buckets[i].Bound == b.Bound {
+ h.Buckets[i].Count = b.Count
+ return
+ }
+ }
+ h.Buckets = append(h.Buckets, b)
+}
+
+type Summary struct {
+ Quantiles []Quantile
+ Count uint64
+ Sum float64
+}
+
+func (s *Summary) merge(q Quantile) {
+ for i := range s.Quantiles {
+ if s.Quantiles[i].Quantile == q.Quantile {
+ s.Quantiles[i].Value = q.Value
+ return
+ }
+ }
+ s.Quantiles = append(s.Quantiles, q)
+}
+
+type MetricKey uint64
+
+func MakeMetricKey(labels []LabelPair) MetricKey {
+ h := fnv.New64a()
+ for _, label := range labels {
+ h.Write([]byte(label.Name))
+ h.Write([]byte("\x00"))
+ h.Write([]byte(label.Value))
+ h.Write([]byte("\x00"))
+ }
+ return MetricKey(h.Sum64())
+}
+
+type Entry struct {
+ Family MetricFamily
+ Metrics map[MetricKey]*Metric
+}
+
+type Collection struct {
+ Entries map[MetricFamily]Entry
+ config FormatConfig
+}
+
+func NewCollection(config FormatConfig) *Collection {
+ cache := &Collection{
+ Entries: make(map[MetricFamily]Entry),
+ config: config,
+ }
+ return cache
+}
+
+func hasLabel(name string, labels []LabelPair) bool {
+ for _, label := range labels {
+ if name == label.Name {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *Collection) createLabels(metric telegraf.Metric) []LabelPair {
+ labels := make([]LabelPair, 0, len(metric.TagList()))
+ for _, tag := range metric.TagList() {
+ // Ignore special tags for histogram and summary types.
+ switch metric.Type() {
+ case telegraf.Histogram:
+ if tag.Key == "le" {
+ continue
+ }
+ case telegraf.Summary:
+ if tag.Key == "quantile" {
+ continue
+ }
+ }
+
+ name, ok := SanitizeLabelName(tag.Key)
+ if !ok {
+ continue
+ }
+
+ labels = append(labels, LabelPair{Name: name, Value: tag.Value})
+ }
+
+ if c.config.StringHandling != StringAsLabel {
+ return labels
+ }
+
+ addedFieldLabel := false
+ for _, field := range metric.FieldList() {
+ value, ok := field.Value.(string)
+ if !ok {
+ continue
+ }
+
+ name, ok := SanitizeLabelName(field.Key)
+ if !ok {
+ continue
+ }
+
+ // If there is a tag with the same name as the string field, discard
+ // the field and use the tag instead.
+ if hasLabel(name, labels) {
+ continue
+ }
+
+ labels = append(labels, LabelPair{Name: name, Value: value})
+ addedFieldLabel = true
+
+ }
+
+ if addedFieldLabel {
+ sort.Slice(labels, func(i, j int) bool {
+ return labels[i].Name < labels[j].Name
+ })
+ }
+
+ return labels
+}
+
+func (c *Collection) Add(metric telegraf.Metric, now time.Time) {
+ labels := c.createLabels(metric)
+ for _, field := range metric.FieldList() {
+ metricName := MetricName(metric.Name(), field.Key, metric.Type())
+ metricName, ok := SanitizeMetricName(metricName)
+ if !ok {
+ continue
+ }
+
+ family := MetricFamily{
+ Name: metricName,
+ Type: metric.Type(),
+ }
+
+ entry, ok := c.Entries[family]
+ if !ok {
+ entry = Entry{
+ Family: family,
+ Metrics: make(map[MetricKey]*Metric),
+ }
+ c.Entries[family] = entry
+
+ }
+
+ metricKey := MakeMetricKey(labels)
+
+ m, ok := entry.Metrics[metricKey]
+ if ok {
+ // A batch of metrics can contain multiple values for a single
+ // Prometheus sample. If this metric is older than the existing
+ // sample then we can skip over it.
+ if metric.Time().Before(m.Time) {
+ continue
+ }
+ }
+
+ switch metric.Type() {
+ case telegraf.Counter:
+ fallthrough
+ case telegraf.Gauge:
+ fallthrough
+ case telegraf.Untyped:
+ value, ok := SampleValue(field.Value)
+ if !ok {
+ continue
+ }
+
+ m = &Metric{
+ Labels: labels,
+ Time: metric.Time(),
+ AddTime: now,
+ Scaler: &Scaler{Value: value},
+ }
+
+ entry.Metrics[metricKey] = m
+ case telegraf.Histogram:
+ if m == nil {
+ m = &Metric{
+ Labels: labels,
+ Time: metric.Time(),
+ AddTime: now,
+ Histogram: &Histogram{},
+ }
+ }
+ switch {
+ case strings.HasSuffix(field.Key, "_bucket"):
+ le, ok := metric.GetTag("le")
+ if !ok {
+ continue
+ }
+ bound, err := strconv.ParseFloat(le, 64)
+ if err != nil {
+ continue
+ }
+
+ count, ok := SampleCount(field.Value)
+ if !ok {
+ continue
+ }
+
+ m.Histogram.merge(Bucket{
+ Bound: bound,
+ Count: count,
+ })
+ case strings.HasSuffix(field.Key, "_sum"):
+ sum, ok := SampleSum(field.Value)
+ if !ok {
+ continue
+ }
+
+ m.Histogram.Sum = sum
+ case strings.HasSuffix(field.Key, "_count"):
+ count, ok := SampleCount(field.Value)
+ if !ok {
+ continue
+ }
+
+ m.Histogram.Count = count
+ default:
+ continue
+ }
+
+ entry.Metrics[metricKey] = m
+ case telegraf.Summary:
+ if m == nil {
+ m = &Metric{
+ Labels: labels,
+ Time: metric.Time(),
+ AddTime: now,
+ Summary: &Summary{},
+ }
+ }
+ switch {
+ case strings.HasSuffix(field.Key, "_sum"):
+ sum, ok := SampleSum(field.Value)
+ if !ok {
+ continue
+ }
+
+ m.Summary.Sum = sum
+ case strings.HasSuffix(field.Key, "_count"):
+ count, ok := SampleCount(field.Value)
+ if !ok {
+ continue
+ }
+
+ m.Summary.Count = count
+ default:
+ quantileTag, ok := metric.GetTag("quantile")
+ if !ok {
+ continue
+ }
+ quantile, err := strconv.ParseFloat(quantileTag, 64)
+ if err != nil {
+ continue
+ }
+
+ value, ok := SampleValue(field.Value)
+ if !ok {
+ continue
+ }
+
+ m.Summary.merge(Quantile{
+ Quantile: quantile,
+ Value: value,
+ })
+ }
+
+ entry.Metrics[metricKey] = m
+ }
+ }
+}
+
+func (c *Collection) Expire(now time.Time, age time.Duration) {
+ expireTime := now.Add(-age)
+ for _, entry := range c.Entries {
+ for key, metric := range entry.Metrics {
+ if metric.AddTime.Before(expireTime) {
+ delete(entry.Metrics, key)
+ if len(entry.Metrics) == 0 {
+ delete(c.Entries, entry.Family)
+ }
+ }
+ }
+ }
+}
+
+func (c *Collection) GetEntries(order MetricSortOrder) []Entry {
+ entries := make([]Entry, 0, len(c.Entries))
+ for _, entry := range c.Entries {
+ entries = append(entries, entry)
+ }
+
+ switch order {
+ case SortMetrics:
+ sort.Slice(entries, func(i, j int) bool {
+ lhs := entries[i].Family
+ rhs := entries[j].Family
+ if lhs.Name != rhs.Name {
+ return lhs.Name < rhs.Name
+ }
+
+ return lhs.Type < rhs.Type
+ })
+ }
+ return entries
+}
+
+func (c *Collection) GetMetrics(entry Entry, order MetricSortOrder) []*Metric {
+ metrics := make([]*Metric, 0, len(entry.Metrics))
+ for _, metric := range entry.Metrics {
+ metrics = append(metrics, metric)
+ }
+
+ switch order {
+ case SortMetrics:
+ sort.Slice(metrics, func(i, j int) bool {
+ lhs := metrics[i].Labels
+ rhs := metrics[j].Labels
+ if len(lhs) != len(rhs) {
+ return len(lhs) < len(rhs)
+ }
+
+ for index := range lhs {
+ l := lhs[index]
+ r := rhs[index]
+
+ if l.Name != r.Name {
+ return l.Name < r.Name
+ }
+
+ if l.Value != r.Value {
+ return l.Value < r.Value
+ }
+ }
+
+ return false
+ })
+ }
+
+ return metrics
+}
+
+func (c *Collection) GetProto() []*dto.MetricFamily {
+ result := make([]*dto.MetricFamily, 0, len(c.Entries))
+
+ for _, entry := range c.GetEntries(c.config.MetricSortOrder) {
+ mf := &dto.MetricFamily{
+ Name: proto.String(entry.Family.Name),
+ Help: proto.String(helpString),
+ Type: MetricType(entry.Family.Type),
+ }
+
+ for _, metric := range c.GetMetrics(entry, c.config.MetricSortOrder) {
+ l := make([]*dto.LabelPair, 0, len(metric.Labels))
+ for _, label := range metric.Labels {
+ l = append(l, &dto.LabelPair{
+ Name: proto.String(label.Name),
+ Value: proto.String(label.Value),
+ })
+ }
+
+ m := &dto.Metric{
+ Label: l,
+ }
+
+ if c.config.TimestampExport == ExportTimestamp {
+ m.TimestampMs = proto.Int64(metric.Time.UnixNano() / int64(time.Millisecond))
+ }
+
+ switch entry.Family.Type {
+ case telegraf.Gauge:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(metric.Scaler.Value)}
+ case telegraf.Counter:
+ m.Counter = &dto.Counter{Value: proto.Float64(metric.Scaler.Value)}
+ case telegraf.Untyped:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(metric.Scaler.Value)}
+ case telegraf.Histogram:
+ buckets := make([]*dto.Bucket, 0, len(metric.Histogram.Buckets))
+ for _, bucket := range metric.Histogram.Buckets {
+ buckets = append(buckets, &dto.Bucket{
+ UpperBound: proto.Float64(bucket.Bound),
+ CumulativeCount: proto.Uint64(bucket.Count),
+ })
+ }
+
+ m.Histogram = &dto.Histogram{
+ Bucket: buckets,
+ SampleCount: proto.Uint64(metric.Histogram.Count),
+ SampleSum: proto.Float64(metric.Histogram.Sum),
+ }
+ case telegraf.Summary:
+ quantiles := make([]*dto.Quantile, 0, len(metric.Summary.Quantiles))
+ for _, quantile := range metric.Summary.Quantiles {
+ quantiles = append(quantiles, &dto.Quantile{
+ Quantile: proto.Float64(quantile.Quantile),
+ Value: proto.Float64(quantile.Value),
+ })
+ }
+
+ m.Summary = &dto.Summary{
+ Quantile: quantiles,
+ SampleCount: proto.Uint64(metric.Summary.Count),
+ SampleSum: proto.Float64(metric.Summary.Sum),
+ }
+ default:
+ panic("unknown telegraf.ValueType")
+ }
+
+ mf.Metric = append(mf.Metric, m)
+ }
+
+ if len(mf.Metric) != 0 {
+ result = append(result, mf)
+ }
+ }
+
+ return result
+}
diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go
new file mode 100644
index 0000000000000..d2c5f5d098162
--- /dev/null
+++ b/plugins/serializers/prometheus/collection_test.go
@@ -0,0 +1,427 @@
+package prometheus
+
+import (
+ "math"
+ "testing"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/require"
+)
+
+type Input struct {
+ metric telegraf.Metric
+ addtime time.Time
+}
+
+func TestCollectionExpire(t *testing.T) {
+ tests := []struct {
+ name string
+ now time.Time
+ age time.Duration
+ input []Input
+ expected []*dto.MetricFamily
+ }{
+ {
+ name: "not expired",
+ now: time.Unix(1, 0),
+ age: 10 * time.Second,
+ input: []Input{
+ {
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ addtime: time.Unix(0, 0),
+ },
+ },
+ expected: []*dto.MetricFamily{
+ {
+ Name: proto.String("cpu_time_idle"),
+ Help: proto.String(helpString),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "update metric expiration",
+ now: time.Unix(20, 0),
+ age: 10 * time.Second,
+ input: []Input{
+ {
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ addtime: time.Unix(0, 0),
+ },
+ {
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 43.0,
+ },
+ time.Unix(12, 0),
+ ),
+ addtime: time.Unix(12, 0),
+ },
+ },
+ expected: []*dto.MetricFamily{
+ {
+ Name: proto.String("cpu_time_idle"),
+ Help: proto.String(helpString),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Untyped: &dto.Untyped{Value: proto.Float64(43.0)},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "update metric expiration descending order",
+ now: time.Unix(20, 0),
+ age: 10 * time.Second,
+ input: []Input{
+ {
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(12, 0),
+ ),
+ addtime: time.Unix(12, 0),
+ }, {
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 43.0,
+ },
+ time.Unix(0, 0),
+ ),
+ addtime: time.Unix(0, 0),
+ },
+ },
+ expected: []*dto.MetricFamily{
+ {
+ Name: proto.String("cpu_time_idle"),
+ Help: proto.String(helpString),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "expired single metric in metric family",
+ now: time.Unix(20, 0),
+ age: 10 * time.Second,
+ input: []Input{
+ {
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ addtime: time.Unix(0, 0),
+ },
+ },
+ expected: []*dto.MetricFamily{},
+ },
+ {
+ name: "expired one metric in metric family",
+ now: time.Unix(20, 0),
+ age: 10 * time.Second,
+ input: []Input{
+ {
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ addtime: time.Unix(0, 0),
+ }, {
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_guest": 42.0,
+ },
+ time.Unix(15, 0),
+ ),
+ addtime: time.Unix(15, 0),
+ },
+ },
+ expected: []*dto.MetricFamily{
+ {
+ Name: proto.String("cpu_time_guest"),
+ Help: proto.String(helpString),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "histogram bucket updates",
+ now: time.Unix(0, 0),
+ age: 10 * time.Second,
+ input: []Input{
+ {
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "http_request_duration_seconds_sum": 10.0,
+ "http_request_duration_seconds_count": 2,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ addtime: time.Unix(0, 0),
+ }, {
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "0.05"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 1.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ addtime: time.Unix(0, 0),
+ }, {
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "+Inf"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 1.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ addtime: time.Unix(0, 0),
+ }, {
+ // Next interval
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "http_request_duration_seconds_sum": 20.0,
+ "http_request_duration_seconds_count": 4,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ addtime: time.Unix(0, 0),
+ }, {
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "0.05"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 2.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ addtime: time.Unix(0, 0),
+ }, {
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "+Inf"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 2.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ addtime: time.Unix(0, 0),
+ },
+ },
+ expected: []*dto.MetricFamily{
+ {
+ Name: proto.String("http_request_duration_seconds"),
+ Help: proto.String(helpString),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(4),
+ SampleSum: proto.Float64(20.0),
+ Bucket: []*dto.Bucket{
+ {
+ UpperBound: proto.Float64(0.05),
+ CumulativeCount: proto.Uint64(2),
+ },
+ {
+ UpperBound: proto.Float64(math.Inf(1)),
+ CumulativeCount: proto.Uint64(2),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "summary quantile updates",
+ now: time.Unix(0, 0),
+ age: 10 * time.Second,
+ input: []Input{
+ {
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "rpc_duration_seconds_sum": 1.0,
+ "rpc_duration_seconds_count": 1,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ addtime: time.Unix(0, 0),
+ }, {
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{"quantile": "0.01"},
+ map[string]interface{}{
+ "rpc_duration_seconds": 1.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ addtime: time.Unix(0, 0),
+ }, {
+ // Updated Summary
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "rpc_duration_seconds_sum": 2.0,
+ "rpc_duration_seconds_count": 2,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ addtime: time.Unix(0, 0),
+ }, {
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{"quantile": "0.01"},
+ map[string]interface{}{
+ "rpc_duration_seconds": 2.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ addtime: time.Unix(0, 0),
+ },
+ },
+ expected: []*dto.MetricFamily{
+ {
+ Name: proto.String("rpc_duration_seconds"),
+ Help: proto.String(helpString),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(2),
+ SampleSum: proto.Float64(2.0),
+ Quantile: []*dto.Quantile{
+ {
+ Quantile: proto.Float64(0.01),
+ Value: proto.Float64(2),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "expire based on add time",
+ now: time.Unix(20, 0),
+ age: 10 * time.Second,
+ input: []Input{
+ {
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ addtime: time.Unix(15, 0),
+ },
+ },
+ expected: []*dto.MetricFamily{
+ {
+ Name: proto.String("cpu_time_idle"),
+ Help: proto.String(helpString),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
+ },
+ },
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewCollection(FormatConfig{})
+ for _, item := range tt.input {
+ c.Add(item.metric, item.addtime)
+ }
+ c.Expire(tt.now, tt.age)
+
+ actual := c.GetProto()
+
+ require.Equal(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/plugins/serializers/prometheus/convert.go b/plugins/serializers/prometheus/convert.go
new file mode 100644
index 0000000000000..131ac31b8036c
--- /dev/null
+++ b/plugins/serializers/prometheus/convert.go
@@ -0,0 +1,215 @@
+package prometheus
+
+import (
+ "strings"
+ "unicode"
+
+ "github.com/influxdata/telegraf"
+ dto "github.com/prometheus/client_model/go"
+)
+
+type Table struct {
+ First *unicode.RangeTable
+ Rest *unicode.RangeTable
+}
+
+var MetricNameTable = Table{
+ First: &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x003A, 0x003A, 1}, // :
+ {0x0041, 0x005A, 1}, // A-Z
+ {0x005F, 0x005F, 1}, // _
+ {0x0061, 0x007A, 1}, // a-z
+ },
+ LatinOffset: 4,
+ },
+ Rest: &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x0030, 0x003A, 1}, // 0-:
+ {0x0041, 0x005A, 1}, // A-Z
+ {0x005F, 0x005F, 1}, // _
+ {0x0061, 0x007A, 1}, // a-z
+ },
+ LatinOffset: 4,
+ },
+}
+
+var LabelNameTable = Table{
+ First: &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x0041, 0x005A, 1}, // A-Z
+ {0x005F, 0x005F, 1}, // _
+ {0x0061, 0x007A, 1}, // a-z
+ },
+ LatinOffset: 3,
+ },
+ Rest: &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x0030, 0x0039, 1}, // 0-9
+ {0x0041, 0x005A, 1}, // A-Z
+ {0x005F, 0x005F, 1}, // _
+ {0x0061, 0x007A, 1}, // a-z
+ },
+ LatinOffset: 4,
+ },
+}
+
+func isValid(name string, table Table) bool {
+ if name == "" {
+ return false
+ }
+
+ for i, r := range name {
+ switch {
+ case i == 0:
+ if !unicode.In(r, table.First) {
+ return false
+ }
+ default:
+ if !unicode.In(r, table.Rest) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// Sanitize checks if the name is valid according to the table. If not, it
+// attempts to replaces invalid runes with an underscore to create a valid
+// name.
+func sanitize(name string, table Table) (string, bool) {
+ if isValid(name, table) {
+ return name, true
+ }
+
+ var b strings.Builder
+
+ for i, r := range name {
+ switch {
+ case i == 0:
+ if unicode.In(r, table.First) {
+ b.WriteRune(r)
+ }
+ default:
+ if unicode.In(r, table.Rest) {
+ b.WriteRune(r)
+ } else {
+ b.WriteString("_")
+ }
+ }
+ }
+
+ name = strings.Trim(b.String(), "_")
+ if name == "" {
+ return "", false
+ }
+
+ return name, true
+}
+
+// SanitizeMetricName checks if the name is a valid Prometheus metric name. If
+// not, it attempts to replaces invalid runes with an underscore to create a
+// valid name.
+func SanitizeMetricName(name string) (string, bool) {
+ return sanitize(name, MetricNameTable)
+}
+
+// SanitizeLabelName checks if the name is a valid Prometheus label name. If
+// not, it attempts to replaces invalid runes with an underscore to create a
+// valid name.
+func SanitizeLabelName(name string) (string, bool) {
+ return sanitize(name, LabelNameTable)
+}
+
+// MetricName returns the Prometheus metric name.
+func MetricName(measurement, fieldKey string, valueType telegraf.ValueType) string {
+ switch valueType {
+ case telegraf.Histogram, telegraf.Summary:
+ switch {
+ case strings.HasSuffix(fieldKey, "_bucket"):
+ fieldKey = strings.TrimSuffix(fieldKey, "_bucket")
+ case strings.HasSuffix(fieldKey, "_sum"):
+ fieldKey = strings.TrimSuffix(fieldKey, "_sum")
+ case strings.HasSuffix(fieldKey, "_count"):
+ fieldKey = strings.TrimSuffix(fieldKey, "_count")
+ }
+ }
+
+ if measurement == "prometheus" {
+ return fieldKey
+ }
+ return measurement + "_" + fieldKey
+}
+
+func MetricType(valueType telegraf.ValueType) *dto.MetricType {
+ switch valueType {
+ case telegraf.Counter:
+ return dto.MetricType_COUNTER.Enum()
+ case telegraf.Gauge:
+ return dto.MetricType_GAUGE.Enum()
+ case telegraf.Summary:
+ return dto.MetricType_SUMMARY.Enum()
+ case telegraf.Untyped:
+ return dto.MetricType_UNTYPED.Enum()
+ case telegraf.Histogram:
+ return dto.MetricType_HISTOGRAM.Enum()
+ default:
+ panic("unknown telegraf.ValueType")
+ }
+}
+
+// SampleValue converts a field value into a value suitable for a simple sample value.
+func SampleValue(value interface{}) (float64, bool) {
+ switch v := value.(type) {
+ case float64:
+ return v, true
+ case int64:
+ return float64(v), true
+ case uint64:
+ return float64(v), true
+ case bool:
+ if v {
+ return 1.0, true
+ }
+ return 0.0, true
+ default:
+ return 0, false
+ }
+}
+
+// SampleCount converts a field value into a count suitable for a metric family
+// of the Histogram or Summary type.
+func SampleCount(value interface{}) (uint64, bool) {
+ switch v := value.(type) {
+ case float64:
+ if v < 0 {
+ return 0, false
+ }
+ return uint64(v), true
+ case int64:
+ if v < 0 {
+ return 0, false
+ }
+ return uint64(v), true
+ case uint64:
+ return v, true
+ default:
+ return 0, false
+ }
+}
+
+// SampleSum converts a field value into a sum suitable for a metric family
+// of the Histogram or Summary type.
+func SampleSum(value interface{}) (float64, bool) {
+ switch v := value.(type) {
+ case float64:
+ return v, true
+ case int64:
+ return float64(v), true
+ case uint64:
+ return float64(v), true
+ default:
+ return 0, false
+ }
+}
diff --git a/plugins/serializers/prometheus/prometheus.go b/plugins/serializers/prometheus/prometheus.go
new file mode 100644
index 0000000000000..9e5df588287ac
--- /dev/null
+++ b/plugins/serializers/prometheus/prometheus.go
@@ -0,0 +1,70 @@
+package prometheus
+
+import (
+ "bytes"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/prometheus/common/expfmt"
+)
+
+// TimestampExport controls if the output contains timestamps.
+type TimestampExport int
+
+const (
+ NoExportTimestamp TimestampExport = iota
+ ExportTimestamp
+)
+
+// MetricSortOrder controls if the output is sorted.
+type MetricSortOrder int
+
+const (
+ NoSortMetrics MetricSortOrder = iota
+ SortMetrics
+)
+
+// StringHandling defines how to process string fields.
+type StringHandling int
+
+const (
+ DiscardStrings StringHandling = iota
+ StringAsLabel
+)
+
+type FormatConfig struct {
+ TimestampExport TimestampExport
+ MetricSortOrder MetricSortOrder
+ StringHandling StringHandling
+}
+
+type Serializer struct {
+ config FormatConfig
+}
+
+func NewSerializer(config FormatConfig) (*Serializer, error) {
+ s := &Serializer{config: config}
+ return s, nil
+}
+
+func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
+ return s.SerializeBatch([]telegraf.Metric{metric})
+}
+
+func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
+ coll := NewCollection(s.config)
+ for _, metric := range metrics {
+ coll.Add(metric, time.Now())
+ }
+
+ var buf bytes.Buffer
+ for _, mf := range coll.GetProto() {
+ enc := expfmt.NewEncoder(&buf, expfmt.FmtText)
+ err := enc.Encode(mf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return buf.Bytes(), nil
+}
diff --git a/plugins/serializers/prometheus/prometheus_test.go b/plugins/serializers/prometheus/prometheus_test.go
new file mode 100644
index 0000000000000..a2a95482d305c
--- /dev/null
+++ b/plugins/serializers/prometheus/prometheus_test.go
@@ -0,0 +1,693 @@
+package prometheus
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSerialize(t *testing.T) {
+ tests := []struct {
+ name string
+ config FormatConfig
+ metric telegraf.Metric
+ expected []byte
+ }{
+ {
+ name: "simple",
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42
+`),
+ },
+ {
+ name: "prometheus input untyped",
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{
+ "code": "400",
+ "method": "post",
+ },
+ map[string]interface{}{
+ "http_requests_total": 3.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Untyped,
+ ),
+ expected: []byte(`
+# HELP http_requests_total Telegraf collected metric
+# TYPE http_requests_total untyped
+http_requests_total{code="400",method="post"} 3
+`),
+ },
+ {
+ name: "prometheus input counter",
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{
+ "code": "400",
+ "method": "post",
+ },
+ map[string]interface{}{
+ "http_requests_total": 3.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Counter,
+ ),
+ expected: []byte(`
+# HELP http_requests_total Telegraf collected metric
+# TYPE http_requests_total counter
+http_requests_total{code="400",method="post"} 3
+`),
+ },
+ {
+ name: "prometheus input gauge",
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{
+ "code": "400",
+ "method": "post",
+ },
+ map[string]interface{}{
+ "http_requests_total": 3.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Gauge,
+ ),
+ expected: []byte(`
+# HELP http_requests_total Telegraf collected metric
+# TYPE http_requests_total gauge
+http_requests_total{code="400",method="post"} 3
+`),
+ },
+ {
+ name: "prometheus input histogram no buckets",
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "http_request_duration_seconds_sum": 53423,
+ "http_request_duration_seconds_count": 144320,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ expected: []byte(`
+# HELP http_request_duration_seconds Telegraf collected metric
+# TYPE http_request_duration_seconds histogram
+http_request_duration_seconds_bucket{le="+Inf"} 144320
+http_request_duration_seconds_sum 53423
+http_request_duration_seconds_count 144320
+`),
+ },
+ {
+ name: "prometheus input histogram only bucket",
+ metric: testutil.MustMetric(
+ "prometheus",
+ map[string]string{
+ "le": "0.5",
+ },
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 129389.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ expected: []byte(`
+# HELP http_request_duration_seconds Telegraf collected metric
+# TYPE http_request_duration_seconds histogram
+http_request_duration_seconds_bucket{le="0.5"} 129389
+http_request_duration_seconds_bucket{le="+Inf"} 0
+http_request_duration_seconds_sum 0
+http_request_duration_seconds_count 0
+`),
+ },
+ {
+ name: "simple with timestamp",
+ config: FormatConfig{
+ TimestampExport: ExportTimestamp,
+ },
+ metric: testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host": "example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(1574279268, 0),
+ ),
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="example.org"} 42 1574279268000
+`),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s, err := NewSerializer(FormatConfig{
+ MetricSortOrder: SortMetrics,
+ TimestampExport: tt.config.TimestampExport,
+ StringHandling: tt.config.StringHandling,
+ })
+ require.NoError(t, err)
+ actual, err := s.Serialize(tt.metric)
+ require.NoError(t, err)
+
+ require.Equal(t, strings.TrimSpace(string(tt.expected)),
+ strings.TrimSpace(string(actual)))
+ })
+ }
+}
+
+func TestSerializeBatch(t *testing.T) {
+ tests := []struct {
+ name string
+ config FormatConfig
+ metrics []telegraf.Metric
+ expected []byte
+ }{
+ {
+ name: "simple",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host": "one.example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host": "two.example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="one.example.org"} 42
+cpu_time_idle{host="two.example.org"} 42
+`),
+ },
+ {
+ name: "multiple metric families",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host": "one.example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ "time_guest": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_guest Telegraf collected metric
+# TYPE cpu_time_guest untyped
+cpu_time_guest{host="one.example.org"} 42
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host="one.example.org"} 42
+`),
+ },
+ {
+ name: "histogram",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "http_request_duration_seconds_sum": 53423,
+ "http_request_duration_seconds_count": 144320,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "0.05"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 24054.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "0.1"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 33444.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "0.2"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 100392.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "0.5"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 129389.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "1.0"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 133988.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"le": "+Inf"},
+ map[string]interface{}{
+ "http_request_duration_seconds_bucket": 144320.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Histogram,
+ ),
+ },
+ expected: []byte(`
+# HELP http_request_duration_seconds Telegraf collected metric
+# TYPE http_request_duration_seconds histogram
+http_request_duration_seconds_bucket{le="0.05"} 24054
+http_request_duration_seconds_bucket{le="0.1"} 33444
+http_request_duration_seconds_bucket{le="0.2"} 100392
+http_request_duration_seconds_bucket{le="0.5"} 129389
+http_request_duration_seconds_bucket{le="1"} 133988
+http_request_duration_seconds_bucket{le="+Inf"} 144320
+http_request_duration_seconds_sum 53423
+http_request_duration_seconds_count 144320
+`),
+ },
+ {
+ name: "",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "rpc_duration_seconds_sum": 1.7560473e+07,
+ "rpc_duration_seconds_count": 2693,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"quantile": "0.01"},
+ map[string]interface{}{
+ "rpc_duration_seconds": 3102.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"quantile": "0.05"},
+ map[string]interface{}{
+ "rpc_duration_seconds": 3272.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"quantile": "0.5"},
+ map[string]interface{}{
+ "rpc_duration_seconds": 4773.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"quantile": "0.9"},
+ map[string]interface{}{
+ "rpc_duration_seconds": 9001.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{"quantile": "0.99"},
+ map[string]interface{}{
+ "rpc_duration_seconds": 76656.0,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ },
+ expected: []byte(`
+# HELP rpc_duration_seconds Telegraf collected metric
+# TYPE rpc_duration_seconds summary
+rpc_duration_seconds{quantile="0.01"} 3102
+rpc_duration_seconds{quantile="0.05"} 3272
+rpc_duration_seconds{quantile="0.5"} 4773
+rpc_duration_seconds{quantile="0.9"} 9001
+rpc_duration_seconds{quantile="0.99"} 76656
+rpc_duration_seconds_sum 1.7560473e+07
+rpc_duration_seconds_count 2693
+`),
+ },
+ {
+ name: "newer sample",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 43.0,
+ },
+ time.Unix(1, 0),
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle 43
+`),
+ },
+ {
+ name: "colons are not replaced in metric name from measurement",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu::xyzzy",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu::xyzzy_time_idle Telegraf collected metric
+# TYPE cpu::xyzzy_time_idle untyped
+cpu::xyzzy_time_idle 42
+`),
+ },
+ {
+ name: "colons are not replaced in metric name from field",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time:idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time:idle Telegraf collected metric
+# TYPE cpu_time:idle untyped
+cpu_time:idle 42
+`),
+ },
+ {
+ name: "invalid label",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host-name": "example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host_name="example.org"} 42
+`),
+ },
+ {
+ name: "colons are replaced in label name",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "host:name": "example.org",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host_name="example.org"} 42
+`),
+ },
+ {
+ name: "discard strings",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ "cpu": "cpu0",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle 42
+`),
+ },
+ {
+ name: "string as label",
+ config: FormatConfig{
+ StringHandling: StringAsLabel,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "time_idle": 42.0,
+ "cpu": "cpu0",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{cpu="cpu0"} 42
+`),
+ },
+ {
+ name: "string as label duplicate tag",
+ config: FormatConfig{
+ StringHandling: StringAsLabel,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_idle": 42.0,
+ "cpu": "cpu1",
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{cpu="cpu0"} 42
+`),
+ },
+ {
+ name: "replace characters when using string as label",
+ config: FormatConfig{
+ StringHandling: StringAsLabel,
+ },
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "host:name": "example.org",
+ "time_idle": 42.0,
+ },
+ time.Unix(1574279268, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_idle Telegraf collected metric
+# TYPE cpu_time_idle untyped
+cpu_time_idle{host_name="example.org"} 42
+`),
+ },
+ {
+ name: "multiple fields grouping",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu0",
+ },
+ map[string]interface{}{
+ "time_guest": 8106.04,
+ "time_system": 26271.4,
+ "time_user": 92904.33,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu1",
+ },
+ map[string]interface{}{
+ "time_guest": 8181.63,
+ "time_system": 25351.49,
+ "time_user": 96912.57,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu2",
+ },
+ map[string]interface{}{
+ "time_guest": 7470.04,
+ "time_system": 24998.43,
+ "time_user": 96034.08,
+ },
+ time.Unix(0, 0),
+ ),
+ testutil.MustMetric(
+ "cpu",
+ map[string]string{
+ "cpu": "cpu3",
+ },
+ map[string]interface{}{
+ "time_guest": 7517.95,
+ "time_system": 24970.82,
+ "time_user": 94148,
+ },
+ time.Unix(0, 0),
+ ),
+ },
+ expected: []byte(`
+# HELP cpu_time_guest Telegraf collected metric
+# TYPE cpu_time_guest untyped
+cpu_time_guest{cpu="cpu0"} 8106.04
+cpu_time_guest{cpu="cpu1"} 8181.63
+cpu_time_guest{cpu="cpu2"} 7470.04
+cpu_time_guest{cpu="cpu3"} 7517.95
+# HELP cpu_time_system Telegraf collected metric
+# TYPE cpu_time_system untyped
+cpu_time_system{cpu="cpu0"} 26271.4
+cpu_time_system{cpu="cpu1"} 25351.49
+cpu_time_system{cpu="cpu2"} 24998.43
+cpu_time_system{cpu="cpu3"} 24970.82
+# HELP cpu_time_user Telegraf collected metric
+# TYPE cpu_time_user untyped
+cpu_time_user{cpu="cpu0"} 92904.33
+cpu_time_user{cpu="cpu1"} 96912.57
+cpu_time_user{cpu="cpu2"} 96034.08
+cpu_time_user{cpu="cpu3"} 94148
+`),
+ },
+ {
+ name: "summary with no quantile",
+ metrics: []telegraf.Metric{
+ testutil.MustMetric(
+ "prometheus",
+ map[string]string{},
+ map[string]interface{}{
+ "rpc_duration_seconds_sum": 1.7560473e+07,
+ "rpc_duration_seconds_count": 2693,
+ },
+ time.Unix(0, 0),
+ telegraf.Summary,
+ ),
+ },
+ expected: []byte(`
+# HELP rpc_duration_seconds Telegraf collected metric
+# TYPE rpc_duration_seconds summary
+rpc_duration_seconds_sum 1.7560473e+07
+rpc_duration_seconds_count 2693
+`),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s, err := NewSerializer(FormatConfig{
+ MetricSortOrder: SortMetrics,
+ TimestampExport: tt.config.TimestampExport,
+ StringHandling: tt.config.StringHandling,
+ })
+ require.NoError(t, err)
+ actual, err := s.SerializeBatch(tt.metrics)
+ require.NoError(t, err)
+
+ require.Equal(t,
+ strings.TrimSpace(string(tt.expected)),
+ strings.TrimSpace(string(actual)))
+ })
+ }
+}
diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go
index ecac6332393b8..b12ef7660b981 100644
--- a/plugins/serializers/registry.go
+++ b/plugins/serializers/registry.go
@@ -5,12 +5,12 @@ import (
"time"
"github.com/influxdata/telegraf"
-
"github.com/influxdata/telegraf/plugins/serializers/carbon2"
"github.com/influxdata/telegraf/plugins/serializers/graphite"
"github.com/influxdata/telegraf/plugins/serializers/influx"
"github.com/influxdata/telegraf/plugins/serializers/json"
"github.com/influxdata/telegraf/plugins/serializers/nowmetric"
+ "github.com/influxdata/telegraf/plugins/serializers/prometheus"
"github.com/influxdata/telegraf/plugins/serializers/splunkmetric"
"github.com/influxdata/telegraf/plugins/serializers/wavefront"
)
@@ -24,10 +24,16 @@ type SerializerOutput interface {
// Serializer is an interface defining functions that a serializer plugin must
// satisfy.
+//
+// Implementations of this interface should be reentrant but are not required
+// to be thread-safe.
type Serializer interface {
// Serialize takes a single telegraf metric and turns it into a byte buffer.
// separate metrics should be separated by a newline, and there should be
// a newline at the end of the buffer.
+ //
+ // New plugins should use SerializeBatch instead to allow for non-line
+ // delimited metrics.
Serialize(metric telegraf.Metric) ([]byte, error)
// SerializeBatch takes an array of telegraf metric and serializes it into
@@ -39,41 +45,64 @@ type Serializer interface {
// Config is a struct that covers the data types needed for all serializer types,
// and can be used to instantiate _any_ of the serializers.
type Config struct {
- // Dataformat can be one of: influx, graphite, or json
- DataFormat string
+ // Dataformat can be one of the serializer types listed in NewSerializer.
+ DataFormat string `toml:"data_format"`
+
+ // Carbon2 metric format.
+ Carbon2Format string `toml:"carbon2_format"`
// Support tags in graphite protocol
- GraphiteTagSupport bool
+ GraphiteTagSupport bool `toml:"graphite_tag_support"`
+
+ // Character for separating metric name and field for Graphite tags
+ GraphiteSeparator string `toml:"graphite_separator"`
// Maximum line length in bytes; influx format only
- InfluxMaxLineBytes int
+ InfluxMaxLineBytes int `toml:"influx_max_line_bytes"`
// Sort field keys, set to true only when debugging as it less performant
// than unsorted fields; influx format only
- InfluxSortFields bool
+ InfluxSortFields bool `toml:"influx_sort_fields"`
// Support unsigned integer output; influx format only
- InfluxUintSupport bool
+ InfluxUintSupport bool `toml:"influx_uint_support"`
// Prefix to add to all measurements, only supports Graphite
- Prefix string
+ Prefix string `toml:"prefix"`
// Template for converting telegraf metrics into Graphite
// only supports Graphite
- Template string
+ Template string `toml:"template"`
+
+ // Templates same Template, but multiple
+ Templates []string `toml:"templates"`
// Timestamp units to use for JSON formatted output
- TimestampUnits time.Duration
+ TimestampUnits time.Duration `toml:"timestamp_units"`
// Include HEC routing fields for splunkmetric output
- HecRouting bool
+ HecRouting bool `toml:"hec_routing"`
+
+ // Enable Splunk MultiMetric output (Splunk 8.0+)
+ SplunkmetricMultiMetric bool `toml:"splunkmetric_multi_metric"`
// Point tags to use as the source name for Wavefront (if none found, host will be used).
- WavefrontSourceOverride []string
+ WavefrontSourceOverride []string `toml:"wavefront_source_override"`
// Use Strict rules to sanitize metric and tag names from invalid characters for Wavefront
// When enabled forward slash (/) and comma (,) will be accepted
- WavefrontUseStrict bool
+ WavefrontUseStrict bool `toml:"wavefront_use_strict"`
+
+ // Include the metric timestamp on each sample.
+ PrometheusExportTimestamp bool `toml:"prometheus_export_timestamp"`
+
+ // Sort prometheus metric families and metric samples. Useful for
+ // debugging.
+ PrometheusSortMetrics bool `toml:"prometheus_sort_metrics"`
+
+ // Output string fields as metric labels; when false string fields are
+ // discarded.
+ PrometheusStringAsLabel bool `toml:"prometheus_string_as_label"`
}
// NewSerializer a Serializer interface based on the given config.
@@ -84,23 +113,48 @@ func NewSerializer(config *Config) (Serializer, error) {
case "influx":
serializer, err = NewInfluxSerializerConfig(config)
case "graphite":
- serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport)
+ serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteSeparator, config.Templates)
case "json":
serializer, err = NewJsonSerializer(config.TimestampUnits)
case "splunkmetric":
- serializer, err = NewSplunkmetricSerializer(config.HecRouting)
+ serializer, err = NewSplunkmetricSerializer(config.HecRouting, config.SplunkmetricMultiMetric)
case "nowmetric":
serializer, err = NewNowSerializer()
case "carbon2":
- serializer, err = NewCarbon2Serializer()
+ serializer, err = NewCarbon2Serializer(config.Carbon2Format)
case "wavefront":
serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride)
+ case "prometheus":
+ serializer, err = NewPrometheusSerializer(config)
default:
err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
}
return serializer, err
}
+func NewPrometheusSerializer(config *Config) (Serializer, error) {
+ exportTimestamp := prometheus.NoExportTimestamp
+ if config.PrometheusExportTimestamp {
+ exportTimestamp = prometheus.ExportTimestamp
+ }
+
+ sortMetrics := prometheus.NoSortMetrics
+ if config.PrometheusExportTimestamp {
+ sortMetrics = prometheus.SortMetrics
+ }
+
+ stringAsLabels := prometheus.DiscardStrings
+ if config.PrometheusStringAsLabel {
+ stringAsLabels = prometheus.StringAsLabel
+ }
+
+ return prometheus.NewSerializer(prometheus.FormatConfig{
+ TimestampExport: exportTimestamp,
+ MetricSortOrder: sortMetrics,
+ StringHandling: stringAsLabels,
+ })
+}
+
func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []string) (Serializer, error) {
return wavefront.NewSerializer(prefix, useStrict, sourceOverride)
}
@@ -109,12 +163,12 @@ func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) {
return json.NewSerializer(timestampUnits)
}
-func NewCarbon2Serializer() (Serializer, error) {
- return carbon2.NewSerializer()
+func NewCarbon2Serializer(carbon2format string) (Serializer, error) {
+ return carbon2.NewSerializer(carbon2format)
}
-func NewSplunkmetricSerializer(splunkmetric_hec_routing bool) (Serializer, error) {
- return splunkmetric.NewSerializer(splunkmetric_hec_routing)
+func NewSplunkmetricSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (Serializer, error) {
+ return splunkmetric.NewSerializer(splunkmetric_hec_routing, splunkmetric_multimetric)
}
func NewNowSerializer() (Serializer, error) {
@@ -143,10 +197,26 @@ func NewInfluxSerializer() (Serializer, error) {
return influx.NewSerializer(), nil
}
-func NewGraphiteSerializer(prefix, template string, tag_support bool) (Serializer, error) {
+func NewGraphiteSerializer(prefix, template string, tag_support bool, separator string, templates []string) (Serializer, error) {
+ graphiteTemplates, defaultTemplate, err := graphite.InitGraphiteTemplates(templates)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if defaultTemplate != "" {
+ template = defaultTemplate
+ }
+
+ if separator == "" {
+ separator = "."
+ }
+
return &graphite.GraphiteSerializer{
Prefix: prefix,
Template: template,
TagSupport: tag_support,
+ Separator: separator,
+ Templates: graphiteTemplates,
}, nil
}
diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md
index 552b90ea47b4b..ba2170d9c4707 100644
--- a/plugins/serializers/splunkmetric/README.md
+++ b/plugins/serializers/splunkmetric/README.md
@@ -27,6 +27,34 @@ In the above snippet, the following keys are dimensions:
* dc
* user
+## Using Multimetric output
+
+Starting with Splunk Enterprise and Splunk Cloud 8.0, you can now send multiple metric values in one payload. This means, for example, that
+you can send all of your CPU stats in one JSON struct, an example event looks like:
+
+```javascript
+{
+ "time": 1572469920,
+ "event": "metric",
+ "host": "mono.local",
+ "fields": {
+ "class": "osx",
+ "cpu": "cpu0",
+ "metric_name:telegraf.cpu.usage_guest": 0,
+ "metric_name:telegraf.cpu.usage_guest_nice": 0,
+ "metric_name:telegraf.cpu.usage_idle": 65.1,
+ "metric_name:telegraf.cpu.usage_iowait": 0,
+ "metric_name:telegraf.cpu.usage_irq": 0,
+ "metric_name:telegraf.cpu.usage_nice": 0,
+ "metric_name:telegraf.cpu.usage_softirq": 0,
+ "metric_name:telegraf.cpu.usage_steal": 0,
+ "metric_name:telegraf.cpu.usage_system": 10.2,
+ "metric_name:telegraf.cpu.usage_user": 24.7,
+ }
+}
+```
+In order to enable this mode, there's a new option `splunkmetric_multimetric` that you set in the appropriate output module you plan on using.
+
## Using with the HTTP output
To send this data to a Splunk HEC, you can use the HTTP output, there are some custom headers that you need to add
@@ -61,6 +89,7 @@ to manage the HEC authorization, here's a sample config for an HTTP output:
data_format = "splunkmetric"
## Provides time, index, source overrides for the HEC
splunkmetric_hec_routing = true
+ # splunkmetric_multimetric = true
## Additional HTTP headers
[outputs.http.headers]
@@ -73,7 +102,7 @@ to manage the HEC authorization, here's a sample config for an HTTP output:
## Overrides
You can override the default values for the HEC token you are using by adding additional tags to the config file.
-The following aspects of the token can be overriden with tags:
+The following aspects of the token can be overridden with tags:
* index
* source
@@ -118,7 +147,6 @@ disabled = false
INDEXED_EXTRACTIONS = json
KV_MODE = none
TIMESTAMP_FIELDS = time
-TIME_FORMAT = %s.%3N
```
An example configuration of a file based output is:
@@ -134,5 +162,25 @@ An example configuration of a file based output is:
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "splunkmetric"
- hec_routing = false
+ splunkmetric_hec_routing = false
+ splunkmetric_multimetric = true
+```
+
+## Non-numeric metric values
+
+Splunk supports only numeric field values, so serializer would silently drop metrics with the string values. For some cases it is possible to workaround using ENUM processor. Example, provided below doing this for the `docker_container_health.health_status` metric:
+
+```toml
+# splunkmetric does not support sting values
+[[processors.enum]]
+ namepass = ["docker_container_health"]
+ [[processors.enum.mapping]]
+ ## Name of the field to map
+ field = "health_status"
+ [processors.enum.mapping.value_mappings]
+ starting = 0
+ healthy = 1
+ unhealthy = 2
+ none = 3
```
+
diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go
index cdcf6cc592567..b96db5cf81155 100644
--- a/plugins/serializers/splunkmetric/splunkmetric.go
+++ b/plugins/serializers/splunkmetric/splunkmetric.go
@@ -9,12 +9,32 @@ import (
)
type serializer struct {
- HecRouting bool
+ HecRouting bool
+ SplunkmetricMultiMetric bool
}
-func NewSerializer(splunkmetric_hec_routing bool) (*serializer, error) {
+type CommonTags struct {
+ Time float64
+ Host string
+ Index string
+ Source string
+ Fields map[string]interface{}
+}
+
+type HECTimeSeries struct {
+ Time float64 `json:"time"`
+ Host string `json:"host,omitempty"`
+ Index string `json:"index,omitempty"`
+ Source string `json:"source,omitempty"`
+ Fields map[string]interface{} `json:"fields"`
+}
+
+// NewSerializer Setup our new serializer
+func NewSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (*serializer, error) {
+ /* Define output params */
s := &serializer{
- HecRouting: splunkmetric_hec_routing,
+ HecRouting: splunkmetric_hec_routing,
+ SplunkmetricMultiMetric: splunkmetric_multimetric,
}
return s, nil
}
@@ -45,26 +65,60 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
return serialized, nil
}
-func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, err error) {
+func (s *serializer) createMulti(metric telegraf.Metric, dataGroup HECTimeSeries, commonTags CommonTags) (metricGroup []byte, err error) {
+ /* When splunkmetric_multimetric is true, then we can write out multiple name=value pairs as part of the same
+ ** event payload. This only works when the time, host, and dimensions are the same for every name=value pair
+ ** in the timeseries data.
+ **
+ ** The format for multimetric data is 'metric_name:nameOfMetric = valueOfMetric'
+ */
+ var metricJSON []byte
+
+ // Set the event data from the commonTags above.
+ dataGroup.Time = commonTags.Time
+ dataGroup.Host = commonTags.Host
+ dataGroup.Index = commonTags.Index
+ dataGroup.Source = commonTags.Source
+ dataGroup.Fields = commonTags.Fields
+
+ // Stuff the metric data into the structure.
+ for _, field := range metric.FieldList() {
+ value, valid := verifyValue(field.Value)
- /* Splunk supports one metric json object, and does _not_ support an array of JSON objects.
- ** Splunk has the following required names for the metric store:
- ** metric_name: The name of the metric
- ** _value: The value for the metric
- ** time: The timestamp for the metric
- ** All other index fields become dimensions.
- */
- type HECTimeSeries struct {
- Time float64 `json:"time"`
- Event string `json:"event"`
- Host string `json:"host,omitempty"`
- Index string `json:"index,omitempty"`
- Source string `json:"source,omitempty"`
- Fields map[string]interface{} `json:"fields"`
+ if !valid {
+ log.Printf("D! Can not parse value: %v for key: %v", field.Value, field.Key)
+ continue
+ }
+
+ dataGroup.Fields["metric_name:"+metric.Name()+"."+field.Key] = value
}
- dataGroup := HECTimeSeries{}
- var metricJson []byte
+ // Manage the rest of the event details based upon HEC routing rules
+ switch s.HecRouting {
+ case true:
+ // Output the data as a fields array and host,index,time,source overrides for the HEC.
+ metricJSON, err = json.Marshal(dataGroup)
+ default:
+ // Just output the data and the time, useful for file based outputs
+ dataGroup.Fields["time"] = dataGroup.Time
+ metricJSON, err = json.Marshal(dataGroup.Fields)
+ }
+ if err != nil {
+ return nil, err
+ }
+ // Let the JSON fall through to the return below
+ metricGroup = metricJSON
+
+ return metricGroup, nil
+}
+
+func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSeries, commonTags CommonTags) (metricGroup []byte, err error) {
+ /* The default mode is to generate one JSON entity per metric (required for pre-8.0 Splunks)
+ **
+ ** The format for single metric is 'nameOfMetric = valueOfMetric'
+ */
+
+ var metricJSON []byte
for _, field := range metric.FieldList() {
@@ -75,39 +129,28 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e
continue
}
- obj := map[string]interface{}{}
- obj["metric_name"] = metric.Name() + "." + field.Key
- obj["_value"] = value
-
- dataGroup.Event = "metric"
- // Convert ns to float seconds since epoch.
- dataGroup.Time = float64(metric.Time().UnixNano()) / float64(1000000000)
- dataGroup.Fields = obj
-
- // Break tags out into key(n)=value(t) pairs
- for n, t := range metric.Tags() {
- if n == "host" {
- dataGroup.Host = t
- } else if n == "index" {
- dataGroup.Index = t
- } else if n == "source" {
- dataGroup.Source = t
- } else {
- dataGroup.Fields[n] = t
- }
- }
+ dataGroup.Time = commonTags.Time
+
+ // Apply the common tags from above to every record.
+ dataGroup.Host = commonTags.Host
+ dataGroup.Index = commonTags.Index
+ dataGroup.Source = commonTags.Source
+ dataGroup.Fields = commonTags.Fields
+
+ dataGroup.Fields["metric_name"] = metric.Name() + "." + field.Key
+ dataGroup.Fields["_value"] = value
switch s.HecRouting {
case true:
// Output the data as a fields array and host,index,time,source overrides for the HEC.
- metricJson, err = json.Marshal(dataGroup)
+ metricJSON, err = json.Marshal(dataGroup)
default:
- // Just output the data and the time, useful for file based outuputs
+ // Just output the data and the time, useful for file based outputs
dataGroup.Fields["time"] = dataGroup.Time
- metricJson, err = json.Marshal(dataGroup.Fields)
+ metricJSON, err = json.Marshal(dataGroup.Fields)
}
- metricGroup = append(metricGroup, metricJson...)
+ metricGroup = append(metricGroup, metricJSON...)
if err != nil {
return nil, err
@@ -117,6 +160,47 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e
return metricGroup, nil
}
+func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, err error) {
+
+ /* Splunk supports one metric json object, and does _not_ support an array of JSON objects.
+ ** Splunk has the following required names for the metric store:
+ ** metric_name: The name of the metric
+ ** _value: The value for the metric
+ ** time: The timestamp for the metric
+ ** All other index fields become dimensions.
+ */
+
+ dataGroup := HECTimeSeries{}
+
+ // The tags are common to all events in this timeseries
+ commonTags := CommonTags{}
+
+ commonTags.Fields = map[string]interface{}{}
+
+ // Break tags out into key(n)=value(t) pairs
+ for n, t := range metric.Tags() {
+ if n == "host" {
+ commonTags.Host = t
+ } else if n == "index" {
+ commonTags.Index = t
+ } else if n == "source" {
+ commonTags.Source = t
+ } else {
+ commonTags.Fields[n] = t
+ }
+ }
+ commonTags.Time = float64(metric.Time().UnixNano()) / float64(1000000000)
+ switch s.SplunkmetricMultiMetric {
+ case true:
+ metricGroup, _ = s.createMulti(metric, dataGroup, commonTags)
+ default:
+ metricGroup, _ = s.createSingle(metric, dataGroup, commonTags)
+ }
+
+ // Return the metric group regardless of if it's multimetric or single metric.
+ return metricGroup, nil
+}
+
func verifyValue(v interface{}) (value interface{}, valid bool) {
switch v.(type) {
case string:
diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go
index 04f6e6538294a..c00bcc7798aac 100644
--- a/plugins/serializers/splunkmetric/splunkmetric_test.go
+++ b/plugins/serializers/splunkmetric/splunkmetric_test.go
@@ -4,10 +4,9 @@ import (
"testing"
"time"
- "github.com/stretchr/testify/assert"
-
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
+ "github.com/stretchr/testify/assert"
)
func MustMetric(v telegraf.Metric, err error) telegraf.Metric {
@@ -29,7 +28,7 @@ func TestSerializeMetricFloat(t *testing.T) {
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
- s, _ := NewSerializer(false)
+ s, _ := NewSerializer(false, false)
var buf []byte
buf, err = s.Serialize(m)
assert.NoError(t, err)
@@ -49,11 +48,11 @@ func TestSerializeMetricFloatHec(t *testing.T) {
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
- s, _ := NewSerializer(true)
+ s, _ := NewSerializer(true, false)
var buf []byte
buf, err = s.Serialize(m)
assert.NoError(t, err)
- expS := `{"time":1529875740.819,"event":"metric","fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}`
+ expS := `{"time":1529875740.819,"fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}`
assert.Equal(t, string(expS), string(buf))
}
@@ -68,7 +67,7 @@ func TestSerializeMetricInt(t *testing.T) {
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
- s, _ := NewSerializer(false)
+ s, _ := NewSerializer(false, false)
var buf []byte
buf, err = s.Serialize(m)
assert.NoError(t, err)
@@ -88,12 +87,12 @@ func TestSerializeMetricIntHec(t *testing.T) {
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
- s, _ := NewSerializer(true)
+ s, _ := NewSerializer(true, false)
var buf []byte
buf, err = s.Serialize(m)
assert.NoError(t, err)
- expS := `{"time":0,"event":"metric","fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}`
+ expS := `{"time":0,"fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}`
assert.Equal(t, string(expS), string(buf))
}
@@ -108,7 +107,7 @@ func TestSerializeMetricBool(t *testing.T) {
m, err := metric.New("docker", tags, fields, now)
assert.NoError(t, err)
- s, _ := NewSerializer(false)
+ s, _ := NewSerializer(false, false)
var buf []byte
buf, err = s.Serialize(m)
assert.NoError(t, err)
@@ -128,12 +127,12 @@ func TestSerializeMetricBoolHec(t *testing.T) {
m, err := metric.New("docker", tags, fields, now)
assert.NoError(t, err)
- s, _ := NewSerializer(true)
+ s, _ := NewSerializer(true, false)
var buf []byte
buf, err = s.Serialize(m)
assert.NoError(t, err)
- expS := `{"time":0,"event":"metric","fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}`
+ expS := `{"time":0,"fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}`
assert.Equal(t, string(expS), string(buf))
}
@@ -149,7 +148,7 @@ func TestSerializeMetricString(t *testing.T) {
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
- s, _ := NewSerializer(false)
+ s, _ := NewSerializer(false, false)
var buf []byte
buf, err = s.Serialize(m)
assert.NoError(t, err)
@@ -182,11 +181,33 @@ func TestSerializeBatch(t *testing.T) {
)
metrics := []telegraf.Metric{m, n}
- s, _ := NewSerializer(false)
+ s, _ := NewSerializer(false, false)
buf, err := s.SerializeBatch(metrics)
assert.NoError(t, err)
- expS := `{"_value":42,"metric_name":"cpu.value","time":0}` + `{"_value":92,"metric_name":"cpu.value","time":0}`
+ expS := `{"_value":42,"metric_name":"cpu.value","time":0}{"_value":92,"metric_name":"cpu.value","time":0}`
+ assert.Equal(t, string(expS), string(buf))
+}
+
+func TestSerializeMulti(t *testing.T) {
+ m := MustMetric(
+ metric.New(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "user": 42.0,
+ "system": 8.0,
+ },
+ time.Unix(0, 0),
+ ),
+ )
+
+ metrics := []telegraf.Metric{m}
+ s, _ := NewSerializer(false, true)
+ buf, err := s.SerializeBatch(metrics)
+ assert.NoError(t, err)
+
+ expS := `{"metric_name:cpu.system":8,"metric_name:cpu.user":42,"time":0}`
assert.Equal(t, string(expS), string(buf))
}
@@ -213,10 +234,32 @@ func TestSerializeBatchHec(t *testing.T) {
)
metrics := []telegraf.Metric{m, n}
- s, _ := NewSerializer(true)
+ s, _ := NewSerializer(true, false)
+ buf, err := s.SerializeBatch(metrics)
+ assert.NoError(t, err)
+
+ expS := `{"time":0,"fields":{"_value":42,"metric_name":"cpu.value"}}{"time":0,"fields":{"_value":92,"metric_name":"cpu.value"}}`
+ assert.Equal(t, string(expS), string(buf))
+}
+
+func TestSerializeMultiHec(t *testing.T) {
+ m := MustMetric(
+ metric.New(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{
+ "usage": 42.0,
+ "system": 8.0,
+ },
+ time.Unix(0, 0),
+ ),
+ )
+
+ metrics := []telegraf.Metric{m}
+ s, _ := NewSerializer(true, true)
buf, err := s.SerializeBatch(metrics)
assert.NoError(t, err)
- expS := `{"time":0,"event":"metric","fields":{"_value":42,"metric_name":"cpu.value"}}` + `{"time":0,"event":"metric","fields":{"_value":92,"metric_name":"cpu.value"}}`
+ expS := `{"time":0,"fields":{"metric_name:cpu.system":8,"metric_name:cpu.usage":42}}`
assert.Equal(t, string(expS), string(buf))
}
diff --git a/plugins/serializers/wavefront/README.md b/plugins/serializers/wavefront/README.md
index 7a6594da3831e..3b72d95b4914c 100644
--- a/plugins/serializers/wavefront/README.md
+++ b/plugins/serializers/wavefront/README.md
@@ -1,4 +1,4 @@
-# Example
+# Wavefront
The `wavefront` serializer translates the Telegraf metric format to the [Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html).
@@ -9,7 +9,7 @@ The `wavefront` serializer translates the Telegraf metric format to the [Wavefro
files = ["stdout"]
## Use Strict rules to sanitize metric and tag names from invalid characters
- ## When enabled forward slash (/) and comma (,) will be accpeted
+ ## When enabled forward slash (/) and comma (,) will be accepted
# wavefront_use_strict = false
## point tags to use as the source name for Wavefront (if none found, host will be used)
diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go
index 70b87512fad61..67fa1ae3a6834 100755
--- a/plugins/serializers/wavefront/wavefront.go
+++ b/plugins/serializers/wavefront/wavefront.go
@@ -1,11 +1,10 @@
package wavefront
import (
- "bytes"
- "fmt"
"log"
"strconv"
"strings"
+ "sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs/wavefront"
@@ -16,6 +15,8 @@ type WavefrontSerializer struct {
Prefix string
UseStrict bool
SourceOverride []string
+ scratch buffer
+ mu sync.Mutex // buffer mutex
}
// catch many of the invalid chars that could appear in a metric or tag name
@@ -48,18 +49,16 @@ func NewSerializer(prefix string, useStrict bool, sourceOverride []string) (*Wav
return s, nil
}
-// Serialize : Serialize based on Wavefront format
-func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) {
- out := []byte{}
- metricSeparator := "."
+func (s *WavefrontSerializer) serialize(buf *buffer, m telegraf.Metric) {
+ const metricSeparator = "."
for fieldName, value := range m.Fields() {
var name string
if fieldName == "value" {
- name = fmt.Sprintf("%s%s", s.Prefix, m.Name())
+ name = s.Prefix + m.Name()
} else {
- name = fmt.Sprintf("%s%s%s%s", s.Prefix, m.Name(), metricSeparator, fieldName)
+ name = s.Prefix + m.Name() + metricSeparator + fieldName
}
if s.UseStrict {
@@ -70,133 +69,150 @@ func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) {
name = pathReplacer.Replace(name)
- metric := &wavefront.MetricPoint{
- Metric: name,
- Timestamp: m.Time().Unix(),
- }
-
- metricValue, buildError := buildValue(value, metric.Metric)
- if buildError != nil {
+ metricValue, valid := buildValue(value, name)
+ if !valid {
// bad value continue to next metric
continue
}
- metric.Value = metricValue
-
source, tags := buildTags(m.Tags(), s)
- metric.Source = source
- metric.Tags = tags
-
- out = append(out, formatMetricPoint(metric, s)...)
+ metric := wavefront.MetricPoint{
+ Metric: name,
+ Timestamp: m.Time().Unix(),
+ Value: metricValue,
+ Source: source,
+ Tags: tags,
+ }
+ formatMetricPoint(&s.scratch, &metric, s)
}
+}
+
+// Serialize : Serialize based on Wavefront format
+func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) {
+ s.mu.Lock()
+ s.scratch.Reset()
+ s.serialize(&s.scratch, m)
+ out := s.scratch.Copy()
+ s.mu.Unlock()
return out, nil
}
func (s *WavefrontSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
- var batch bytes.Buffer
+ s.mu.Lock()
+ s.scratch.Reset()
for _, m := range metrics {
- buf, err := s.Serialize(m)
- if err != nil {
- return nil, err
- }
- _, err = batch.Write(buf)
- if err != nil {
- return nil, err
+ s.serialize(&s.scratch, m)
+ }
+ out := s.scratch.Copy()
+ s.mu.Unlock()
+ return out, nil
+}
+
+func findSourceTag(mTags map[string]string, s *WavefrontSerializer) string {
+ if src, ok := mTags["source"]; ok {
+ delete(mTags, "source")
+ return src
+ }
+ for _, src := range s.SourceOverride {
+ if source, ok := mTags[src]; ok {
+ delete(mTags, src)
+ mTags["telegraf_host"] = mTags["host"]
+ return source
}
}
- return batch.Bytes(), nil
+ return mTags["host"]
}
func buildTags(mTags map[string]string, s *WavefrontSerializer) (string, map[string]string) {
-
// Remove all empty tags.
for k, v := range mTags {
if v == "" {
delete(mTags, k)
}
}
-
- var source string
-
- if src, ok := mTags["source"]; ok {
- source = src
- delete(mTags, "source")
- } else {
- sourceTagFound := false
- for _, src := range s.SourceOverride {
- for k, v := range mTags {
- if k == src {
- source = v
- mTags["telegraf_host"] = mTags["host"]
- sourceTagFound = true
- delete(mTags, k)
- break
- }
- }
- if sourceTagFound {
- break
- }
- }
-
- if !sourceTagFound {
- source = mTags["host"]
- }
- }
-
+ source := findSourceTag(mTags, s)
delete(mTags, "host")
-
return tagValueReplacer.Replace(source), mTags
}
-func buildValue(v interface{}, name string) (float64, error) {
+func buildValue(v interface{}, name string) (val float64, valid bool) {
switch p := v.(type) {
case bool:
if p {
- return 1, nil
- } else {
- return 0, nil
+ return 1, true
}
+ return 0, true
case int64:
- return float64(v.(int64)), nil
+ return float64(p), true
case uint64:
- return float64(v.(uint64)), nil
+ return float64(p), true
case float64:
- return v.(float64), nil
+ return p, true
case string:
- // return an error but don't log
- return 0, fmt.Errorf("string type not supported")
+ // return false but don't log
+ return 0, false
default:
- // return an error and log a debug message
- err := fmt.Errorf("unexpected type: %T, with value: %v, for :%s", v, v, name)
- log.Printf("D! Serializer [wavefront] %s\n", err.Error())
- return 0, err
+ // log a debug message
+ log.Printf("D! Serializer [wavefront] unexpected type: %T, with value: %v, for :%s\n",
+ v, v, name)
+ return 0, false
}
}
-func formatMetricPoint(metricPoint *wavefront.MetricPoint, s *WavefrontSerializer) []byte {
- var buffer bytes.Buffer
- buffer.WriteString("\"")
- buffer.WriteString(metricPoint.Metric)
- buffer.WriteString("\" ")
- buffer.WriteString(strconv.FormatFloat(metricPoint.Value, 'f', 6, 64))
- buffer.WriteString(" ")
- buffer.WriteString(strconv.FormatInt(metricPoint.Timestamp, 10))
- buffer.WriteString(" source=\"")
- buffer.WriteString(metricPoint.Source)
- buffer.WriteString("\"")
+func formatMetricPoint(b *buffer, metricPoint *wavefront.MetricPoint, s *WavefrontSerializer) []byte {
+ b.WriteChar('"')
+ b.WriteString(metricPoint.Metric)
+ b.WriteString(`" `)
+ b.WriteFloat64(metricPoint.Value)
+ b.WriteChar(' ')
+ b.WriteUint64(uint64(metricPoint.Timestamp))
+ b.WriteString(` source="`)
+ b.WriteString(metricPoint.Source)
+ b.WriteChar('"')
for k, v := range metricPoint.Tags {
- buffer.WriteString(" \"")
+ b.WriteString(` "`)
if s.UseStrict {
- buffer.WriteString(strictSanitizedChars.Replace(k))
+ b.WriteString(strictSanitizedChars.Replace(k))
} else {
- buffer.WriteString(sanitizedChars.Replace(k))
+ b.WriteString(sanitizedChars.Replace(k))
}
- buffer.WriteString("\"=\"")
- buffer.WriteString(tagValueReplacer.Replace(v))
- buffer.WriteString("\"")
+ b.WriteString(`"="`)
+ b.WriteString(tagValueReplacer.Replace(v))
+ b.WriteChar('"')
}
- buffer.WriteString("\n")
+ b.WriteChar('\n')
+
+ return *b
+}
+
+type buffer []byte
+
+func (b *buffer) Reset() { *b = (*b)[:0] }
+
+func (b *buffer) Copy() []byte {
+ p := make([]byte, len(*b))
+ copy(p, *b)
+ return p
+}
+
+func (b *buffer) WriteString(s string) {
+ *b = append(*b, s...)
+}
+
+// This is named WriteChar instead of WriteByte because the 'stdmethods' check
+// of 'go vet' wants WriteByte to have the signature:
+//
+// func (b *buffer) WriteByte(c byte) error { ... }
+//
+func (b *buffer) WriteChar(c byte) {
+ *b = append(*b, c)
+}
+
+func (b *buffer) WriteUint64(val uint64) {
+ *b = strconv.AppendUint(*b, val, 10)
+}
- return buffer.Bytes()
+func (b *buffer) WriteFloat64(val float64) {
+ *b = strconv.AppendFloat(*b, val, 'f', 6, 64)
}
diff --git a/plugins/serializers/wavefront/wavefront_test.go b/plugins/serializers/wavefront/wavefront_test.go
index 3230ce51534c0..548326e703e6c 100755
--- a/plugins/serializers/wavefront/wavefront_test.go
+++ b/plugins/serializers/wavefront/wavefront_test.go
@@ -7,6 +7,7 @@ import (
"testing"
"time"
+ "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/outputs/wavefront"
"github.com/stretchr/testify/assert"
@@ -132,7 +133,7 @@ func TestFormatMetricPoint(t *testing.T) {
s := WavefrontSerializer{}
for _, pt := range pointTests {
- bout := formatMetricPoint(pt.ptIn, &s)
+ bout := formatMetricPoint(new(buffer), pt.ptIn, &s)
sout := string(bout[:])
if sout != pt.out {
t.Errorf("\nexpected\t%s\nreceived\t%s\n", pt.out, sout)
@@ -160,7 +161,7 @@ func TestUseStrict(t *testing.T) {
s := WavefrontSerializer{UseStrict: true}
for _, pt := range pointTests {
- bout := formatMetricPoint(pt.ptIn, &s)
+ bout := formatMetricPoint(new(buffer), pt.ptIn, &s)
sout := string(bout[:])
if sout != pt.out {
t.Errorf("\nexpected\t%s\nreceived\t%s\n", pt.out, sout)
@@ -293,3 +294,47 @@ func TestSerializeMetricPrefix(t *testing.T) {
expS := []string{fmt.Sprintf("\"telegraf.cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)}
assert.Equal(t, expS, mS)
}
+
+func benchmarkMetrics(b *testing.B) [4]telegraf.Metric {
+ b.Helper()
+ now := time.Now()
+ tags := map[string]string{
+ "cpu": "cpu0",
+ "host": "realHost",
+ }
+ newMetric := func(v interface{}) telegraf.Metric {
+ fields := map[string]interface{}{
+ "usage_idle": v,
+ }
+ m, err := metric.New("cpu", tags, fields, now)
+ if err != nil {
+ b.Fatal(err)
+ }
+ return m
+ }
+ return [4]telegraf.Metric{
+ newMetric(91.5),
+ newMetric(91),
+ newMetric(true),
+ newMetric(false),
+ }
+}
+
+func BenchmarkSerialize(b *testing.B) {
+ var s WavefrontSerializer
+ metrics := benchmarkMetrics(b)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ s.Serialize(metrics[i%len(metrics)])
+ }
+}
+
+func BenchmarkSerializeBatch(b *testing.B) {
+ var s WavefrontSerializer
+ m := benchmarkMetrics(b)
+ metrics := m[:]
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ s.SerializeBatch(metrics)
+ }
+}
diff --git a/processor.go b/processor.go
index e084adab7396d..15d67eb406f5c 100644
--- a/processor.go
+++ b/processor.go
@@ -1,12 +1,31 @@
package telegraf
+// Processor is a processor plugin interface for defining new inline processors.
+// these are extremely efficient and should be used over StreamingProcessor if
+// you do not need asynchronous metric writes.
type Processor interface {
- // SampleConfig returns the default configuration of the Input
- SampleConfig() string
-
- // Description returns a one-sentence description on the Input
- Description() string
+ PluginDescriber
// Apply the filter to the given metric.
Apply(in ...Metric) []Metric
}
+
+// StreamingProcessor is a processor that can take in a stream of messages
+type StreamingProcessor interface {
+ PluginDescriber
+
+ // Start is the initializer for the processor
+ // Start is only called once per plugin instance, and never in parallel.
+ // Start should exit immediately after setup
+ Start(acc Accumulator) error
+
+ // Add is called for each metric to be processed.
+ Add(metric Metric, acc Accumulator) error
+
+ // Stop gives you a callback to free resources.
+ // by the time Stop is called, the input stream will have already been closed
+ // and Add will not be called anymore.
+ // When stop returns, you should no longer be writing metrics to the
+ // accumulator.
+ Stop() error
+}
diff --git a/scripts/alpine.docker b/scripts/alpine.docker
index 0103a16d4ba5e..395cbd8a33bc7 100644
--- a/scripts/alpine.docker
+++ b/scripts/alpine.docker
@@ -1,13 +1,10 @@
-FROM golang:1.11.0 as builder
-ENV DEP_VERSION 0.5.0
-RUN curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 && chmod +x /usr/local/bin/dep
+FROM golang:1.14.7 as builder
WORKDIR /go/src/github.com/influxdata/telegraf
-COPY Gopkg.toml Gopkg.lock ./
-RUN dep ensure -vendor-only
+
COPY . /go/src/github.com/influxdata/telegraf
RUN CGO_ENABLED=0 make go-install
-FROM alpine:3.6
+FROM alpine:3.12
RUN echo 'hosts: files dns' >> /etc/nsswitch.conf
RUN apk add --no-cache iputils ca-certificates net-snmp-tools procps lm_sensors && \
update-ca-certificates
diff --git a/scripts/build.py b/scripts/build.py
deleted file mode 100755
index 5869bf1edacf2..0000000000000
--- a/scripts/build.py
+++ /dev/null
@@ -1,923 +0,0 @@
-#!/usr/bin/python -u
-
-import sys
-import os
-import subprocess
-import time
-from datetime import datetime
-import shutil
-import tempfile
-import hashlib
-import re
-import logging
-import argparse
-
-################
-#### Telegraf Variables
-################
-
-# Packaging variables
-PACKAGE_NAME = "telegraf"
-INSTALL_ROOT_DIR = "/usr/bin"
-LOG_DIR = "/var/log/telegraf"
-SCRIPT_DIR = "/usr/lib/telegraf/scripts"
-CONFIG_DIR = "/etc/telegraf"
-CONFIG_DIR_D = "/etc/telegraf/telegraf.d"
-LOGROTATE_DIR = "/etc/logrotate.d"
-
-INIT_SCRIPT = "scripts/init.sh"
-SYSTEMD_SCRIPT = "scripts/telegraf.service"
-LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
-DEFAULT_CONFIG = "etc/telegraf.conf"
-DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf"
-POSTINST_SCRIPT = "scripts/post-install.sh"
-PREINST_SCRIPT = "scripts/pre-install.sh"
-POSTREMOVE_SCRIPT = "scripts/post-remove.sh"
-PREREMOVE_SCRIPT = "scripts/pre-remove.sh"
-
-# Default AWS S3 bucket for uploads
-DEFAULT_BUCKET = "dl.influxdata.com/telegraf/artifacts"
-
-CONFIGURATION_FILES = [
- CONFIG_DIR + '/telegraf.conf',
- LOGROTATE_DIR + '/telegraf',
-]
-
-# META-PACKAGE VARIABLES
-PACKAGE_LICENSE = "MIT"
-PACKAGE_URL = "https://github.com/influxdata/telegraf"
-MAINTAINER = "support@influxdb.com"
-VENDOR = "InfluxData"
-DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."
-
-# SCRIPT START
-prereqs = [ 'git', 'go' ]
-go_vet_command = "go tool vet -composites=true ./"
-optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]
-
-fpm_common_args = "-f -s dir --log error \
- --vendor {} \
- --url {} \
- --license {} \
- --maintainer {} \
- --config-files {} \
- --config-files {} \
- --after-install {} \
- --before-install {} \
- --after-remove {} \
- --before-remove {} \
- --description \"{}\"".format(
- VENDOR,
- PACKAGE_URL,
- PACKAGE_LICENSE,
- MAINTAINER,
- CONFIG_DIR + '/telegraf.conf',
- LOGROTATE_DIR + '/telegraf',
- POSTINST_SCRIPT,
- PREINST_SCRIPT,
- POSTREMOVE_SCRIPT,
- PREREMOVE_SCRIPT,
- DESCRIPTION)
-
-targets = {
- 'telegraf' : './cmd/telegraf',
-}
-
-supported_builds = {
- "windows": [ "amd64", "i386" ],
- "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x", "mipsel"],
- "freebsd": [ "amd64", "i386" ]
-}
-
-supported_packages = {
- "linux": [ "deb", "rpm", "tar" ],
- "windows": [ "zip" ],
- "freebsd": [ "tar" ]
-}
-
-next_version = '1.11.0'
-
-################
-#### Telegraf Functions
-################
-
-def print_banner():
- logging.info("""
- _____ _ __
-/__ \\___| | ___ __ _ _ __ __ _ / _|
- / /\\/ _ \\ |/ _ \\/ _` | '__/ _` | |_
- / / | __/ | __/ (_| | | | (_| | _|
- \\/ \\___|_|\\___|\\__, |_| \\__,_|_|
- |___/
- Build Script
-""")
-
-def create_package_fs(build_root):
- """Create a filesystem structure to mimic the package filesystem.
- """
- logging.debug("Creating a filesystem hierarchy from directory: {}".format(build_root))
- # Using [1:] for the path names due to them being absolute
- # (will overwrite previous paths, per 'os.path.join' documentation)
- dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:], CONFIG_DIR_D[1:] ]
- for d in dirs:
- os.makedirs(os.path.join(build_root, d))
- os.chmod(os.path.join(build_root, d), 0o755)
-
-def package_scripts(build_root, config_only=False, windows=False):
- """Copy the necessary scripts and configuration files to the package
- filesystem.
- """
- if config_only or windows:
- logging.info("Copying configuration to build directory")
- if windows:
- shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf"))
- else:
- shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "telegraf.conf"))
- os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644)
- else:
- logging.info("Copying scripts and configuration to build directory")
- shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
- os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
- shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
- os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
- shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
- os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
- shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
- os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
-
-def run_generate():
- # NOOP for Telegraf
- return True
-
-def go_get(branch, update=False, no_uncommitted=False):
- """Retrieve build dependencies or restore pinned dependencies.
- """
- if local_changes() and no_uncommitted:
- logging.error("There are uncommitted changes in the current directory.")
- return False
- logging.info("Retrieving dependencies with `dep`...")
- run("{}/bin/dep ensure -v -vendor-only".format(os.environ.get("GOPATH",
- os.path.expanduser("~/go"))))
- return True
-
-def run_tests(race, parallel, timeout, no_vet):
- # Currently a NOOP for Telegraf
- return True
-
-################
-#### All Telegraf-specific content above this line
-################
-
-def run(command, allow_failure=False, shell=False):
- """Run shell command (convenience wrapper around subprocess).
- """
- out = None
- logging.debug("{}".format(command))
- try:
- if shell:
- out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
- else:
- out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
- out = out.decode('utf-8').strip()
- # logging.debug("Command output: {}".format(out))
- except subprocess.CalledProcessError as e:
- if allow_failure:
- logging.warn("Command '{}' failed with error: {}".format(command, e.output))
- return None
- else:
- logging.error("Command '{}' failed with error: {}".format(command, e.output))
- sys.exit(1)
- except OSError as e:
- if allow_failure:
- logging.warn("Command '{}' failed with error: {}".format(command, e))
- return out
- else:
- logging.error("Command '{}' failed with error: {}".format(command, e))
- sys.exit(1)
- else:
- return out
-
-def create_temp_dir(prefix = None):
- """ Create temporary directory with optional prefix.
- """
- if prefix is None:
- return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME))
- else:
- return tempfile.mkdtemp(prefix=prefix)
-
-def increment_minor_version(version):
- """Return the version with the minor version incremented and patch
- version set to zero.
- """
- ver_list = version.split('.')
- if len(ver_list) != 3:
- logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version))
- return version
- ver_list[1] = str(int(ver_list[1]) + 1)
- ver_list[2] = str(0)
- inc_version = '.'.join(ver_list)
- logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version))
- return inc_version
-
-def get_current_version_tag():
- """Retrieve the raw git version tag.
- """
- version = run("git describe --exact-match --tags 2>/dev/null",
- allow_failure=True, shell=True)
- return version
-
-def get_current_version():
- """Parse version information from git tag output.
- """
- version_tag = get_current_version_tag()
- if not version_tag:
- return None
- # Remove leading 'v'
- if version_tag[0] == 'v':
- version_tag = version_tag[1:]
- # Replace any '-'/'_' with '~'
- if '-' in version_tag:
- version_tag = version_tag.replace("-","~")
- if '_' in version_tag:
- version_tag = version_tag.replace("_","~")
- return version_tag
-
-def get_current_commit(short=False):
- """Retrieve the current git commit.
- """
- command = None
- if short:
- command = "git log --pretty=format:'%h' -n 1"
- else:
- command = "git rev-parse HEAD"
- out = run(command)
- return out.strip('\'\n\r ')
-
-def get_current_branch():
- """Retrieve the current git branch.
- """
- command = "git rev-parse --abbrev-ref HEAD"
- out = run(command)
- return out.strip()
-
-def local_changes():
- """Return True if there are local un-committed changes.
- """
- output = run("git diff-files --ignore-submodules --").strip()
- if len(output) > 0:
- return True
- return False
-
-def get_system_arch():
- """Retrieve current system architecture.
- """
- arch = os.uname()[4]
- if arch == "x86_64":
- arch = "amd64"
- elif arch == "386":
- arch = "i386"
- elif "arm64" in arch:
- arch = "arm64"
- elif 'arm' in arch:
- # Prevent uname from reporting full ARM arch (eg 'armv7l')
- arch = "arm"
- return arch
-
-def get_system_platform():
- """Retrieve current system platform.
- """
- if sys.platform.startswith("linux"):
- return "linux"
- else:
- return sys.platform
-
-def get_go_version():
- """Retrieve version information for Go.
- """
- out = run("go version")
- matches = re.search('go version go(\S+)', out)
- if matches is not None:
- return matches.groups()[0].strip()
- return None
-
-def check_path_for(b):
- """Check the the user's path for the provided binary.
- """
- def is_exe(fpath):
- return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
- for path in os.environ["PATH"].split(os.pathsep):
- path = path.strip('"')
- full_path = os.path.join(path, b)
- if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
- return full_path
-
-def check_environ(build_dir = None):
- """Check environment for common Go variables.
- """
- logging.info("Checking environment...")
- for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
- logging.debug("Using '{}' for {}".format(os.environ.get(v), v))
-
- cwd = os.getcwd()
- if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
- logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.")
- return True
-
-def check_prereqs():
- """Check user path for required dependencies.
- """
- logging.info("Checking for dependencies...")
- for req in prereqs:
- if not check_path_for(req):
- logging.error("Could not find dependency: {}".format(req))
- return False
- return True
-
-def upload_packages(packages, bucket_name=None, overwrite=False):
- """Upload provided package output to AWS S3.
- """
- logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
- try:
- import boto
- from boto.s3.key import Key
- from boto.s3.connection import OrdinaryCallingFormat
- logging.getLogger("boto").setLevel(logging.WARNING)
- except ImportError:
- logging.warn("Cannot upload packages without 'boto' Python library!")
- return False
- logging.info("Connecting to AWS S3...")
- # Up the number of attempts to 10 from default of 1
- boto.config.add_section("Boto")
- boto.config.set("Boto", "metadata_service_num_attempts", "10")
- c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
- if bucket_name is None:
- bucket_name = DEFAULT_BUCKET
- bucket = c.get_bucket(bucket_name.split('/')[0])
- for p in packages:
- if '/' in bucket_name:
- # Allow for nested paths within the bucket name (ex:
- # bucket/folder). Assuming forward-slashes as path
- # delimiter.
- name = os.path.join('/'.join(bucket_name.split('/')[1:]),
- os.path.basename(p))
- else:
- name = os.path.basename(p)
- logging.debug("Using key: {}".format(name))
- if bucket.get_key(name) is None or overwrite:
- logging.info("Uploading file {}".format(name))
- k = Key(bucket)
- k.key = name
- if overwrite:
- n = k.set_contents_from_filename(p, replace=True)
- else:
- n = k.set_contents_from_filename(p, replace=False)
- k.make_public()
- else:
- logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
- return True
-
-def go_list(vendor=False, relative=False):
- """
- Return a list of packages
- If vendor is False vendor package are not included
- If relative is True the package prefix defined by PACKAGE_URL is stripped
- """
- p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = p.communicate()
- packages = out.split('\n')
- if packages[-1] == '':
- packages = packages[:-1]
- if not vendor:
- non_vendor = []
- for p in packages:
- if '/vendor/' not in p:
- non_vendor.append(p)
- packages = non_vendor
- if relative:
- relative_pkgs = []
- for p in packages:
- r = p.replace(PACKAGE_URL, '.')
- if r != '.':
- relative_pkgs.append(r)
- packages = relative_pkgs
- return packages
-
-def build(version=None,
- platform=None,
- arch=None,
- nightly=False,
- race=False,
- clean=False,
- outdir=".",
- tags=[],
- static=False):
- """Build each target for the specified architecture and platform.
- """
- logging.info("Starting build for {}/{}...".format(platform, arch))
- logging.info("Using Go version: {}".format(get_go_version()))
- logging.info("Using git branch: {}".format(get_current_branch()))
- logging.info("Using git commit: {}".format(get_current_commit()))
- if static:
- logging.info("Using statically-compiled output.")
- if race:
- logging.info("Race is enabled.")
- if len(tags) > 0:
- logging.info("Using build tags: {}".format(','.join(tags)))
-
- logging.info("Sending build output to: {}".format(outdir))
- if not os.path.exists(outdir):
- os.makedirs(outdir)
- elif clean and outdir != '/' and outdir != ".":
- logging.info("Cleaning build directory '{}' before building.".format(outdir))
- shutil.rmtree(outdir)
- os.makedirs(outdir)
-
- logging.info("Using version '{}' for build.".format(version))
-
- tmp_build_dir = create_temp_dir()
- for target, path in targets.items():
- logging.info("Building target: {}".format(target))
- build_command = ""
-
- # Handle static binary output
- if static is True or "static_" in arch:
- if "static_" in arch:
- static = True
- arch = arch.replace("static_", "")
- build_command += "CGO_ENABLED=0 "
-
- # Handle variations in architecture output
- goarch = arch
- if arch == "i386" or arch == "i686":
- goarch = "386"
- elif "arm64" in arch:
- goarch = "arm64"
- elif "arm" in arch:
- goarch = "arm"
- elif arch == "mipsel":
- goarch = "mipsle"
- build_command += "GOOS={} GOARCH={} ".format(platform, goarch)
-
- if "arm" in arch:
- if arch == "armel":
- build_command += "GOARM=5 "
- elif arch == "armhf" or arch == "arm":
- build_command += "GOARM=6 "
- elif arch == "arm64":
- # TODO(rossmcdonald) - Verify this is the correct setting for arm64
- build_command += "GOARM=7 "
- else:
- logging.error("Invalid ARM architecture specified: {}".format(arch))
- logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
- return False
- if platform == 'windows':
- target = target + '.exe'
- build_command += "go build -o {} ".format(os.path.join(outdir, target))
- if race:
- build_command += "-race "
- if len(tags) > 0:
- build_command += "-tags {} ".format(','.join(tags))
-
- ldflags = [
- '-w', '-s',
- '-X', 'main.branch={}'.format(get_current_branch()),
- '-X', 'main.commit={}'.format(get_current_commit(short=True))]
- if version:
- ldflags.append('-X')
- ldflags.append('main.version={}'.format(version))
- build_command += ' -ldflags="{}" '.format(' '.join(ldflags))
-
- if static:
- build_command += " -a -installsuffix cgo "
- build_command += path
- start_time = datetime.utcnow()
- run(build_command, shell=True)
- end_time = datetime.utcnow()
- logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
- return True
-
-def generate_sha256_from_file(path):
- """Generate SHA256 hash signature based on the contents of the file at path.
- """
- m = hashlib.sha256()
- with open(path, 'rb') as f:
- m.update(f.read())
- return m.hexdigest()
-
-def generate_sig_from_file(path):
- """Generate a detached GPG signature from the file at path.
- """
- logging.debug("Generating GPG signature for file: {}".format(path))
- gpg_path = check_path_for('gpg')
- if gpg_path is None:
- logging.warn("gpg binary not found on path! Skipping signature creation.")
- return False
- if os.environ.get("GNUPG_HOME") is not None:
- run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path))
- else:
- run('gpg --armor --detach-sign --yes {}'.format(path))
- return True
-
-def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
- """Package the output of the build process.
- """
- outfiles = []
- tmp_build_dir = create_temp_dir()
- logging.debug("Packaging for build output: {}".format(build_output))
- logging.info("Using temporary directory: {}".format(tmp_build_dir))
- try:
- for platform in build_output:
- # Create top-level folder displaying which platform (linux, etc)
- os.makedirs(os.path.join(tmp_build_dir, platform))
- for arch in build_output[platform]:
- logging.info("Creating packages for {}/{}".format(platform, arch))
- # Create second-level directory displaying the architecture (amd64, etc)
- current_location = build_output[platform][arch]
-
- # Create directory tree to mimic file system of package
- build_root = os.path.join(tmp_build_dir,
- platform,
- arch,
- PACKAGE_NAME)
- os.makedirs(build_root)
-
- # Copy packaging scripts to build directory
- if platform == "windows":
- # For windows and static builds, just copy
- # binaries to root of package (no other scripts or
- # directories)
- package_scripts(build_root, config_only=True, windows=True)
- elif static or "static_" in arch:
- package_scripts(build_root, config_only=True)
- else:
- create_package_fs(build_root)
- package_scripts(build_root)
-
- for binary in targets:
- # Copy newly-built binaries to packaging directory
- if platform == 'windows':
- binary = binary + '.exe'
- if platform == 'windows' or static or "static_" in arch:
- # Where the binary should go in the package filesystem
- to = os.path.join(build_root, binary)
- # Where the binary currently is located
- fr = os.path.join(current_location, binary)
- else:
- # Where the binary currently is located
- fr = os.path.join(current_location, binary)
- # Where the binary should go in the package filesystem
- to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
- shutil.copy(fr, to)
-
- for package_type in supported_packages[platform]:
- if package_type == "rpm" and arch == "mipsel":
- continue
- # Package the directory structure for each package type for the platform
- logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
- name = pkg_name
- # Reset version, iteration, and current location on each run
- # since they may be modified below.
- package_version = version
- package_iteration = iteration
- if "static_" in arch:
- # Remove the "static_" from the displayed arch on the package
- package_arch = arch.replace("static_", "")
- elif package_type == "rpm" and arch == 'armhf':
- package_arch = 'armv6hl'
- else:
- package_arch = arch
- if not version:
- package_version = "{}~{}".format(next_version, get_current_commit(short=True))
- package_iteration = "0"
- package_build_root = build_root
- current_location = build_output[platform][arch]
-
- if package_type in ['zip', 'tar']:
- # For tars and zips, start the packaging one folder above
- # the build root (to include the package name)
- package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
- if nightly:
- if static or "static_" in arch:
- name = '{}-static-nightly_{}_{}'.format(name,
- platform,
- package_arch)
- else:
- name = '{}-nightly_{}_{}'.format(name,
- platform,
- package_arch)
- else:
- if static or "static_" in arch:
- name = '{}-{}-static_{}_{}'.format(name,
- package_version,
- platform,
- package_arch)
- else:
- name = '{}-{}_{}_{}'.format(name,
- package_version,
- platform,
- package_arch)
- current_location = os.path.join(os.getcwd(), current_location)
- if package_type == 'tar':
- tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name)
- run(tar_command, shell=True)
- run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
- outfile = os.path.join(current_location, name + ".tar.gz")
- outfiles.append(outfile)
- elif package_type == 'zip':
- zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
- run(zip_command, shell=True)
- run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
- outfile = os.path.join(current_location, name + ".zip")
- outfiles.append(outfile)
- elif package_type not in ['zip', 'tar'] and static or "static_" in arch:
- logging.info("Skipping package type '{}' for static builds.".format(package_type))
- else:
- if package_type == 'rpm' and release and '~' in package_version:
- package_version, suffix = package_version.split('~', 1)
- # The ~ indicatees that this is a prerelease so we give it a leading 0.
- package_iteration = "0.%s" % suffix
- fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
- fpm_common_args,
- name,
- package_arch,
- package_type,
- package_version,
- package_iteration,
- package_build_root,
- current_location)
- if package_type == "rpm":
- fpm_command += "--directories /var/log/telegraf --directories /etc/telegraf --depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT)
- out = run(fpm_command, shell=True)
- matches = re.search(':path=>"(.*)"', out)
- outfile = None
- if matches is not None:
- outfile = matches.groups()[0]
- if outfile is None:
- logging.warn("Could not determine output from packaging output!")
- else:
- if nightly:
- # Strip nightly version from package name
- new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
- os.rename(outfile, new_outfile)
- outfile = new_outfile
- else:
- if package_type == 'rpm':
- # rpm's convert any dashes to underscores
- package_version = package_version.replace("-", "_")
- outfiles.append(os.path.join(os.getcwd(), outfile))
- logging.debug("Produced package files: {}".format(outfiles))
- return outfiles
- finally:
- # Cleanup
- shutil.rmtree(tmp_build_dir)
-
-def main(args):
- global PACKAGE_NAME
-
- if args.release and args.nightly:
- logging.error("Cannot be both a nightly and a release.")
- return 1
-
- if args.nightly:
- args.iteration = 0
-
- # Pre-build checks
- check_environ()
- if not check_prereqs():
- return 1
- if args.build_tags is None:
- args.build_tags = []
- else:
- args.build_tags = args.build_tags.split(',')
-
- orig_commit = get_current_commit(short=True)
- orig_branch = get_current_branch()
-
- if args.platform not in supported_builds and args.platform != 'all':
- logging.error("Invalid build platform: {}".format(args.platform))
- return 1
-
- build_output = {}
-
- if args.branch != orig_branch and args.commit != orig_commit:
- logging.error("Can only specify one branch or commit to build from.")
- return 1
- elif args.branch != orig_branch:
- logging.info("Moving to git branch: {}".format(args.branch))
- run("git checkout {}".format(args.branch))
- elif args.commit != orig_commit:
- logging.info("Moving to git commit: {}".format(args.commit))
- run("git checkout {}".format(args.commit))
-
- if not args.no_get:
- if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
- return 1
-
- if args.generate:
- if not run_generate():
- return 1
-
- if args.test:
- if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
- return 1
-
- platforms = []
- single_build = True
- if args.platform == 'all':
- platforms = supported_builds.keys()
- single_build = False
- else:
- platforms = [args.platform]
-
- for platform in platforms:
- build_output.update( { platform : {} } )
- archs = []
- if args.arch == "all":
- single_build = False
- archs = supported_builds.get(platform)
- else:
- archs = [args.arch]
-
- for arch in archs:
- od = args.outdir
- if not single_build:
- od = os.path.join(args.outdir, platform, arch)
- if not build(version=args.version,
- platform=platform,
- arch=arch,
- nightly=args.nightly,
- race=args.race,
- clean=args.clean,
- outdir=od,
- tags=args.build_tags,
- static=args.static):
- return 1
- build_output.get(platform).update( { arch : od } )
-
- # Build packages
- if args.package:
- if not check_path_for("fpm"):
- logging.error("FPM ruby gem required for packaging. Stopping.")
- return 1
- packages = package(build_output,
- args.name,
- args.version,
- nightly=args.nightly,
- iteration=args.iteration,
- static=args.static,
- release=args.release)
- if args.sign:
- logging.debug("Generating GPG signatures for packages: {}".format(packages))
- sigs = [] # retain signatures so they can be uploaded with packages
- for p in packages:
- if generate_sig_from_file(p):
- sigs.append(p + '.asc')
- else:
- logging.error("Creation of signature for package [{}] failed!".format(p))
- return 1
- packages += sigs
- if args.upload:
- logging.debug("Files staged for upload: {}".format(packages))
- if args.nightly:
- args.upload_overwrite = True
- if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
- return 1
- logging.info("Packages created:")
- for filename in packages:
- logging.info("%s (SHA256=%s)",
- os.path.basename(filename),
- generate_sha256_from_file(filename))
- if orig_branch != get_current_branch():
- logging.info("Moving back to original git branch: {}".format(args.branch))
- run("git checkout {}".format(orig_branch))
-
- return 0
-
-if __name__ == '__main__':
- LOG_LEVEL = logging.INFO
- if '--debug' in sys.argv[1:]:
- LOG_LEVEL = logging.DEBUG
- log_format = '[%(levelname)s] %(funcName)s: %(message)s'
- logging.basicConfig(level=LOG_LEVEL,
- format=log_format)
-
- parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
- parser.add_argument('--verbose','-v','--debug',
- action='store_true',
- help='Use debug output')
- parser.add_argument('--outdir', '-o',
- metavar='',
- default='./build/',
- type=os.path.abspath,
- help='Output directory')
- parser.add_argument('--name', '-n',
- metavar='',
- default=PACKAGE_NAME,
- type=str,
- help='Name to use for package name (when package is specified)')
- parser.add_argument('--arch',
- metavar='',
- type=str,
- default=get_system_arch(),
- help='Target architecture for build output')
- parser.add_argument('--platform',
- metavar='',
- type=str,
- default=get_system_platform(),
- help='Target platform for build output')
- parser.add_argument('--branch',
- metavar='',
- type=str,
- default=get_current_branch(),
- help='Build from a specific branch')
- parser.add_argument('--commit',
- metavar='',
- type=str,
- default=get_current_commit(short=True),
- help='Build from a specific commit')
- parser.add_argument('--version',
- metavar='',
- type=str,
- default=get_current_version(),
- help='Version information to apply to build output (ex: 0.12.0)')
- parser.add_argument('--iteration',
- metavar='',
- type=str,
- default="1",
- help='Package iteration to apply to build output (defaults to 1)')
- parser.add_argument('--stats',
- action='store_true',
- help='Emit build metrics (requires InfluxDB Python client)')
- parser.add_argument('--stats-server',
- metavar='',
- type=str,
- help='Send build stats to InfluxDB using provided hostname and port')
- parser.add_argument('--stats-db',
- metavar='',
- type=str,
- help='Send build stats to InfluxDB using provided database name')
- parser.add_argument('--nightly',
- action='store_true',
- help='Mark build output as nightly build (will incremement the minor version)')
- parser.add_argument('--update',
- action='store_true',
- help='Update build dependencies prior to building')
- parser.add_argument('--package',
- action='store_true',
- help='Package binary output')
- parser.add_argument('--release',
- action='store_true',
- help='Mark build output as release')
- parser.add_argument('--clean',
- action='store_true',
- help='Clean output directory before building')
- parser.add_argument('--no-get',
- action='store_true',
- help='Do not retrieve pinned dependencies when building')
- parser.add_argument('--no-uncommitted',
- action='store_true',
- help='Fail if uncommitted changes exist in the working directory')
- parser.add_argument('--upload',
- action='store_true',
- help='Upload output packages to AWS S3')
- parser.add_argument('--upload-overwrite','-w',
- action='store_true',
- help='Upload output packages to AWS S3')
- parser.add_argument('--bucket',
- metavar='',
- type=str,
- default=DEFAULT_BUCKET,
- help='Destination bucket for uploads')
- parser.add_argument('--generate',
- action='store_true',
- help='Run "go generate" before building')
- parser.add_argument('--build-tags',
- metavar='',
- help='Optional build tags to use for compilation')
- parser.add_argument('--static',
- action='store_true',
- help='Create statically-compiled binary output')
- parser.add_argument('--sign',
- action='store_true',
- help='Create GPG detached signatures for packages (when package is specified)')
- parser.add_argument('--test',
- action='store_true',
- help='Run tests (does not produce build output)')
- parser.add_argument('--no-vet',
- action='store_true',
- help='Do not run "go vet" when running tests')
- parser.add_argument('--race',
- action='store_true',
- help='Enable race flag for build output')
- parser.add_argument('--parallel',
- metavar='',
- type=int,
- help='Number of tests to run simultaneously')
- parser.add_argument('--timeout',
- metavar='',
- type=str,
- help='Timeout for tests before failing')
- args = parser.parse_args()
- print_banner()
- sys.exit(main(args))
diff --git a/scripts/check-deps.sh b/scripts/check-deps.sh
new file mode 100755
index 0000000000000..b76d47d579004
--- /dev/null
+++ b/scripts/check-deps.sh
@@ -0,0 +1,68 @@
+#!/bin/sh
+
+tmpdir="$(mktemp -d)"
+
+cleanup() {
+ rm -rf "$tmpdir"
+}
+trap cleanup EXIT
+
+targets="$(go tool dist list)"
+
+for target in ${targets}; do
+ # only check platforms we build for
+ case "${target}" in
+ linux/*) ;;
+ windows/*) ;;
+ freebsd/*) ;;
+ darwin/*) ;;
+ *) continue;;
+ esac
+
+ GOOS=${target%%/*} GOARCH=${target##*/} \
+ go list -deps -f '{{with .Module}}{{.Path}}{{end}}' ./cmd/telegraf/ >> "${tmpdir}/golist"
+done
+
+for dep in $(LC_ALL=C sort -u "${tmpdir}/golist"); do
+ case "${dep}" in
+ # ignore ourselves
+ github.com/influxdata/telegraf) continue;;
+
+ # dependency is replaced in go.mod
+ github.com/satori/go.uuid) continue;;
+
+ # go-autorest has a single license for all sub modules
+ github.com/Azure/go-autorest/autorest)
+ dep=github.com/Azure/go-autorest;;
+ github.com/Azure/go-autorest/*)
+ continue;;
+
+ # single license for all sub modules
+ cloud.google.com/go/*)
+ continue;;
+ esac
+
+ # Remove single and double digit version from path; these are generally not
+ # actual parts of the path and instead indicate a branch or tag.
+ # example: github.com/influxdata/go-syslog/v2 -> github.com/influxdata/go-syslog
+ dep="${dep%%/v[0-9]}"
+ dep="${dep%%/v[0-9][0-9]}"
+
+ echo "${dep}" >> "${tmpdir}/HEAD"
+done
+
+grep '^-' docs/LICENSE_OF_DEPENDENCIES.md | grep -v github.com/DataDog/datadog-agent | cut -f 2 -d' ' > "${tmpdir}/LICENSE_OF_DEPENDENCIES.md"
+
+diff -U0 "${tmpdir}/LICENSE_OF_DEPENDENCIES.md" "${tmpdir}/HEAD" || {
+cat - </dev/null; then
+ install_update_rcd
+ else
+ install_chkconfig
+ fi
+ invoke-rc.d telegraf restart
+fi
diff --git a/scripts/deb/post-remove.sh b/scripts/deb/post-remove.sh
new file mode 100644
index 0000000000000..35ca732580c1c
--- /dev/null
+++ b/scripts/deb/post-remove.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+function disable_systemd {
+ systemctl disable telegraf
+ rm -f $1
+}
+
+function disable_update_rcd {
+ update-rc.d -f telegraf remove
+ rm -f /etc/init.d/telegraf
+}
+
+function disable_chkconfig {
+ chkconfig --del telegraf
+ rm -f /etc/init.d/telegraf
+}
+
+if [ "$1" == "remove" -o "$1" == "purge" ]; then
+ # Remove/purge
+ rm -f /etc/default/telegraf
+
+ if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+ disable_systemd /lib/systemd/system/telegraf.service
+ else
+ # Assuming sysv
+ # Run update-rc.d or fallback to chkconfig if not available
+ if which update-rc.d &>/dev/null; then
+ disable_update_rcd
+ else
+ disable_chkconfig
+ fi
+ fi
+fi
diff --git a/scripts/pre-install.sh b/scripts/deb/pre-install.sh
similarity index 78%
rename from scripts/pre-install.sh
rename to scripts/deb/pre-install.sh
index b371f462d36f4..3fad54f61c3bf 100644
--- a/scripts/pre-install.sh
+++ b/scripts/deb/pre-install.sh
@@ -1,5 +1,13 @@
#!/bin/bash
+if ! grep "^telegraf:" /etc/group &>/dev/null; then
+ groupadd -r telegraf
+fi
+
+if ! id telegraf &>/dev/null; then
+ useradd -r -M telegraf -s /bin/false -d /etc/telegraf -g telegraf
+fi
+
if [[ -d /etc/opt/telegraf ]]; then
# Legacy configuration found
if [[ ! -d /etc/telegraf ]]; then
diff --git a/scripts/deb/pre-remove.sh b/scripts/deb/pre-remove.sh
new file mode 100644
index 0000000000000..838fa11718f20
--- /dev/null
+++ b/scripts/deb/pre-remove.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+BIN_DIR=/usr/bin
+
+if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+ deb-systemd-invoke stop telegraf.service
+else
+ # Assuming sysv
+ invoke-rc.d telegraf stop
+fi
diff --git a/scripts/init.sh b/scripts/init.sh
index 67236d8c7a9b9..d01e16a7ca6f2 100755
--- a/scripts/init.sh
+++ b/scripts/init.sh
@@ -120,13 +120,13 @@ confdir=/etc/telegraf/telegraf.d
case $1 in
start)
# Checked the PID file exists and check the actual status of process
- if [ -e $pidfile ]; then
- pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?"
- # If the status is SUCCESS then don't need to start again.
- if [ "x$status" = "x0" ]; then
+ if [ -e "$pidfile" ]; then
+ if pidofproc -p $pidfile $daemon > /dev/null; then
log_failure_msg "$name process is running"
- exit 0 # Exit
+ else
+ log_failure_msg "$name pidfile has no corresponding process; ensure $name is stopped and remove $pidfile"
fi
+ exit 0
fi
# Bump the file limits, before launching the daemon. These will carry over to
@@ -150,8 +150,7 @@ case $1 in
stop)
# Stop the daemon.
if [ -e $pidfile ]; then
- pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?"
- if [ "$status" = 0 ]; then
+ if pidofproc -p $pidfile $daemon > /dev/null; then
# periodically signal until process exists
while true; do
if ! pidofproc -p $pidfile $daemon > /dev/null; then
@@ -172,8 +171,7 @@ case $1 in
reload)
# Reload the daemon.
if [ -e $pidfile ]; then
- pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?"
- if [ "$status" = 0 ]; then
+ if pidofproc -p $pidfile $daemon > /dev/null; then
if killproc -p $pidfile SIGHUP; then
log_success_msg "$name process was reloaded"
else
diff --git a/scripts/pre-remove.sh b/scripts/pre-remove.sh
deleted file mode 100644
index 2887fc9b624c5..0000000000000
--- a/scripts/pre-remove.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-BIN_DIR=/usr/bin
-
-# Distribution-specific logic
-if [[ -f /etc/debian_version ]]; then
- # Debian/Ubuntu logic
- if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
- deb-systemd-invoke stop telegraf.service
- else
- # Assuming sysv
- invoke-rc.d telegraf stop
- fi
-fi
diff --git a/scripts/release.sh b/scripts/release.sh
new file mode 100644
index 0000000000000..cf29b5c23a0e8
--- /dev/null
+++ b/scripts/release.sh
@@ -0,0 +1,179 @@
+#!/bin/sh
+#
+# usage: release.sh BUILD_NUM
+#
+# Requirements:
+# - curl
+# - jq
+# - sha256sum
+# - awscli
+# - gpg
+#
+# CIRCLE_TOKEN set to a CircleCI API token that can list the artifacts.
+#
+# AWS cli setup to be able to write to the BUCKET.
+#
+# GPG setup with a signing key.
+
+BUILD_NUM="${1:?usage: release.sh BUILD_NUM}"
+BUCKET="${2:-dl.influxdata.com/telegraf/releases}"
+
+: ${CIRCLE_TOKEN:?"Must set CIRCLE_TOKEN"}
+
+tmpdir="$(mktemp -d -t telegraf.XXXXXXXXXX)"
+
+on_exit() {
+ rm -rf "$tmpdir"
+}
+trap on_exit EXIT
+
+echo "${tmpdir}"
+cd "${tmpdir}" || exit 1
+
+curl -s -S -L -H Circle-Token:${CIRCLE_TOKEN} \
+ "https://circleci.com/api/v2/project/gh/influxdata/telegraf/${BUILD_NUM}/artifacts" \
+ -o artifacts || exit 1
+
+cat artifacts | jq -r '.items[] | "\(.url) \(.path|ltrimstr("build/dist/"))"' > manifest
+
+while read url path;
+do
+ echo $url
+ curl -s -S -L -o "$path" "$url" &&
+ sha256sum "$path" > "$path.DIGESTS" &&
+ gpg --armor --detach-sign "$path.DIGESTS" &&
+ gpg --armor --detach-sign "$path" || exit 1
+done < manifest
+
+echo
+cat *.DIGESTS
+echo
+
+arch() {
+ case ${1} in
+ *i386.*)
+ echo i386;;
+ *armel.*)
+ echo armel;;
+ *armv6hl.*)
+ echo armv6hl;;
+ *armhf.*)
+ echo armhf;;
+ *arm64.* | *aarch64.*)
+ echo arm64;;
+ *amd64.* | *x86_64.*)
+ echo amd64;;
+ *s390x.*)
+ echo s390x;;
+ *mipsel.*)
+ echo mipsel;;
+ *mips.*)
+ echo mips;;
+ *)
+ echo unknown
+ esac
+}
+
+platform() {
+ case ${1} in
+ *".rpm")
+ echo Centos;;
+ *".deb")
+ echo Debian;;
+ *"linux"*)
+ echo Linux;;
+ *"freebsd"*)
+ echo FreeBSD;;
+ *"darwin"*)
+ echo Mac OS X;;
+ *"windows"*)
+ echo Windows;;
+ *)
+ echo unknown;;
+ esac
+}
+
+echo "Arch | Platform | Package | SHA256"
+echo "---| --- | --- | ---"
+while read url path;
+do
+ echo "$(arch ${path}) | $(platform ${path}) | [\`${path}\`](https://dl.influxdata.com/telegraf/releases/${path}) | \`$(sha256sum ${path} | cut -f1 -d' ')\`"
+done < manifest
+echo ""
+
+package="$(grep *_amd64.deb manifest | cut -f2 -d' ')"
+cat -</dev/null; then
- groupadd -r telegraf
-fi
-
-if ! id telegraf &>/dev/null; then
- useradd -r -M telegraf -s /bin/false -d /etc/telegraf -g telegraf
-fi
-
-test -d $LOG_DIR || mkdir -p $LOG_DIR
-chown -R -L telegraf:telegraf $LOG_DIR
-chmod 755 $LOG_DIR
-
# Remove legacy symlink, if it exists
if [[ -L /etc/init.d/telegraf ]]; then
rm -f /etc/init.d/telegraf
@@ -55,6 +43,15 @@ if [[ ! -d /etc/telegraf/telegraf.d ]]; then
mkdir -p /etc/telegraf/telegraf.d
fi
+# If 'telegraf.conf' is not present use package's sample (fresh install)
+if [[ ! -f /etc/telegraf/telegraf.conf ]] && [[ -f /etc/telegraf/telegraf.conf.sample ]]; then
+ cp /etc/telegraf/telegraf.conf.sample /etc/telegraf/telegraf.conf
+fi
+
+test -d $LOG_DIR || mkdir -p $LOG_DIR
+chown -R -L telegraf:telegraf $LOG_DIR
+chmod 755 $LOG_DIR
+
# Distribution-specific logic
if [[ -f /etc/redhat-release ]] || [[ -f /etc/SuSE-release ]]; then
# RHEL-variant logic
@@ -70,22 +67,6 @@ if [[ -f /etc/redhat-release ]] || [[ -f /etc/SuSE-release ]]; then
install_chkconfig
fi
fi
-elif [[ -f /etc/debian_version ]]; then
- # Debian/Ubuntu logic
- if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
- install_systemd /lib/systemd/system/telegraf.service
- deb-systemd-invoke restart telegraf.service || echo "WARNING: systemd not running."
- else
- # Assuming SysVinit
- install_init
- # Run update-rc.d or fallback to chkconfig if not available
- if which update-rc.d &>/dev/null; then
- install_update_rcd
- else
- install_chkconfig
- fi
- invoke-rc.d telegraf restart
- fi
elif [[ -f /etc/os-release ]]; then
source /etc/os-release
if [[ "$NAME" = "Amazon Linux" ]]; then
@@ -100,5 +81,8 @@ elif [[ -f /etc/os-release ]]; then
else
install_chkconfig
fi
+ elif [[ "$NAME" = "Solus" ]]; then
+ # Solus logic
+ install_systemd /usr/lib/systemd/system/telegraf.service
fi
fi
diff --git a/scripts/post-remove.sh b/scripts/rpm/post-remove.sh
similarity index 69%
rename from scripts/post-remove.sh
rename to scripts/rpm/post-remove.sh
index 533a4fec12028..90f34f95ffd2d 100644
--- a/scripts/post-remove.sh
+++ b/scripts/rpm/post-remove.sh
@@ -28,24 +28,6 @@ if [[ -f /etc/redhat-release ]] || [[ -f /etc/SuSE-release ]]; then
disable_chkconfig
fi
fi
-elif [[ -f /etc/debian_version ]]; then
- # Debian/Ubuntu logic
- if [ "$1" == "remove" -o "$1" == "purge" ]; then
- # Remove/purge
- rm -f /etc/default/telegraf
-
- if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
- disable_systemd /lib/systemd/system/telegraf.service
- else
- # Assuming sysv
- # Run update-rc.d or fallback to chkconfig if not available
- if which update-rc.d &>/dev/null; then
- disable_update_rcd
- else
- disable_chkconfig
- fi
- fi
- fi
elif [[ -f /etc/os-release ]]; then
source /etc/os-release
if [[ "$ID" = "amzn" ]] && [[ "$1" = "0" ]]; then
@@ -59,5 +41,8 @@ elif [[ -f /etc/os-release ]]; then
# Amazon Linux logic
disable_chkconfig
fi
+ elif [[ "$NAME" = "Solus" ]]; then
+ rm -f /etc/default/telegraf
+ disable_systemd /usr/lib/systemd/system/telegraf.service
fi
fi
diff --git a/scripts/rpm/pre-install.sh b/scripts/rpm/pre-install.sh
new file mode 100644
index 0000000000000..3fad54f61c3bf
--- /dev/null
+++ b/scripts/rpm/pre-install.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+if ! grep "^telegraf:" /etc/group &>/dev/null; then
+ groupadd -r telegraf
+fi
+
+if ! id telegraf &>/dev/null; then
+ useradd -r -M telegraf -s /bin/false -d /etc/telegraf -g telegraf
+fi
+
+if [[ -d /etc/opt/telegraf ]]; then
+ # Legacy configuration found
+ if [[ ! -d /etc/telegraf ]]; then
+ # New configuration does not exist, move legacy configuration to new location
+ echo -e "Please note, Telegraf's configuration is now located at '/etc/telegraf' (previously '/etc/opt/telegraf')."
+ mv -vn /etc/opt/telegraf /etc/telegraf
+
+ if [[ -f /etc/telegraf/telegraf.conf ]]; then
+ backup_name="telegraf.conf.$(date +%s).backup"
+ echo "A backup of your current configuration can be found at: /etc/telegraf/${backup_name}"
+ cp -a "/etc/telegraf/telegraf.conf" "/etc/telegraf/${backup_name}"
+ fi
+ fi
+fi
diff --git a/scripts/stretch.docker b/scripts/stretch.docker
index 906e0c5040d47..642421513c65d 100644
--- a/scripts/stretch.docker
+++ b/scripts/stretch.docker
@@ -1,9 +1,6 @@
-FROM golang:1.11.0 as builder
-ENV DEP_VERSION 0.5.0
-RUN curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 && chmod +x /usr/local/bin/dep
+FROM golang:1.14.7-stretch as builder
WORKDIR /go/src/github.com/influxdata/telegraf
-COPY Gopkg.toml Gopkg.lock ./
-RUN dep ensure -vendor-only
+
COPY . /go/src/github.com/influxdata/telegraf
RUN make go-install
diff --git a/selfstat/selfstat.go b/selfstat/selfstat.go
index 98ecbb4d42b90..a60ee099e0438 100644
--- a/selfstat/selfstat.go
+++ b/selfstat/selfstat.go
@@ -17,7 +17,7 @@ import (
)
var (
- registry *rgstry
+ registry *Registry
)
// Stat is an interface for dealing with telegraf statistics collected
@@ -32,9 +32,6 @@ type Stat interface {
// Tags is a tag map. Each time this is called a new map is allocated.
Tags() map[string]string
- // Key is the unique measurement+tags key of the stat.
- Key() uint64
-
// Incr increments a regular stat by 'v'.
// in the case of a timing stat, increment adds the timing to the cache.
Incr(v int64)
@@ -56,11 +53,7 @@ type Stat interface {
// The returned Stat can be incremented by the consumer of Register(), and it's
// value will be returned as a telegraf metric when Metrics() is called.
func Register(measurement, field string, tags map[string]string) Stat {
- return registry.register(&stat{
- measurement: "internal_" + measurement,
- field: field,
- tags: tags,
- })
+ return registry.register("internal_"+measurement, field, tags)
}
// RegisterTiming registers the given measurement, field, and tags in the selfstat
@@ -80,11 +73,7 @@ func Register(measurement, field string, tags map[string]string) Stat {
// The returned Stat can be incremented by the consumer of Register(), and it's
// value will be returned as a telegraf metric when Metrics() is called.
func RegisterTiming(measurement, field string, tags map[string]string) Stat {
- return registry.register(&timingStat{
- measurement: "internal_" + measurement,
- field: field,
- tags: tags,
- })
+ return registry.registerTiming("internal_"+measurement, field, tags)
}
// Metrics returns all registered stats as telegraf metrics.
@@ -120,27 +109,76 @@ func Metrics() []telegraf.Metric {
return metrics
}
-type rgstry struct {
+type Registry struct {
stats map[uint64]map[string]Stat
mu sync.Mutex
}
-func (r *rgstry) register(s Stat) Stat {
+func (r *Registry) register(measurement, field string, tags map[string]string) Stat {
r.mu.Lock()
defer r.mu.Unlock()
- if stats, ok := r.stats[s.Key()]; ok {
- // measurement exists
- if stat, ok := stats[s.FieldName()]; ok {
- // field already exists, so don't create a new one
- return stat
- }
- r.stats[s.Key()][s.FieldName()] = s
- return s
- } else {
- // creating a new unique metric
- r.stats[s.Key()] = map[string]Stat{s.FieldName(): s}
- return s
+
+ key := key(measurement, tags)
+ if stat, ok := registry.get(key, field); ok {
+ return stat
+ }
+
+ t := make(map[string]string, len(tags))
+ for k, v := range tags {
+ t[k] = v
+ }
+
+ s := &stat{
+ measurement: measurement,
+ field: field,
+ tags: t,
+ }
+ registry.set(key, s)
+ return s
+}
+
+func (r *Registry) registerTiming(measurement, field string, tags map[string]string) Stat {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ key := key(measurement, tags)
+ if stat, ok := registry.get(key, field); ok {
+ return stat
}
+
+ t := make(map[string]string, len(tags))
+ for k, v := range tags {
+ t[k] = v
+ }
+
+ s := &timingStat{
+ measurement: measurement,
+ field: field,
+ tags: t,
+ }
+ registry.set(key, s)
+ return s
+}
+
+func (r *Registry) get(key uint64, field string) (Stat, bool) {
+ if _, ok := r.stats[key]; !ok {
+ return nil, false
+ }
+
+ if stat, ok := r.stats[key][field]; ok {
+ return stat, true
+ }
+
+ return nil, false
+}
+
+func (r *Registry) set(key uint64, s Stat) {
+ if _, ok := r.stats[key]; !ok {
+ r.stats[key] = make(map[string]Stat)
+ }
+
+ r.stats[key][s.FieldName()] = s
+ return
}
func key(measurement string, tags map[string]string) uint64 {
@@ -163,7 +201,7 @@ func key(measurement string, tags map[string]string) uint64 {
}
func init() {
- registry = &rgstry{
+ registry = &Registry{
stats: make(map[uint64]map[string]Stat),
}
}
diff --git a/selfstat/selfstat_test.go b/selfstat/selfstat_test.go
index 2de2bd3811680..3d590bb96006c 100644
--- a/selfstat/selfstat_test.go
+++ b/selfstat/selfstat_test.go
@@ -5,8 +5,8 @@ import (
"testing"
"github.com/influxdata/telegraf/testutil"
-
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
var (
@@ -18,7 +18,7 @@ var (
// testCleanup resets the global registry for test cleanup & unlocks the test lock
func testCleanup() {
- registry = &rgstry{
+ registry = &Registry{
stats: make(map[uint64]map[string]Stat),
}
testLock.Unlock()
@@ -109,32 +109,17 @@ func TestRegisterTimingAndIncrAndSet(t *testing.T) {
}
func TestStatKeyConsistency(t *testing.T) {
- s := &stat{
- measurement: "internal_stat",
- field: "myfield",
- tags: map[string]string{
- "foo": "bar",
- "bar": "baz",
- "whose": "first",
- },
- }
- k := s.Key()
- for i := 0; i < 5000; i++ {
- // assert that the Key() func doesn't change anything.
- assert.Equal(t, k, s.Key())
-
- // assert that two identical measurements always produce the same key.
- tmp := &stat{
- measurement: "internal_stat",
- field: "myfield",
- tags: map[string]string{
- "foo": "bar",
- "bar": "baz",
- "whose": "first",
- },
- }
- assert.Equal(t, k, tmp.Key())
- }
+ lhs := key("internal_stats", map[string]string{
+ "foo": "bar",
+ "bar": "baz",
+ "whose": "first",
+ })
+ rhs := key("internal_stats", map[string]string{
+ "foo": "bar",
+ "bar": "baz",
+ "whose": "first",
+ })
+ require.Equal(t, lhs, rhs)
}
func TestRegisterMetricsAndVerify(t *testing.T) {
@@ -219,3 +204,10 @@ func TestRegisterMetricsAndVerify(t *testing.T) {
},
)
}
+
+func TestRegisterCopy(t *testing.T) {
+ tags := map[string]string{"input": "mem", "alias": "mem1"}
+ stat := Register("gather", "metrics_gathered", tags)
+ tags["new"] = "value"
+ require.NotEqual(t, tags, stat.Tags())
+}
diff --git a/selfstat/stat.go b/selfstat/stat.go
index d7ec60a2bb53b..e1905baf57878 100644
--- a/selfstat/stat.go
+++ b/selfstat/stat.go
@@ -41,10 +41,3 @@ func (s *stat) Tags() map[string]string {
}
return m
}
-
-func (s *stat) Key() uint64 {
- if s.key == 0 {
- s.key = key(s.measurement, s.tags)
- }
- return s.key
-}
diff --git a/selfstat/timingStat.go b/selfstat/timingStat.go
index ef0ee05aa6106..13f8400bc7a48 100644
--- a/selfstat/timingStat.go
+++ b/selfstat/timingStat.go
@@ -57,10 +57,3 @@ func (s *timingStat) Tags() map[string]string {
}
return m
}
-
-func (s *timingStat) Key() uint64 {
- if s.key == 0 {
- s.key = key(s.measurement, s.tags)
- }
- return s.key
-}
diff --git a/testutil/accumulator.go b/testutil/accumulator.go
index a7b9fe8f63039..978fb7b38ddf5 100644
--- a/testutil/accumulator.go
+++ b/testutil/accumulator.go
@@ -18,8 +18,8 @@ var (
)
func newTrackingID() telegraf.TrackingID {
- atomic.AddUint64(&lastID, 1)
- return telegraf.TrackingID(lastID)
+ id := atomic.AddUint64(&lastID, 1)
+ return telegraf.TrackingID(id)
}
// Metric defines a single point measurement
@@ -28,6 +28,7 @@ type Metric struct {
Tags map[string]string
Fields map[string]interface{}
Time time.Time
+ Type telegraf.ValueType
}
func (p *Metric) String() string {
@@ -45,12 +46,22 @@ type Accumulator struct {
Errors []error
debug bool
delivered chan telegraf.DeliveryInfo
+
+ TimeFunc func() time.Time
}
func (a *Accumulator) NMetrics() uint64 {
return atomic.LoadUint64(&a.nMetrics)
}
+func (a *Accumulator) GetTelegrafMetrics() []telegraf.Metric {
+ metrics := []telegraf.Metric{}
+ for _, m := range a.Metrics {
+ metrics = append(metrics, FromTestMetric(m))
+ }
+ return metrics
+}
+
func (a *Accumulator) FirstError() error {
if len(a.Errors) == 0 {
return nil
@@ -65,11 +76,11 @@ func (a *Accumulator) ClearMetrics() {
a.Metrics = make([]*Metric, 0)
}
-// AddFields adds a measurement point with a specified timestamp.
-func (a *Accumulator) AddFields(
+func (a *Accumulator) addFields(
measurement string,
- fields map[string]interface{},
tags map[string]string,
+ fields map[string]interface{},
+ tp telegraf.ValueType,
timestamp ...time.Time,
) {
a.Lock()
@@ -101,6 +112,12 @@ func (a *Accumulator) AddFields(
t = timestamp[0]
} else {
t = time.Now()
+ if a.TimeFunc == nil {
+ t = time.Now()
+ } else {
+ t = a.TimeFunc()
+ }
+
}
if a.debug {
@@ -116,18 +133,29 @@ func (a *Accumulator) AddFields(
Fields: fieldsCopy,
Tags: tagsCopy,
Time: t,
+ Type: tp,
}
a.Metrics = append(a.Metrics, p)
}
+// AddFields adds a measurement point with a specified timestamp.
+func (a *Accumulator) AddFields(
+ measurement string,
+ fields map[string]interface{},
+ tags map[string]string,
+ timestamp ...time.Time,
+) {
+ a.addFields(measurement, tags, fields, telegraf.Untyped, timestamp...)
+}
+
func (a *Accumulator) AddCounter(
measurement string,
fields map[string]interface{},
tags map[string]string,
timestamp ...time.Time,
) {
- a.AddFields(measurement, fields, tags, timestamp...)
+ a.addFields(measurement, tags, fields, telegraf.Counter, timestamp...)
}
func (a *Accumulator) AddGauge(
@@ -136,12 +164,12 @@ func (a *Accumulator) AddGauge(
tags map[string]string,
timestamp ...time.Time,
) {
- a.AddFields(measurement, fields, tags, timestamp...)
+ a.addFields(measurement, tags, fields, telegraf.Gauge, timestamp...)
}
func (a *Accumulator) AddMetrics(metrics []telegraf.Metric) {
for _, m := range metrics {
- a.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
+ a.addFields(m.Name(), m.Tags(), m.Fields(), m.Type(), m.Time())
}
}
@@ -151,7 +179,7 @@ func (a *Accumulator) AddSummary(
tags map[string]string,
timestamp ...time.Time,
) {
- a.AddFields(measurement, fields, tags, timestamp...)
+ a.addFields(measurement, tags, fields, telegraf.Summary, timestamp...)
}
func (a *Accumulator) AddHistogram(
@@ -160,11 +188,11 @@ func (a *Accumulator) AddHistogram(
tags map[string]string,
timestamp ...time.Time,
) {
- a.AddFields(measurement, fields, tags, timestamp...)
+ a.addFields(measurement, tags, fields, telegraf.Histogram, timestamp...)
}
func (a *Accumulator) AddMetric(m telegraf.Metric) {
- a.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
+ a.addFields(m.Name(), m.Tags(), m.Fields(), m.Type(), m.Time())
}
func (a *Accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator {
@@ -184,9 +212,11 @@ func (a *Accumulator) AddTrackingMetricGroup(group []telegraf.Metric) telegraf.T
}
func (a *Accumulator) Delivered() <-chan telegraf.DeliveryInfo {
+ a.Lock()
if a.delivered == nil {
a.delivered = make(chan telegraf.DeliveryInfo)
}
+ a.Unlock()
return a.delivered
}
@@ -242,6 +272,18 @@ func (a *Accumulator) HasTag(measurement string, key string) bool {
return false
}
+func (a *Accumulator) TagSetValue(measurement string, key string) string {
+ for _, p := range a.Metrics {
+ if p.Measurement == measurement {
+ v, ok := p.Tags[key]
+ if ok {
+ return v
+ }
+ }
+ }
+ return ""
+}
+
func (a *Accumulator) TagValue(measurement string, key string) string {
for _, p := range a.Metrics {
if p.Measurement == measurement {
@@ -283,13 +325,13 @@ func (a *Accumulator) NFields() int {
// Wait waits for the given number of metrics to be added to the accumulator.
func (a *Accumulator) Wait(n int) {
a.Lock()
+ defer a.Unlock()
if a.Cond == nil {
a.Cond = sync.NewCond(&a.Mutex)
}
for int(a.NMetrics()) < n {
a.Cond.Wait()
}
- a.Unlock()
}
// WaitError waits for the given number of errors to be added to the accumulator.
@@ -317,8 +359,7 @@ func (a *Accumulator) AssertContainsTaggedFields(
continue
}
- if p.Measurement == measurement {
- assert.Equal(t, fields, p.Fields)
+ if p.Measurement == measurement && reflect.DeepEqual(fields, p.Fields) {
return
}
}
@@ -676,3 +717,22 @@ func (a *Accumulator) BoolField(measurement string, field string) (bool, bool) {
return false, false
}
+
+// NopAccumulator is used for benchmarking to isolate the plugin from the internal
+// telegraf accumulator machinery.
+type NopAccumulator struct{}
+
+func (n *NopAccumulator) AddFields(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
+}
+func (n *NopAccumulator) AddGauge(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
+}
+func (n *NopAccumulator) AddCounter(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
+}
+func (n *NopAccumulator) AddSummary(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
+}
+func (n *NopAccumulator) AddHistogram(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
+}
+func (n *NopAccumulator) AddMetric(telegraf.Metric) {}
+func (n *NopAccumulator) SetPrecision(precision time.Duration) {}
+func (n *NopAccumulator) AddError(err error) {}
+func (n *NopAccumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { return nil }
diff --git a/testutil/log.go b/testutil/log.go
new file mode 100644
index 0000000000000..c81370e234f3f
--- /dev/null
+++ b/testutil/log.go
@@ -0,0 +1,54 @@
+package testutil
+
+import (
+ "log"
+
+ "github.com/influxdata/telegraf"
+)
+
+var _ telegraf.Logger = &Logger{}
+
+// Logger defines a logging structure for plugins.
+type Logger struct {
+ Name string // Name is the plugin name, will be printed in the `[]`.
+}
+
+// Errorf logs an error message, patterned after log.Printf.
+func (l Logger) Errorf(format string, args ...interface{}) {
+ log.Printf("E! ["+l.Name+"] "+format, args...)
+}
+
+// Error logs an error message, patterned after log.Print.
+func (l Logger) Error(args ...interface{}) {
+ log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...)
+}
+
+// Debugf logs a debug message, patterned after log.Printf.
+func (l Logger) Debugf(format string, args ...interface{}) {
+ log.Printf("D! ["+l.Name+"] "+format, args...)
+}
+
+// Debug logs a debug message, patterned after log.Print.
+func (l Logger) Debug(args ...interface{}) {
+ log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...)
+}
+
+// Warnf logs a warning message, patterned after log.Printf.
+func (l Logger) Warnf(format string, args ...interface{}) {
+ log.Printf("W! ["+l.Name+"] "+format, args...)
+}
+
+// Warn logs a warning message, patterned after log.Print.
+func (l Logger) Warn(args ...interface{}) {
+ log.Print(append([]interface{}{"W! [" + l.Name + "] "}, args...)...)
+}
+
+// Infof logs an information message, patterned after log.Printf.
+func (l Logger) Infof(format string, args ...interface{}) {
+ log.Printf("I! ["+l.Name+"] "+format, args...)
+}
+
+// Info logs an information message, patterned after log.Print.
+func (l Logger) Info(args ...interface{}) {
+ log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...)
+}
diff --git a/testutil/metric.go b/testutil/metric.go
index afb3de7fe0422..36ba63af9338f 100644
--- a/testutil/metric.go
+++ b/testutil/metric.go
@@ -1,11 +1,13 @@
package testutil
import (
+ "reflect"
"sort"
"testing"
"time"
"github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
@@ -18,6 +20,77 @@ type metricDiff struct {
Time time.Time
}
+func lessFunc(lhs, rhs *metricDiff) bool {
+ if lhs.Measurement != rhs.Measurement {
+ return lhs.Measurement < rhs.Measurement
+ }
+
+ for i := 0; ; i++ {
+ if i >= len(lhs.Tags) && i >= len(rhs.Tags) {
+ break
+ } else if i >= len(lhs.Tags) {
+ return true
+ } else if i >= len(rhs.Tags) {
+ return false
+ }
+
+ if lhs.Tags[i].Key != rhs.Tags[i].Key {
+ return lhs.Tags[i].Key < rhs.Tags[i].Key
+ }
+ if lhs.Tags[i].Value != rhs.Tags[i].Value {
+ return lhs.Tags[i].Value < rhs.Tags[i].Value
+ }
+ }
+
+ for i := 0; ; i++ {
+ if i >= len(lhs.Fields) && i >= len(rhs.Fields) {
+ break
+ } else if i >= len(lhs.Fields) {
+ return true
+ } else if i >= len(rhs.Fields) {
+ return false
+ }
+
+ if lhs.Fields[i].Key != rhs.Fields[i].Key {
+ return lhs.Fields[i].Key < rhs.Fields[i].Key
+ }
+
+ if lhs.Fields[i].Value != rhs.Fields[i].Value {
+ ltype := reflect.TypeOf(lhs.Fields[i].Value)
+ rtype := reflect.TypeOf(lhs.Fields[i].Value)
+
+ if ltype.Kind() != rtype.Kind() {
+ return ltype.Kind() < rtype.Kind()
+ }
+
+ switch v := lhs.Fields[i].Value.(type) {
+ case int64:
+ return v < lhs.Fields[i].Value.(int64)
+ case uint64:
+ return v < lhs.Fields[i].Value.(uint64)
+ case float64:
+ return v < lhs.Fields[i].Value.(float64)
+ case string:
+ return v < lhs.Fields[i].Value.(string)
+ case bool:
+ return !v
+ default:
+ panic("unknown type")
+ }
+ }
+ }
+
+ if lhs.Type != rhs.Type {
+ return lhs.Type < rhs.Type
+ }
+
+ if lhs.Time.UnixNano() != rhs.Time.UnixNano() {
+ return lhs.Time.UnixNano() < rhs.Time.UnixNano()
+ }
+
+ return false
+}
+
func newMetricDiff(metric telegraf.Metric) *metricDiff {
if metric == nil {
return nil
@@ -45,7 +118,18 @@ func newMetricDiff(metric telegraf.Metric) *metricDiff {
return m
}
-func MetricEqual(expected, actual telegraf.Metric) bool {
+// SortMetrics enables sorting metrics before comparison.
+func SortMetrics() cmp.Option {
+ return cmpopts.SortSlices(lessFunc)
+}
+
+// IgnoreTime disables comparison of timestamp.
+func IgnoreTime() cmp.Option {
+ return cmpopts.IgnoreFields(metricDiff{}, "Time")
+}
+
+// MetricEqual returns true if the metrics are equal.
+func MetricEqual(expected, actual telegraf.Metric, opts ...cmp.Option) bool {
var lhs, rhs *metricDiff
if expected != nil {
lhs = newMetricDiff(expected)
@@ -54,10 +138,13 @@ func MetricEqual(expected, actual telegraf.Metric) bool {
rhs = newMetricDiff(actual)
}
- return cmp.Equal(lhs, rhs)
+ opts = append(opts, cmpopts.EquateNaNs())
+ return cmp.Equal(lhs, rhs, opts...)
}
-func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric) {
+// RequireMetricEqual halts the test with an error if the metrics are not
+// equal.
+func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric, opts ...cmp.Option) {
t.Helper()
var lhs, rhs *metricDiff
@@ -68,12 +155,15 @@ func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric) {
rhs = newMetricDiff(actual)
}
- if diff := cmp.Diff(lhs, rhs); diff != "" {
+ opts = append(opts, cmpopts.EquateNaNs())
+ if diff := cmp.Diff(lhs, rhs, opts...); diff != "" {
t.Fatalf("telegraf.Metric\n--- expected\n+++ actual\n%s", diff)
}
}
-func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric) {
+// RequireMetricsEqual halts the test with an error if the array of metrics
+// are not equal.
+func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric, opts ...cmp.Option) {
t.Helper()
lhs := make([]*metricDiff, 0, len(expected))
@@ -84,7 +174,9 @@ func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric) {
for _, m := range actual {
rhs = append(rhs, newMetricDiff(m))
}
- if diff := cmp.Diff(lhs, rhs); diff != "" {
+
+ opts = append(opts, cmpopts.EquateNaNs())
+ if diff := cmp.Diff(lhs, rhs, opts...); diff != "" {
t.Fatalf("[]telegraf.Metric\n--- expected\n+++ actual\n%s", diff)
}
}
@@ -105,7 +197,7 @@ func MustMetric(
}
func FromTestMetric(met *Metric) telegraf.Metric {
- m, err := metric.New(met.Measurement, met.Tags, met.Fields, met.Time)
+ m, err := metric.New(met.Measurement, met.Tags, met.Fields, met.Time, met.Type)
if err != nil {
panic("MustMetric")
}
diff --git a/testutil/metric_test.go b/testutil/metric_test.go
index 5b5ef01f470e7..0c999185a8fd4 100644
--- a/testutil/metric_test.go
+++ b/testutil/metric_test.go
@@ -4,18 +4,19 @@ import (
"testing"
"time"
+ "github.com/google/go-cmp/cmp"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
-func TestRequireMetricsEqual(t *testing.T) {
+func TestRequireMetricEqual(t *testing.T) {
tests := []struct {
name string
got telegraf.Metric
want telegraf.Metric
}{
{
- name: "telegraf and testutil metrics should be equal",
+ name: "equal metrics should be equal",
got: func() telegraf.Metric {
m, _ := metric.New(
"test",
@@ -56,3 +57,50 @@ func TestRequireMetricsEqual(t *testing.T) {
})
}
}
+
+func TestRequireMetricsEqual(t *testing.T) {
+ tests := []struct {
+ name string
+ got []telegraf.Metric
+ want []telegraf.Metric
+ opts []cmp.Option
+ }{
+ {
+ name: "sort metrics option sorts by name",
+ got: []telegraf.Metric{
+ MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ MustMetric(
+ "net",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ want: []telegraf.Metric{
+ MustMetric(
+ "net",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ MustMetric(
+ "cpu",
+ map[string]string{},
+ map[string]interface{}{},
+ time.Unix(0, 0),
+ ),
+ },
+ opts: []cmp.Option{SortMetrics()},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ RequireMetricsEqual(t, tt.want, tt.got, tt.opts...)
+ })
+ }
+}
diff --git a/testutil/tls.go b/testutil/tls.go
index 4f7fc012aef90..68a244a8b1e74 100644
--- a/testutil/tls.go
+++ b/testutil/tls.go
@@ -6,7 +6,7 @@ import (
"os"
"path"
- "github.com/influxdata/telegraf/internal/tls"
+ "github.com/influxdata/telegraf/plugins/common/tls"
)
type pki struct {
@@ -30,6 +30,9 @@ func (p *pki) TLSServerConfig() *tls.ServerConfig {
TLSAllowedCACerts: []string{p.CACertPath()},
TLSCert: p.ServerCertPath(),
TLSKey: p.ServerKeyPath(),
+ TLSCipherSuites: []string{p.CipherSuite()},
+ TLSMinVersion: p.TLSMinVersion(),
+ TLSMaxVersion: p.TLSMaxVersion(),
}
}
@@ -41,6 +44,18 @@ func (p *pki) CACertPath() string {
return path.Join(p.path, "cacert.pem")
}
+func (p *pki) CipherSuite() string {
+ return "TLS_RSA_WITH_3DES_EDE_CBC_SHA"
+}
+
+func (p *pki) TLSMinVersion() string {
+ return "TLS11"
+}
+
+func (p *pki) TLSMaxVersion() string {
+ return "TLS12"
+}
+
func (p *pki) ReadClientCert() string {
return readCertificate(p.ClientCertPath())
}