chore(inputs_a-l): migrate sample configs into separate files (#11132)
sspaink authored May 18, 2022
1 parent 0f5dc99 commit 6b697db
Showing 104 changed files with 3,070 additions and 0 deletions.
26 changes: 26 additions & 0 deletions plugins/inputs/activemq/sample.conf
@@ -0,0 +1,26 @@
# Gather ActiveMQ metrics
[[inputs.activemq]]
## ActiveMQ WebConsole URL
url = "http://127.0.0.1:8161"

## ActiveMQ endpoint
## Deprecated in 1.11; use the 'url' option instead.
# server = "192.168.50.10"
# port = 8161

## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"

## Required ActiveMQ webadmin root path
# webadmin = "admin"

## Maximum time to receive response.
# response_timeout = "5s"

## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
41 changes: 41 additions & 0 deletions plugins/inputs/aerospike/sample.conf
@@ -0,0 +1,41 @@
# Read stats from aerospike server(s)
[[inputs.aerospike]]
## Aerospike servers to connect to (with port)
## This plugin will query all namespaces the aerospike
## server has configured and get stats for them.
servers = ["localhost:3000"]

# username = "telegraf"
# password = "pa$$word"

## Optional TLS Config
# enable_tls = false
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
# tls_name = "tlsname"
## If true, skip chain & host verification
# insecure_skip_verify = true

## Feature Options
## Set the 'namespaces' list to limit which namespaces are queried.
## Leave blank to query all of them.
# disable_query_namespaces = true # default: false
# namespaces = ["namespace1", "namespace2"]

## Enable set-level telemetry
# query_sets = true # default: false
## Add namespace/set combinations to limit which sets are queried.
## Leave blank to query all sets.
# sets = ["namespace1/set1", "namespace1/set2", "namespace3"]

## Histograms
# enable_ttl_histogram = true # default: false
# enable_object_size_linear_histogram = true # default: false

## By default, Aerospike produces a 100-bucket histogram, which is too
## fine-grained for most graphing tools, so this option allows squashing it
## into a smaller number of buckets. For a balanced histogram, the number of
## buckets chosen should divide evenly into 100.
# num_histogram_buckets = 100 # default: 10
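## For example, with the default of 10 buckets, each output bucket aggregates
## 10 of the 100 source buckets (100 / 10 = 10); a value such as 7 does not
## divide 100 evenly and would yield unbalanced buckets.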
100 changes: 100 additions & 0 deletions plugins/inputs/aliyuncms/sample.conf
@@ -0,0 +1,100 @@
# Pull Metric Statistics from Aliyun CMS
[[inputs.aliyuncms]]
## Aliyun Credentials
## Credentials are loaded in the following order
## 1) Ram RoleArn credential
## 2) AccessKey STS token credential
## 3) AccessKey credential
## 4) Ecs Ram Role credential
## 5) RSA keypair credential
## 6) Environment variables credential
## 7) Instance metadata credential

# access_key_id = ""
# access_key_secret = ""
# access_key_sts_token = ""
# role_arn = ""
# role_session_name = ""
# private_key = ""
# public_key_id = ""
# role_name = ""

## Specify the Alibaba Cloud regions to be queried for metrics and object discovery.
## If not set, all supported regions (see below) are queried; this can put significant load
## on the API, so it is recommended to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
## Default supported regions are:
## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen,
## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1
##
## From the discovery perspective this sets the scope for object discovery; the discovered info
## can be used to enrich the metrics with object attributes/tags. Discovery is not supported for
## all projects (if not supported, this is reported at startup - for example for the 'acs_cdn' project:
## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' )
## Currently, discovery is supported for the following projects:
## - acs_ecs_dashboard
## - acs_rds_dashboard
## - acs_slb_dashboard
## - acs_vpc_eip
regions = ["cn-hongkong"]

## The minimum period for AliyunCMS metrics is 1 minute (60s). However, not all
## metrics are available at the 1 minute period; some are collected at
## 3 minute, 5 minute, or larger intervals.
## See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
## Note that if a period is configured that is smaller than the minimum for a
## particular metric, that metric will not be returned by the Aliyun OpenAPI
## and will not be collected by Telegraf.
##
## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
period = "5m"

## Collection Delay (required - must account for metrics availability via AliyunCMS API)
delay = "1m"

## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"
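## For example, period = "5m" with interval = "5m" pulls exactly one aggregation
## window per collection; an interval that is not a multiple of the period (say
## "7m") would leave gaps or overlap between successive pulls.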

## Metric Statistic Project (required)
project = "acs_slb_dashboard"

## Maximum requests per second, default value is 200
ratelimit = 200

## How often the discovery API call is executed (default 1m)
#discovery_interval = "1m"

## Metrics to Pull (Required)
[[inputs.aliyuncms.metrics]]
## Metric names to be requested,
## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]

## Dimension filters for the metric (optional).
## These allow fetching additional metric dimensions. If a dimension is not specified, it may still be
## returned, or the data may be aggregated - this depends on the particular metric; details: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled).
## Values specified here are added to the list of discovered objects.
## You can specify either a single dimension:
#dimensions = '{"instanceId": "p-example"}'

## Or you can specify several dimensions at once:
#dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'

## Enrichment tags; these can be added from discovery data (if supported)
## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the Describe<ObjectType> API per project.
## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
#tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## The following tags are added by default: regionId (if discovery is enabled), userId, instanceId.

## Allow metrics without discovery data when discovery is enabled. If set to true, metrics without
## discovery data are emitted; otherwise they are dropped. This can help when debugging dimension filters
## or when the discovery scope only partially covers the monitoring scope.
#allow_dps_without_discovery = false
7 changes: 7 additions & 0 deletions plugins/inputs/amd_rocm_smi/sample.conf
@@ -0,0 +1,7 @@
# Query statistics from AMD Graphics cards using rocm-smi binary
[[inputs.amd_rocm_smi]]
## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
# bin_path = "/opt/rocm/bin/rocm-smi"

## Optional: timeout for GPU polling
# timeout = "5s"
74 changes: 74 additions & 0 deletions plugins/inputs/amqp_consumer/sample.conf
@@ -0,0 +1,74 @@
# AMQP consumer plugin
[[inputs.amqp_consumer]]
## Brokers to consume from. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer.
brokers = ["amqp://localhost:5672/influxdb"]
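## For example, to spread connections across two brokers (hostnames here are
## illustrative):
# brokers = ["amqp://broker-1.example.com:5672/influxdb", "amqp://broker-2.example.com:5672/influxdb"]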

## Authentication credentials for the PLAIN auth_method.
# username = ""
# password = ""

## Name of the exchange to declare. If unset, no exchange will be declared.
exchange = "telegraf"

## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# exchange_type = "topic"

## If true, exchange will be passively declared.
# exchange_passive = false

## Exchange durability can be either "transient" or "durable".
# exchange_durability = "durable"

## Additional exchange arguments.
# exchange_arguments = { }
# exchange_arguments = {"hash_property" = "timestamp"}

## AMQP queue name.
queue = "telegraf"

## AMQP queue durability can be "transient" or "durable".
queue_durability = "durable"

## If true, queue will be passively declared.
# queue_passive = false

## A binding between the exchange and queue using this binding key is
## created. If unset, no binding is created.
binding_key = "#"

## Maximum number of messages the server should give to the worker.
# prefetch_count = 50

## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
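## Worked example: 100 undelivered messages x 10 metrics per message = 1000
## metrics, i.e. exactly one full batch when metric_batch_size = 1000.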

## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"

## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## Content encoding for message payloads, can be set to "gzip" or
## "identity" to apply no encoding.
# content_encoding = "identity"

## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
20 changes: 20 additions & 0 deletions plugins/inputs/apache/sample.conf
@@ -0,0 +1,20 @@
# Read Apache status information (mod_status)
[[inputs.apache]]
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]

## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"

## Maximum time to receive response.
# response_timeout = "5s"

## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
8 changes: 8 additions & 0 deletions plugins/inputs/apcupsd/sample.conf
@@ -0,0 +1,8 @@
# Monitor APC UPSes connected to apcupsd
[[inputs.apcupsd]]
## A list of running apcupsd servers to connect to.
## If not provided, defaults to tcp://127.0.0.1:3551
servers = ["tcp://127.0.0.1:3551"]

## Timeout for dialing server.
timeout = "5s"
24 changes: 24 additions & 0 deletions plugins/inputs/aurora/sample.conf
@@ -0,0 +1,24 @@
# Gather metrics from Apache Aurora schedulers
[[inputs.aurora]]
## Schedulers are the base addresses of your Aurora Schedulers
schedulers = ["http://127.0.0.1:8081"]

## Set of role types to collect metrics from.
##
## The scheduler roles are checked each interval by contacting the
## scheduler nodes; zookeeper is not contacted.
# roles = ["leader", "follower"]

## Timeout is the max time for total network operations.
# timeout = "5s"

## Username and password are sent using HTTP Basic Auth.
# username = "username"
# password = "pa$$word"

## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
10 changes: 10 additions & 0 deletions plugins/inputs/azure_storage_queue/sample.conf
@@ -0,0 +1,10 @@
# Gather Azure Storage Queue metrics
[[inputs.azure_storage_queue]]
## Required Azure Storage Account name
account_name = "mystorageaccount"

## Required Azure Storage Account access key
account_key = "storageaccountaccesskey"

## Set to false to disable peeking at the age of the oldest message (executes faster)
# peek_oldest_message_age = true
10 changes: 10 additions & 0 deletions plugins/inputs/bcache/sample.conf
@@ -0,0 +1,10 @@
# Read metrics of bcache from stats_total and dirty_data
[[inputs.bcache]]
## Bcache sets path.
## If not specified, the default shown below is used:
bcachePath = "/sys/fs/bcache"

## By default, Telegraf gathers stats for all bcache devices.
## Setting devices restricts the stats to the specified
## bcache devices.
bcacheDevs = ["bcache0"]
8 changes: 8 additions & 0 deletions plugins/inputs/beanstalkd/sample.conf
@@ -0,0 +1,8 @@
# Collects Beanstalkd server and tubes stats
[[inputs.beanstalkd]]
## Server to collect data from
server = "localhost:11300"

## List of tubes to gather stats about.
## If no tubes are specified, stats are gathered for every tube on the server
## reported by the list-tubes command.
tubes = ["notifications"]
33 changes: 33 additions & 0 deletions plugins/inputs/beat/sample.conf
@@ -0,0 +1,33 @@
# Read metrics exposed by Beat
[[inputs.beat]]
## A URL from which to read Beat-formatted JSON
## Default is "http://127.0.0.1:5066".
url = "http://127.0.0.1:5066"

## Enable collection of the listed stats
## An empty list means collect all. Available options are currently
## "beat", "libbeat", "system" and "filebeat".
# include = ["beat", "libbeat", "filebeat"]

## HTTP method
# method = "GET"

## Optional HTTP headers
# headers = {"X-Special-Header" = "Special-Value"}

## Override HTTP "Host" header
# host_header = "logstash.example.com"

## Timeout for HTTP requests
# timeout = "5s"

## Optional HTTP Basic Auth credentials
# username = "username"
# password = "pa$$word"

## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
10 changes: 10 additions & 0 deletions plugins/inputs/bind/sample.conf
@@ -0,0 +1,10 @@
# Read BIND nameserver XML statistics
[[inputs.bind]]
## An array of BIND XML statistics URIs to gather stats from.
## Default is "http://localhost:8053/xml/v3".
# urls = ["http://localhost:8053/xml/v3"]
# gather_memory_contexts = false
# gather_views = false

## Timeout for HTTP requests made to the BIND nameserver.
# timeout = "4s"