chore(outputs): migrate sample configs into separate files (#11131)
sspaink authored May 18, 2022
1 parent 4b3a5d5 commit 256caed
Showing 53 changed files with 2,097 additions and 0 deletions.
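Why split the samples into standalone files? Keeping each sample.conf next to its plugin lets the plugin embed the file at build time instead of duplicating the config as a Go string literal. A minimal sketch of that pattern, using the amon plugin as an example (the type name and method shape are assumptions, not the literal Telegraf source):

```go
// Hypothetical sketch of the pattern this migration enables; the type
// name and method shape are assumptions, not the literal Telegraf source.
package amon

import (
	_ "embed"
)

//go:embed sample.conf
var sampleConfig string

type Amon struct{}

// SampleConfig returns the plugin's sample configuration, now read from
// the sample.conf file added alongside the plugin in this commit.
func (*Amon) SampleConfig() string {
	return sampleConfig
}
```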
10 changes: 10 additions & 0 deletions plugins/outputs/amon/sample.conf
@@ -0,0 +1,10 @@
# Configuration for Amon Server to send metrics to.
[[outputs.amon]]
## Amon Server Key
server_key = "my-server-key" # required

## Amon Instance URL
amon_instance = "https://youramoninstance" # required

## Connection timeout.
# timeout = "5s"
95 changes: 95 additions & 0 deletions plugins/outputs/amqp/sample.conf
@@ -0,0 +1,95 @@
# Publishes metrics to an AMQP broker
[[outputs.amqp]]
## Broker to publish to.
## deprecated in 1.7; use the brokers option
# url = "amqp://localhost:5672/influxdb"

## Brokers to publish to. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer.
brokers = ["amqp://localhost:5672/influxdb"]

## Maximum messages to send over a connection. Once this is reached, the
## connection is closed and a new connection is made. This can be helpful for
## load balancing when not using a dedicated load balancer.
# max_messages = 0

## Exchange to declare and publish to.
exchange = "telegraf"

## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# exchange_type = "topic"

## If true, exchange will be passively declared.
# exchange_passive = false

## Exchange durability can be either "transient" or "durable".
# exchange_durability = "durable"

## Additional exchange arguments.
# exchange_arguments = { }
# exchange_arguments = {"hash_property" = "timestamp"}

## Authentication credentials for the PLAIN auth_method.
# username = ""
# password = ""

## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"

## Metric tag to use as a routing key.
## i.e., if this tag exists, its value will be used as the routing key
# routing_tag = "host"

## Static routing key. Used when no routing_tag is set or as a fallback
## when the tag specified in routing_tag is not found (see the Go sketch after this sample).
# routing_key = ""
# routing_key = "telegraf"

## Delivery Mode controls if a published message is persistent.
## One of "transient" or "persistent".
# delivery_mode = "transient"

## InfluxDB database added as a message header.
## deprecated in 1.7; use the headers option
# database = "telegraf"

## InfluxDB retention policy added as a message header
## deprecated in 1.7; use the headers option
# retention_policy = "default"

## Static headers added to each published message.
# headers = { }
# headers = {"database" = "telegraf", "retention_policy" = "default"}

## Connection timeout. If not provided, will default to 5s. 0s means no
## timeout (not recommended).
# timeout = "5s"

## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## If true use batch serialization format instead of line based delimiting.
## Only applies to data formats which are not line based such as JSON.
## Recommended to set to true.
# use_batch_format = false

## Content encoding for message payloads, can be set to "gzip" or
## "identity" to apply no encoding.
##
## Please note that when use_batch_format = false each AMQP message contains
## only a single metric; it is recommended to use compression with batch
## format for best results.
# content_encoding = "identity"

## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
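The routing_tag / routing_key options in the sample above interact as a simple fallback. A hedged Go sketch of that selection logic, not the plugin's actual code:

```go
package main

import "fmt"

// pickRoutingKey mirrors the fallback described in the sample above:
// prefer the value of the metric tag named by routing_tag, and fall
// back to the static routing_key when the tag is absent.
func pickRoutingKey(routingTag, routingKey string, tags map[string]string) string {
	if routingTag != "" {
		if v, ok := tags[routingTag]; ok {
			return v
		}
	}
	return routingKey
}

func main() {
	tags := map[string]string{"host": "web-01"}
	fmt.Println(pickRoutingKey("host", "telegraf", tags))                // web-01
	fmt.Println(pickRoutingKey("host", "telegraf", map[string]string{})) // telegraf
}
```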
21 changes: 21 additions & 0 deletions plugins/outputs/application_insights/sample.conf
@@ -0,0 +1,21 @@
# Send metrics to Azure Application Insights
[[outputs.application_insights]]
## Instrumentation key of the Application Insights resource.
instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"

## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
# endpoint_url = "https://dc.services.visualstudio.com/v2/track"

## Timeout for closing (default: 5s).
# timeout = "5s"

## Enable additional diagnostic logging.
# enable_diagnostic_logging = false

## Context Tag Sources set Application Insights context tags from the value of the named Telegraf tag (see the Go sketch after this sample).
##
## For list of allowed context tag keys see:
## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
# [outputs.application_insights.context_tag_sources]
# "ai.cloud.role" = "kubernetes_container_name"
# "ai.cloud.roleInstance" = "kubernetes_pod_name"
25 changes: 25 additions & 0 deletions plugins/outputs/azure_data_explorer/sample.conf
@@ -0,0 +1,25 @@
# Sends metrics to Azure Data Explorer
[[outputs.azure_data_explorer]]
## The URI property of the Azure Data Explorer resource on Azure
## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net
endpoint_url = ""

## The Azure Data Explorer database that the metrics will be ingested into.
## The plugin will NOT generate this database automatically; it is expected that this database already exists before ingestion.
## ex: "exampledatabase"
database = ""

## Timeout for Azure Data Explorer operations
# timeout = "20s"

## Type of metrics grouping used when pushing to Azure Data Explorer.
## Default is "TablePerMetric" for one table per different metric.
## For more information, please check the plugin README.
# metrics_grouping_type = "TablePerMetric"

## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
# table_name = ""

## Creates tables and relevant mapping if set to true (default).
## Skips table and mapping creation if set to false; this is useful for running Telegraf with the lowest possible permissions, i.e. the table ingestor role.
# create_tables = true
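metrics_grouping_type in the sample above decides whether each metric gets its own table or everything lands in table_name. A hedged sketch of that decision; see the plugin README for the authoritative behavior:

```go
package main

import "fmt"

// targetTable picks the destination table per the grouping mode above:
// "SingleTable" always uses the configured table_name, while the default
// "TablePerMetric" uses the metric name itself.
func targetTable(groupingType, tableName, metricName string) string {
	if groupingType == "SingleTable" {
		return tableName
	}
	return metricName
}

func main() {
	fmt.Println(targetTable("TablePerMetric", "", "cpu"))     // cpu
	fmt.Println(targetTable("SingleTable", "metrics", "cpu")) // metrics
}
```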
29 changes: 29 additions & 0 deletions plugins/outputs/azure_monitor/sample.conf
@@ -0,0 +1,29 @@
# Send aggregate metrics to Azure Monitor
[[outputs.azure_monitor]]
## Timeout for HTTP writes.
# timeout = "20s"

## Set the namespace prefix, defaults to "Telegraf/<input-name>".
# namespace_prefix = "Telegraf/"

## Azure Monitor doesn't have a string value type, so convert string
## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
## a maximum of 10 dimensions, so Telegraf will only send the first 10
## alphanumeric dimensions (see the sketch after this sample).
# strings_as_dimensions = false

## Both region and resource_id must be set or be available via the
## Instance Metadata service on Azure Virtual Machines.
#
## Azure Region to publish metrics against.
## ex: region = "southcentralus"
# region = ""
#
## The Azure Resource ID against which metrics will be logged.
## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
# resource_id = ""

## Optionally, if in Azure US Government, China, or other sovereign
## cloud environment, set the appropriate REST endpoint for receiving
## metrics. (Note: region may be unused in this context)
# endpoint_url = "https://monitoring.core.usgovcloudapi.net"
16 changes: 16 additions & 0 deletions plugins/outputs/bigquery/sample.conf
@@ -0,0 +1,16 @@
# Configuration for Google Cloud BigQuery to send entries
[[outputs.bigquery]]
## Credentials File
credentials_file = "/path/to/service/account/key.json"

## Google Cloud Platform Project
project = "my-gcp-project"

## The BigQuery dataset to write metrics to
dataset = "telegraf"

## Timeout for BigQuery operations.
# timeout = "5s"

## Character used to replace hyphens in the metric name
# replace_hyphen_to = "_"
49 changes: 49 additions & 0 deletions plugins/outputs/cloud_pubsub/sample.conf
@@ -0,0 +1,49 @@
# Publish Telegraf metrics to a Google Cloud PubSub topic
[[outputs.cloud_pubsub]]
## Required. Name of Google Cloud Platform (GCP) Project that owns
## the given PubSub topic.
project = "my-project"

## Required. Name of PubSub topic to publish metrics to.
topic = "my-topic"

## Required. Data format to output.
## Each data format has its own unique set of configuration options.
## Read more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"

## Optional. Filepath for GCP credentials JSON file to authorize calls to
## PubSub APIs. If not set explicitly, Telegraf will attempt to use
## Application Default Credentials, which is preferred.
# credentials_file = "path/to/my/creds.json"

## Optional. If true, will send all metrics per write in one PubSub message.
# send_batched = true

## The following publish_* parameters specifically configure the batching of
## requests made to the GCP Cloud PubSub API via the PubSub Golang library
## (a Go sketch follows this sample). Read more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings

## Optional. Send a request to PubSub (i.e. actually publish a batch)
## when it has this many PubSub messages. If send_batched is true,
## this is ignored and treated as if it were 1.
# publish_count_threshold = 1000

## Optional. Send a request to PubSub (i.e. actually publish a batch)
## when it has this many outstanding bytes. If send_batched is true,
## this is ignored and treated as if it were 1.
# publish_byte_threshold = 1000000

## Optional. Number of goroutines used when publishing to the PubSub API.
# publish_num_go_routines = 2

## Optional. Specifies a timeout for requests to the PubSub API.
# publish_timeout = "30s"

## Optional. If true, published PubSub message data will be base64-encoded.
# base64_data = false

## Optional. PubSub attributes to add to metrics.
# [outputs.cloud_pubsub.attributes]
# my_attr = "tag_value"
42 changes: 42 additions & 0 deletions plugins/outputs/cloudwatch/sample.conf
@@ -0,0 +1,42 @@
# Configuration for AWS CloudWatch output.
[[outputs.cloudwatch]]
## Amazon REGION
region = "us-east-1"

## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""

## Endpoint to make requests against; the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""

## Namespace for the CloudWatch MetricDatums
namespace = "InfluxData/Telegraf"

## If you have a large amount of metrics, you should consider sending statistic
## values instead of raw metrics, which can not only improve performance but
## also save AWS API cost. If this flag is enabled, the plugin will parse the required
## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch.
## You can use the basicstats aggregator to calculate those fields. If not all statistic
## fields are available, all fields will still be sent as raw metrics (see the sketch after this sample).
# write_statistics = false

## Enable high resolution metrics with 1 second precision (if not enabled, standard resolution metrics have 60 second precision)
# high_resolution_metrics = false
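write_statistics above trades raw values for the four CloudWatch statistic fields when they are all present. A hedged sketch of that check, with illustrative types rather than the plugin's code:

```go
package main

import "fmt"

// statisticSet mirrors the four statistic fields CloudWatch accepts;
// an illustrative type, not the AWS SDK's own.
type statisticSet struct {
	SampleCount, Minimum, Maximum, Sum float64
}

// toStatisticSet returns a statistic set when the basicstats fields
// (count, min, max, sum) are all present; otherwise the caller falls
// back to sending the fields as raw metrics, as described above.
func toStatisticSet(fields map[string]float64) (statisticSet, bool) {
	count, ok1 := fields["count"]
	min, ok2 := fields["min"]
	max, ok3 := fields["max"]
	sum, ok4 := fields["sum"]
	if !(ok1 && ok2 && ok3 && ok4) {
		return statisticSet{}, false
	}
	return statisticSet{count, min, max, sum}, true
}

func main() {
	fields := map[string]float64{"count": 4, "min": 1, "max": 9, "sum": 16}
	fmt.Println(toStatisticSet(fields))
}
```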
58 changes: 58 additions & 0 deletions plugins/outputs/cloudwatch_logs/sample.conf
@@ -0,0 +1,58 @@
# Configuration for AWS CloudWatchLogs output.
[[outputs.cloudwatch_logs]]
## The region is the Amazon region that you wish to connect to.
## Examples include but are not limited to:
## - us-west-1
## - us-west-2
## - us-east-1
## - ap-southeast-1
## - ap-southeast-2
## ...
region = "us-east-1"

## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""

## Endpoint to make requests against; the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""

## CloudWatch log group. Must be created in AWS CloudWatch Logs upfront!
## For example, you can specify the name of the k8s cluster here to group logs from all clusters in one place
log_group = "my-group-name"

## Log stream in log group
## Either the log stream name or a reference to a metric attribute from which it can be parsed:
## tag:<TAG_NAME> or field:<FIELD_NAME>. If the log stream does not exist, it will be created.
## Since AWS does not automatically delete log streams with expired log entries (i.e. empty log streams),
## you need to put appropriate house-keeping in place (https://forums.aws.amazon.com/thread.jspa?threadID=178855)
log_stream = "tag:location"

## Source of log data: the metric name.
## Specify the name of the metric from which the log data should be retrieved.
## E.g., if you are using the docker_log plugin to stream logs from a container,
## specify log_data_metric_name = "docker_log"
log_data_metric_name = "docker_log"

## Specify from which metric attribute the log data should be retrieved:
## tag:<TAG_NAME> or field:<FIELD_NAME> (see the Go sketch after this sample).
## E.g., if you are using the docker_log plugin to stream logs from a container,
## specify log_data_source = "field:message"
log_data_source = "field:message"
13 changes: 13 additions & 0 deletions plugins/outputs/cratedb/sample.conf
@@ -0,0 +1,13 @@
# Configuration for CrateDB to send metrics to.
[[outputs.cratedb]]
# A github.com/jackc/pgx/v4 connection string.
# See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig
url = "postgres://user:password@localhost/schema?sslmode=disable"
# Timeout for all CrateDB queries.
timeout = "5s"
# Name of the table to store metrics in.
table = "metrics"
# If true, and the metrics table does not exist, create it automatically.
table_create = true
# The character(s) to replace any '.' in an object key with
key_separator = "_"
17 changes: 17 additions & 0 deletions plugins/outputs/datadog/sample.conf
@@ -0,0 +1,17 @@
# Configuration for DataDog API to send metrics to.
[[outputs.datadog]]
## Datadog API key
apikey = "my-secret-key"

## Connection timeout.
# timeout = "5s"

## Write URL override; useful for debugging.
# url = "https://app.datadoghq.com/api/v1/series"

## Set http_proxy (telegraf uses the system-wide proxy settings if it isn't set)
# http_proxy_url = "http://localhost:8888"

## Override the default (none) compression used to send data.
## Supports: "zlib", "none"
# compression = "none"
3 changes: 3 additions & 0 deletions plugins/outputs/discard/sample.conf
@@ -0,0 +1,3 @@
# Send metrics to nowhere at all
[[outputs.discard]]
# no configuration