diff --git a/.circleci/config.yml b/.circleci/config.yml index 974f6c00f66..8a0fd032b65 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -176,7 +176,7 @@ jobs: node --version && npm --version docker --version google-chrome --version && which google-chrome && chromedriver --version && which chromedriver - timeout 300 bash -c 'while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:9999)" != "200" ]]; do sleep 5; done' || false + timeout 300 bash -c 'while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8086)" != "200" ]]; do sleep 5; done' || false - run: name: Selenium tests command: | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7d33ebc2323..2fa675a2712 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,13 +16,13 @@ Ideally, test cases would be in the form of `curl` commands. For example: ```bash # write data -curl -XPOST "http://localhost:9999/api/v2/write?org=YOUR_ORG&bucket=YOUR_BUCKET&precision=s" \ +curl -XPOST "http://localhost:8086/api/v2/write?org=YOUR_ORG&bucket=YOUR_BUCKET&precision=s" \ --header "Authorization: Token YOURAUTHTOKEN" \ --data-raw "mem,host=host1 used_percent=23.43234543 1556896326" # query data # Bug: expected it to return no data, but data comes back. -curl http://localhost:9999/api/v2/query?org=my-org -XPOST -sS \ +curl http://localhost:8086/api/v2/query?org=my-org -XPOST -sS \ -H 'Authorization: Token YOURAUTHTOKEN' \ -H 'Accept: application/csv' \ -H 'Content-type: application/vnd.flux' \ diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index ef4040cdd9a..6f6327e726d 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -41,7 +41,7 @@ cd ui yarn && yarn start ``` -If there are no errors, hit [localhost:8080](http://localhost:8080) and follow the prompts to setup your username and password. *Note the port difference: `8080` vs the production `9999`* +If there are no errors, hit [localhost:8080](http://localhost:8080) and follow the prompts to setup your username and password. *Note the port difference: `8080` vs the production `8086`* You're set up to develop Influx locally. Any changes you make to front-end code under the `ui/` directory will be updated after the watcher process (that was initiated by running `yarn start`) sees them and rebuilds the bundle. Any changes to go code will need to be re-compiled by re-running the `go run` command above. @@ -65,7 +65,7 @@ Tab 1: go run ./cmd/influxd --assets-path=ui/build ``` -This starts the influxdb application server. It handles API requests and can be reached via `localhost:9999`. Any changes to go code will need to be re-compiled by re-running the `go run` command above. +This starts the influxdb application server. It handles API requests and can be reached via `localhost:8086`. Any changes to go code will need to be re-compiled by re-running the `go run` command above. Tab 2: diff --git a/Dockerfile b/Dockerfile index 4134fad79bf..8302a4c132a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -45,7 +45,7 @@ FROM debian:stretch-slim AS influx COPY --from=dbuild-all /code/bin/linux/influxd /usr/bin/influxd COPY --from=dbuild-all /code/bin/linux/influx /usr/bin/influx -EXPOSE 9999 +EXPOSE 8086 ENTRYPOINT [ "/usr/bin/influxd" ] diff --git a/Makefile b/Makefile index a14d638d890..af422bca121 100644 --- a/Makefile +++ b/Makefile @@ -227,7 +227,7 @@ dshell-image: @docker image build --build-arg "USERID=$(shell id -u)" -t influxdb:dshell --target dshell . 
dshell: dshell-image - @docker container run --rm -p 9999:9999 -p 8080:8080 -u $(shell id -u) -it -v $(shell pwd):/code -w /code influxdb:dshell + @docker container run --rm -p 8086:8086 -p 8080:8080 -u $(shell id -u) -it -v $(shell pwd):/code -w /code influxdb:dshell # .PHONY targets represent actions that do not create an actual file. .PHONY: all $(SUBDIRS) run fmt checkfmt tidy checktidy checkgenerate test test-go test-js test-go-race bench clean node_modules vet nightly chronogiraffe dist ping protoc e2e run-e2e influxd libflux flags dshell dclean docker-image-flux docker-image-influx goreleaser diff --git a/README.md b/README.md index 4ac30081639..69720e6e3ae 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ Planned activities include: ## Installing from Source -We have nightly and weekly versioned Docker images, Debian packages, RPM packages, and tarballs of InfluxDB available at the [InfluxData downloads page](https://portal.influxdata.com/downloads/). +We have nightly and weekly versioned Docker images, Debian packages, RPM packages, and tarballs of InfluxDB available at the [InfluxData downloads page](https://portal.influxdata.com/downloads/). We also provide the `influx` command line interface (CLI) client as a separate binary available at the same location. ## Building From Source @@ -133,17 +133,12 @@ Everything in InfluxDB is organized under a concept of an organization. The API Buckets represent where you store time series data. They're synonymous with what was previously in InfluxDB 1.x a database and retention policy. -The simplest way to get set up is to point your browser to [http://localhost:9999](http://localhost:9999) and go through the prompts. +The simplest way to get set up is to point your browser to [http://localhost:8086](http://localhost:8086) and go through the prompts. -**Note**: Port 9999 will be used during the beta phases of development of InfluxDB v2.0. -This should allow a v2.0-beta instance to be run alongside a v1.x instance without interfering on port 8086. -InfluxDB will thereafter continue to use 8086. +You can also get set up from the CLI using the command `influx setup`: -You can also get set up from the CLI using the subcommands `influx user`, `influx auth`, `influx org` and `influx bucket`, -or do it all in one breath with `influx setup`: - -``` +```bash $ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup Welcome to InfluxDB 2.0! Please type your primary username: marty @@ -169,48 +164,50 @@ Confirm? (y/n): y UserID Username Organization Bucket 033a3f2c5ccaa000 marty InfluxData Telegraf -Your token has been stored in /Users/marty/.influxdbv2/credentials +Your token has been stored in /Users/marty/.influxdbv2/configs ``` -You may get into a development loop where `influx setup` becomes tedious. +You can run this command non-interactively using the `-f, --force` flag if you are automating the setup. Some added flags can help: -``` -$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup --username marty --password F1uxKapacit0r85 --org InfluxData --bucket telegraf --retention 168 --token where-were-going-we-dont-need-roads --force -``` - -`~/.influxdbv2/credentials` contains your auth token. -Most `influx` commands read the token from this file path by default. 
- -You may need the organization ID and bucket ID later: - -``` -$ influx org find -ID Name -033a3f2c708aa000 InfluxData +```bash +$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup \ +--username marty \ +--password F1uxKapacit0r85 \ +--org InfluxData \ +--bucket telegraf \ +--retention 168 \ +--token where-were-going-we-dont-need-roads \ +--force ``` -``` -$ influx bucket find -ID Name Retention Organization OrganizationID -033a3f2c710aa000 telegraf 72h0m0s InfluxData 033a3f2c708aa000 +Once setup is complete, a configuration profile is created to allow you to interact with your local InfluxDB without passing in credentials each time. You can list and manage those profiles using the `influx config` command. +```bash +$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx config +Active Name URL Org +* default http://localhost:8086 InfluxData ``` +## Writing Data Write to measurement `m`, with tag `v=2`, in bucket `telegraf`, which belongs to organization `InfluxData`: -``` -$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx write --org InfluxData --bucket telegraf --precision s "m v=2 $(date +%s)" +```bash +$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx write --bucket telegraf --precision s "m v=2 $(date +%s)" ``` +Since you have a default profile set up, you can omit the Organization and Token from the command. + Write the same point using `curl`: -``` -curl --header "Authorization: Token $(cat ~/.influxdbv2/credentials)" --data-raw "m v=2 $(date +%s)" "http://localhost:9999/api/v2/write?org=InfluxData&bucket=telegraf&precision=s" +```bash +curl --header "Authorization: Token $(bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx auth list --json | jq -r '.[0].token')" \ +--data-raw "m v=2 $(date +%s)" \ +"http://localhost:8086/api/v2/write?org=InfluxData&bucket=telegraf&precision=s" ``` Read that back with a simple Flux query: -``` -$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx query -o InfluxData 'from(bucket:"telegraf") |> range(start:-1h)' +```bash +$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx query 'from(bucket:"telegraf") |> range(start:-1h)' Result: _result Table: keys: [_start, _stop, _field, _measurement] _start:time _stop:time _field:string _measurement:string _time:time _value:float @@ -218,19 +215,7 @@ Table: keys: [_start, _stop, _field, _measurement] 2019-12-30T22:19:39.043918000Z 2019-12-30T23:19:39.043918000Z v m 2019-12-30T23:17:02.000000000Z 2 ``` -Use the fancy REPL: - -``` -$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx repl -o InfluxData -> from(bucket:"telegraf") |> range(start:-1h) -Result: _result -Table: keys: [_start, _stop, _field, _measurement] - _start:time _stop:time _field:string _measurement:string _time:time _value:float ------------------------------- ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- -2019-12-30T22:22:44.776351000Z 2019-12-30T23:22:44.776351000Z v m 2019-12-30T23:17:02.000000000Z 2 -> -``` - +Use the `-r, --raw` option to return the raw flux response from the query. This is useful for moving data from one instance to another as the `influx write` command can accept the Flux response using the `--format csv` option. 
## Introducing Flux diff --git a/chronograf/bolt/sources.go b/chronograf/bolt/sources.go index bfc0a474f66..67ce9bbbd88 100644 --- a/chronograf/bolt/sources.go +++ b/chronograf/bolt/sources.go @@ -21,7 +21,7 @@ var DefaultSource = &chronograf.Source{ ID: math.MaxInt32, // Use large number to avoid possible collisions in older chronograf. Name: "autogen", Type: "influx", - URL: "http://localhost:9999", + URL: "http://localhost:8086", Default: false, } diff --git a/cmd/influx/config/config.go b/cmd/influx/config/config.go index 4e506774fb4..d78895b923d 100644 --- a/cmd/influx/config/config.go +++ b/cmd/influx/config/config.go @@ -26,7 +26,7 @@ type Config struct { // DefaultConfig is default config without token var DefaultConfig = Config{ Name: "default", - Host: "http://localhost:9999", + Host: "http://localhost:8086", Active: true, } diff --git a/cmd/influx/config/config_test.go b/cmd/influx/config/config_test.go index c70cd11bfa2..cf822ab9faa 100644 --- a/cmd/influx/config/config_test.go +++ b/cmd/influx/config/config_test.go @@ -36,12 +36,12 @@ func TestWriteConfigs(t *testing.T) { "default": Config{ Token: "token1", Org: "org1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", Active: true, }, }, result: `[default] - url = "http://localhost:9999" + url = "http://localhost:8086" token = "token1" org = "org1" active = true` + commentedStr, diff --git a/cmd/influx/config_test.go b/cmd/influx/config_test.go index a1ab82280b5..647deb16779 100644 --- a/cmd/influx/config_test.go +++ b/cmd/influx/config_test.go @@ -30,7 +30,7 @@ func TestCmdConfig(t *testing.T) { flags: []string{ "--config-name", "default", "--org", "org1", - "--host-url", "http://localhost:9999", + "--host-url", "http://localhost:8086", "--token", "tok1", "--active", }, @@ -40,7 +40,7 @@ func TestCmdConfig(t *testing.T) { Org: "org1", Active: true, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, { @@ -48,7 +48,7 @@ func TestCmdConfig(t *testing.T) { flags: []string{ "-n", "default", "-o", "org1", - "-u", "http://localhost:9999", + "-u", "http://localhost:8086", "-t", "tok1", "-a", }, @@ -58,7 +58,7 @@ func TestCmdConfig(t *testing.T) { Org: "org1", Active: true, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, { @@ -66,7 +66,7 @@ func TestCmdConfig(t *testing.T) { flags: []string{ "-n", "default", "-o", "org1", - "-u", "http://localhost:9999", + "-u", "http://localhost:8086", "-t", "tok1", "-a", }, @@ -83,7 +83,7 @@ func TestCmdConfig(t *testing.T) { Org: "org1", Active: true, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, } @@ -138,7 +138,7 @@ func TestCmdConfig(t *testing.T) { "--configs-path=" + file, "-n", "default", "-o", "org1", - "-u", "http://localhost:9999", + "-u", "http://localhost:8086", "-t", "tok1", "-a", } @@ -152,7 +152,7 @@ func TestCmdConfig(t *testing.T) { require.True(t, ok) assert.Equal(t, "default", cfg.Name) assert.Equal(t, "org1", cfg.Org) - assert.Equal(t, "http://localhost:9999", cfg.Host) + assert.Equal(t, "http://localhost:8086", cfg.Host) assert.Equal(t, "tok1", cfg.Token) assert.True(t, cfg.Active) }) @@ -199,7 +199,7 @@ func TestCmdConfig(t *testing.T) { Org: "org1", Active: false, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, expected: config.Config{ @@ -207,7 +207,7 @@ func TestCmdConfig(t *testing.T) { Org: "org1", Active: true, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, { @@ 
-227,7 +227,7 @@ func TestCmdConfig(t *testing.T) { Active: false, PreviousActive: true, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, expected: config.Config{ @@ -235,7 +235,7 @@ func TestCmdConfig(t *testing.T) { Org: "org1", Active: true, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, } @@ -299,7 +299,7 @@ func TestCmdConfig(t *testing.T) { flags: []string{ "--config-name", "default", "--org", "org1", - "--host-url", "http://localhost:9999", + "--host-url", "http://localhost:8086", "--token", "tok1", "--active", }, @@ -308,7 +308,7 @@ func TestCmdConfig(t *testing.T) { Org: "org1", Active: true, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, { @@ -362,7 +362,7 @@ func TestCmdConfig(t *testing.T) { flags: []string{ "-n", "default", "-o", "org1", - "-u", "http://localhost:9999", + "-u", "http://localhost:8086", "-t", "tok1", "-a", }, @@ -371,7 +371,7 @@ func TestCmdConfig(t *testing.T) { Org: "org1", Active: true, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, } @@ -546,7 +546,7 @@ func TestCmdConfig(t *testing.T) { Org: "org1", Active: false, Token: "tok1", - Host: "http://localhost:9999", + Host: "http://localhost:8086", }, }, }, @@ -594,7 +594,7 @@ func testConfigInvalidURLs(t *testing.T, cmdName string, cmdFn func(*globalFlags flags: []string{ "--config-name", "default", "--org", "org1", - "--host-url", "localhost:9999", + "--host-url", "localhost:8086", "--token", "tok1", }, }, diff --git a/cmd/influxd/launcher/launcher.go b/cmd/influxd/launcher/launcher.go index 5e9515567fe..b0313132134 100644 --- a/cmd/influxd/launcher/launcher.go +++ b/cmd/influxd/launcher/launcher.go @@ -196,7 +196,7 @@ func launcherOpts(l *Launcher) []cli.Opt { { DestP: &l.httpBindAddress, Flag: "http-bind-address", - Default: ":9999", + Default: ":8086", Desc: "bind address for the REST HTTP API", }, { diff --git a/cmd/influxd/launcher/pkger_test.go b/cmd/influxd/launcher/pkger_test.go index 38af051c62a..8a09788bd8e 100644 --- a/cmd/influxd/launcher/pkger_test.go +++ b/cmd/influxd/launcher/pkger_test.go @@ -4301,7 +4301,7 @@ const telegrafCfg = `[agent] collection_jitter = "0s" flush_interval = "10s" [[outputs.influxdb_v2]] - urls = ["http://localhost:9999"] + urls = ["http://localhost:8086"] token = "$INFLUX_TOKEN" organization = "rg" bucket = "rucket_3" diff --git a/docker/influxd/Dockerfile b/docker/influxd/Dockerfile index 3c78907c213..35e12af06c3 100644 --- a/docker/influxd/Dockerfile +++ b/docker/influxd/Dockerfile @@ -1,7 +1,7 @@ FROM debian:stable-slim COPY influx influxd /usr/bin/ -EXPOSE 9999 +EXPOSE 8086 ENV DEBIAN_FRONTEND noninteractive diff --git a/e2e/.circleci/config.yml b/e2e/.circleci/config.yml index 30b67b9209d..9cba3dd8cd0 100644 --- a/e2e/.circleci/config.yml +++ b/e2e/.circleci/config.yml @@ -16,7 +16,7 @@ jobs: node --version && npm --version docker --version google-chrome --version && which google-chrome && chromedriver --version && which chromedriver - timeout 300 bash -c 'while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:9999)" != "200" ]]; do sleep 5; done' || false + timeout 300 bash -c 'while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8086)" != "200" ]]; do sleep 5; done' || false - run: name: Selenium tests diff --git a/e2e/README.md b/e2e/README.md index 3f80a090584..ca81d679958 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -55,7 +55,7 @@ For example, to declare the `influx_url` property in 
the `development` configura e.g. -`export E2E_DEVELOPMENT_INFLUX_URL="http://172.17.0.2:9999"` +`export E2E_DEVELOPMENT_INFLUX_URL="http://172.17.0.2:8086"` This feature was added specifically to define passwords and usernames only via the test environment. However, it can be used with any configuration key value such as the INFLUX_URL endpoint. @@ -121,7 +121,7 @@ e.g. sudo docker run -it -v `pwd`/report:/home/e2e/report -v `pwd`/screenshots:/home/e2e/screenshots \ -v /tmp/e2e/etc:/home/e2e/etc -v /tmp/e2e/downloads:/home/e2e/downloads \ -e SELENIUM_REMOTE_URL="http://${SELENOID_HOST}:4444/wd/hub" \ - -e E2E_${ACTIVE_CONF^^}_INFLUX_URL="http://${INFLUX2_HOST}:9999" --detach \ + -e E2E_${ACTIVE_CONF^^}_INFLUX_URL="http://${INFLUX2_HOST}:8086" --detach \ --name ${TEST_CONTAINER} e2e-${TEST_CONTAINER}:latest ``` diff --git a/e2e/e2e.conf.json b/e2e/e2e.conf.json index 64be3dffb16..1fefd3bc9bc 100644 --- a/e2e/e2e.conf.json +++ b/e2e/e2e.conf.json @@ -2,7 +2,7 @@ "active": "development", "development" : { "config_id" : "development", - "influx_url": "http://localhost:9999", + "influx_url": "http://localhost:8086", "def_ctx": "/", "headless": false, "sel_docker": false, @@ -28,7 +28,7 @@ }, "nightly" : { "config_id": "nightly", - "influx_url": "http://localhost:9999", + "influx_url": "http://localhost:8086", "def_ctx": "/", "headless": true, "sel_docker": false, diff --git a/e2e/features/loadData/scrapers.feature b/e2e/features/loadData/scrapers.feature index ea1a5a8a5be..61229e5330e 100644 --- a/e2e/features/loadData/scrapers.feature +++ b/e2e/features/loadData/scrapers.feature @@ -4,7 +4,7 @@ Feature: Load Data - Scrapers As a user I want to Read Create Update and Delete Scrapers So that I can manage the stores used with Influxdbv2 # Move exercise create scraper popup here -# N.B. can verify scrapers at endpoint http://localhost:9999/api/v2/scrapers +# N.B. can verify scrapers at endpoint http://localhost:8086/api/v2/scrapers @tested @@ -48,7 +48,7 @@ Scenario: Exercise create Scraper popup Then the form element error message is "Target URL cannot be empty" Then a form input error icon is shown Then the Create Scrapper popup create button is disabled - When enter the value "http://localhost:9999/metrics" into the Create Scraper popup url input + When enter the value "http://localhost:8086/metrics" into the Create Scraper popup url input Then the form element error message is not shown Then no form input error icon is shown Then the Create Scrapper popup create button is enabled @@ -74,8 +74,8 @@ Scenario Outline: Create Scrapers Examples: | NAME | ENDPOINT | BUCKET | - | Melnik | http://localhost:9999/metrics | DEFAULT | - | Morlaix | http://localhost:9999/metrics | Duchamp | + | Melnik | http://localhost:8086/metrics | DEFAULT | + | Morlaix | http://localhost:8086/metrics | Duchamp | | Brno | http://localhost:10018/bogus | DEFAULT | | Brest | http://localhost:10018/bogus | Duchamp | diff --git a/e2e/features/loadData/telegrafs.feature b/e2e/features/loadData/telegrafs.feature index 32225a303ed..e7241813c52 100644 --- a/e2e/features/loadData/telegrafs.feature +++ b/e2e/features/loadData/telegrafs.feature @@ -222,5 +222,5 @@ Scenario Outline: Delete Telegraf Card |Norimberk | |Rakovnik | -# N.B. can verify telegrafs at endpoint http://localhost:9999/api/v2/telegrafs +# N.B. 
can verify telegrafs at endpoint http://localhost:8086/api/v2/telegrafs # TODO - Test installation of telegraf and instructions - check back end diff --git a/e2e/scripts/circleci-run.sh b/e2e/scripts/circleci-run.sh index 98c4f470876..403996bcbd0 100755 --- a/e2e/scripts/circleci-run.sh +++ b/e2e/scripts/circleci-run.sh @@ -3,7 +3,7 @@ sleep 30 sudo netstat -tlnp -curl -v --connect-timeout 60 --max-time 60 http://localhost:9999/debug/flush +curl -v --connect-timeout 60 --max-time 60 http://localhost:8086/debug/flush git clone https://github.com/influxdata/influxdb.git diff --git a/e2e/scripts/containerTests.sh b/e2e/scripts/containerTests.sh index b4da33ea384..bc8ff07cb72 100755 --- a/e2e/scripts/containerTests.sh +++ b/e2e/scripts/containerTests.sh @@ -5,7 +5,7 @@ TEST_CONTAINER=bonitoo_e2e INFLUX2_CONTAINER=influx2_solo E2E_MAP_DIR=/tmp/e2e INFLUX2_HOST=$(sudo docker inspect -f "{{ .NetworkSettings.IPAddress }}" ${INFLUX2_CONTAINER}) -INFLUX2_URL="http://${INFLUX2_HOST}:9999" +INFLUX2_URL="http://${INFLUX2_HOST}:8086" #TAGS="@influx-influx" ACTIVE_CONF=development diff --git a/e2e/scripts/influxdb2_community_test_env.sh b/e2e/scripts/influxdb2_community_test_env.sh index a5da432f406..1318df679e4 100755 --- a/e2e/scripts/influxdb2_community_test_env.sh +++ b/e2e/scripts/influxdb2_community_test_env.sh @@ -42,8 +42,8 @@ pull_docker(){ run_docker_influx(){ mkdir -p ${LOG_DIR} echo "["$(date +"%d.%m.%Y %T")"] starting docker instance ${INSTANCE_NAME}" - sudo docker run --name ${INSTANCE_NAME} --publish 9999:9999 ${DOCKER_IMAGE} > ${LOG_FILE} 2>&1 & - echo "["$(date +"%d.%m.%Y %T")"] started instance $INSTANCE_NAME listening at port 9999." + sudo docker run --name ${INSTANCE_NAME} --publish 8086:8086 ${DOCKER_IMAGE} > ${LOG_FILE} 2>&1 & + echo "["$(date +"%d.%m.%Y %T")"] started instance $INSTANCE_NAME listening at port 8086." echo "logfile at $LOG_FILE" sleep 3 echo "\n$(tail -n32 $LOG_FILE)\n" @@ -56,7 +56,7 @@ run_docker_influx_test_env(){ sudo docker rm $INSTANCE_NAME sudo docker pull quay.io/influxdb/influx:nightly sudo docker build -t influxdb_test_image . - sudo docker run --name $INSTANCE_NAME --publish 9999:9999 influxdb_test_image > ${LOG_FILE} 2>&1 & + sudo docker run --name $INSTANCE_NAME --publish 8086:8086 influxdb_test_image > ${LOG_FILE} 2>&1 & } diff --git a/e2e/src/utils/influxUtils.js b/e2e/src/utils/influxUtils.js index 76a22050432..ee854bfc5c8 100644 --- a/e2e/src/utils/influxUtils.js +++ b/e2e/src/utils/influxUtils.js @@ -507,7 +507,7 @@ const getAuthorizations = async(userName) => { return await authsAPI.getAuthorizations(); }; -// http://localhost:9999/api/v2/labels +// http://localhost:8086/api/v2/labels // {"orgID":"8576cb897e0b4ce9","name":"MyLabel","properties":{"description":"","color":"#7CE490"}} const createLabel = async(userName, labelName, diff --git a/etc/pinger.sh b/etc/pinger.sh index 2a13195dce8..20745c186ef 100755 --- a/etc/pinger.sh +++ b/etc/pinger.sh @@ -1,5 +1,5 @@ ping_cancelled=false # Keep track of whether the loop was cancelled, or succeeded -until nc -z 127.0.0.1 9999; do :; done & +until nc -z 127.0.0.1 8086; do :; done & trap "kill $!; ping_cancelled=true" SIGINT wait $! 
# Wait for the loop to exit, one way or another trap - INT # Remove the trap, now we're done with it diff --git a/http/dashboard_test.go b/http/dashboard_test.go index 1c81bd14aa6..d19f3b645d4 100644 --- a/http/dashboard_test.go +++ b/http/dashboard_test.go @@ -1851,7 +1851,7 @@ func TestService_handlePostDashboardLabel(t *testing.T) { t.Fatalf("failed to unmarshal label mapping: %v", err) } - url := fmt.Sprintf("http://localhost:9999/api/v2/dashboards/%s/labels", tt.args.dashboardID) + url := fmt.Sprintf("http://localhost:8086/api/v2/dashboards/%s/labels", tt.args.dashboardID) r := httptest.NewRequest("POST", url, bytes.NewReader(b)) w := httptest.NewRecorder() diff --git a/http/session_test.go b/http/session_test.go index 33ba6d4b1ad..affee2757e4 100644 --- a/http/session_test.go +++ b/http/session_test.go @@ -86,7 +86,7 @@ func TestSessionHandler_handleSignin(t *testing.T) { h := NewSessionHandler(zaptest.NewLogger(t), b) w := httptest.NewRecorder() - r := httptest.NewRequest("POST", "http://localhost:9999/api/v2/signin", nil) + r := httptest.NewRequest("POST", "http://localhost:8086/api/v2/signin", nil) r.SetBasicAuth(tt.args.user, tt.args.password) h.ServeHTTP(w, r) diff --git a/http/task_service_test.go b/http/task_service_test.go index 5551593ef5e..fd95e0f9380 100644 --- a/http/task_service_test.go +++ b/http/task_service_test.go @@ -1179,7 +1179,7 @@ func TestService_handlePostTaskLabel(t *testing.T) { t.Fatalf("failed to unmarshal label mapping: %v", err) } - url := fmt.Sprintf("http://localhost:9999/api/v2/tasks/%s/labels", tt.args.taskID) + url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/labels", tt.args.taskID) r := httptest.NewRequest("POST", url, bytes.NewReader(b)) w := httptest.NewRecorder() @@ -1266,7 +1266,7 @@ func TestTaskHandler_CreateTaskWithOrgName(t *testing.T) { const script = `option task = {name:"x", every:1m} from(bucket:"b-src") |> range(start:-1m) |> to(bucket:"b-dst", org:"o")` - url := "http://localhost:9999/api/v2/tasks" + url := "http://localhost:8086/api/v2/tasks" b, err := json.Marshal(influxdb.TaskCreate{ Flux: script, @@ -1402,7 +1402,7 @@ func TestTaskHandler_Sessions(t *testing.T) { } h := newHandler(t, ts) - url := fmt.Sprintf("http://localhost:9999/api/v2/tasks/%s/runs", taskID) + url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/runs", taskID) valCtx := context.WithValue(sessionAllPermsCtx, httprouter.ParamsKey, httprouter.Params{{Key: "id", Value: taskID.String()}}) r := httptest.NewRequest("GET", url, nil).WithContext(valCtx) w := httptest.NewRecorder() @@ -1495,7 +1495,7 @@ func TestTaskHandler_Sessions(t *testing.T) { } h := newHandler(t, ts) - url := fmt.Sprintf("http://localhost:9999/api/v2/tasks/%s/runs/%s", taskID, runID) + url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/runs/%s", taskID, runID) valCtx := context.WithValue(sessionAllPermsCtx, httprouter.ParamsKey, httprouter.Params{ {Key: "id", Value: taskID.String()}, {Key: "rid", Value: runID.String()}, @@ -1590,7 +1590,7 @@ func TestTaskHandler_Sessions(t *testing.T) { } h := newHandler(t, ts) - url := fmt.Sprintf("http://localhost:9999/api/v2/tasks/%s/runs/%s/logs", taskID, runID) + url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/runs/%s/logs", taskID, runID) valCtx := context.WithValue(sessionAllPermsCtx, httprouter.ParamsKey, httprouter.Params{ {Key: "id", Value: taskID.String()}, {Key: "rid", Value: runID.String()}, @@ -1684,7 +1684,7 @@ func TestTaskHandler_Sessions(t *testing.T) { } h := newHandler(t, ts) - url := 
fmt.Sprintf("http://localhost:9999/api/v2/tasks/%s/runs/%s/retry", taskID, runID) + url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/runs/%s/retry", taskID, runID) valCtx := context.WithValue(sessionAllPermsCtx, httprouter.ParamsKey, httprouter.Params{ {Key: "id", Value: taskID.String()}, {Key: "rid", Value: runID.String()}, diff --git a/http/telegraf_test.go b/http/telegraf_test.go index 8358d4ea474..ecd8c086eea 100644 --- a/http/telegraf_test.go +++ b/http/telegraf_test.go @@ -351,8 +351,8 @@ func TestTelegrafHandler_handleGetTelegraf(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 - urls = ["http://127.0.0.1:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://127.0.0.1:8086"] ## Token for authentication. token = "no_more_secrets" @@ -437,8 +437,8 @@ func TestTelegrafHandler_handleGetTelegraf(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 - urls = ["http://127.0.0.1:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://127.0.0.1:8086"] ## Token for authentication. token = "no_more_secrets" @@ -527,8 +527,8 @@ func TestTelegrafHandler_handleGetTelegraf(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 - urls = ["http://127.0.0.1:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://127.0.0.1:8086"] ## Token for authentication. token = "no_more_secrets" @@ -613,8 +613,8 @@ func TestTelegrafHandler_handleGetTelegraf(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 - urls = ["http://127.0.0.1:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://127.0.0.1:8086"] ## Token for authentication. token = "no_more_secrets" @@ -705,8 +705,8 @@ func TestTelegrafHandler_handleGetTelegraf(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 - urls = ["http://127.0.0.1:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://127.0.0.1:8086"] ## Token for authentication. token = "no_more_secrets" @@ -791,8 +791,8 @@ func TestTelegrafHandler_handleGetTelegraf(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 - urls = ["http://127.0.0.1:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://127.0.0.1:8086"] ## Token for authentication. 
token = "no_more_secrets" diff --git a/http/variable_test.go b/http/variable_test.go index 395c35784a7..edafbe28305 100644 --- a/http/variable_test.go +++ b/http/variable_test.go @@ -863,7 +863,7 @@ func TestService_handlePostVariableLabel(t *testing.T) { t.Fatalf("failed to unmarshal label mapping: %v", err) } - url := fmt.Sprintf("http://localhost:9999/api/v2/variables/%s/labels", tt.args.variableID) + url := fmt.Sprintf("http://localhost:8086/api/v2/variables/%s/labels", tt.args.variableID) r := httptest.NewRequest("POST", url, bytes.NewReader(b)) w := httptest.NewRecorder() diff --git a/http/write_handler_test.go b/http/write_handler_test.go index 17289a98114..de65cace14d 100644 --- a/http/write_handler_test.go +++ b/http/write_handler_test.go @@ -317,7 +317,7 @@ func TestWriteHandler_handleWrite(t *testing.T) { r := httptest.NewRequest( "POST", - "http://localhost:9999/api/v2/write", + "http://localhost:8086/api/v2/write", strings.NewReader(tt.request.body), ) diff --git a/pkger/testdata/env_refs.yml b/pkger/testdata/env_refs.yml index a3f6dadcb70..ac9c6355bd6 100644 --- a/pkger/testdata/env_refs.yml +++ b/pkger/testdata/env_refs.yml @@ -85,7 +85,7 @@ spec: [agent] interval = "10s" [[outputs.influxdb_v2]] - urls = ["http://localhost:9999"] + urls = ["http://localhost:8086"] token = "$INFLUX_TOKEN" organization = "rg" bucket = "rucket_3" diff --git a/pkger/testdata/telegraf.json b/pkger/testdata/telegraf.json index 12d3012ddaf..9ef5f5d2789 100644 --- a/pkger/testdata/telegraf.json +++ b/pkger/testdata/telegraf.json @@ -32,7 +32,7 @@ "name": "label-2" } ], - "config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. 
It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:9999\n urls = [\"http://localhost:9999\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]" + "config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. 
This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:8086\n urls = [\"http://localhost:8086\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]" } }, { @@ -42,7 +42,7 @@ "name": "tele-2" }, "spec": { - "config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. 
Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:9999\n urls = [\"http://localhost:9999\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]" + "config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 
1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:8086\n urls = [\"http://localhost:8086\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]" } } ] diff --git a/pkger/testdata/telegraf.yml b/pkger/testdata/telegraf.yml index 84543f3a503..ebf4dc5c1cf 100644 --- a/pkger/testdata/telegraf.yml +++ b/pkger/testdata/telegraf.yml @@ -80,8 +80,8 @@ spec: ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. 
- ## urls exp: http://127.0.0.1:9999 - urls = ["http://localhost:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://localhost:8086"] ## Token for authentication. token = "$INFLUX_TOKEN" diff --git a/telegraf/plugins/outputs/influxdb_v2.go b/telegraf/plugins/outputs/influxdb_v2.go index d685ea02eb8..3f6171eca42 100644 --- a/telegraf/plugins/outputs/influxdb_v2.go +++ b/telegraf/plugins/outputs/influxdb_v2.go @@ -32,7 +32,7 @@ func (i *InfluxDBV2) TOML() string { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 + ## urls exp: http://127.0.0.1:8086 urls = [%s] ## Token for authentication. diff --git a/telegraf/plugins/outputs/outputs_test.go b/telegraf/plugins/outputs/outputs_test.go index f25dba1cafc..cc70e0562a6 100644 --- a/telegraf/plugins/outputs/outputs_test.go +++ b/telegraf/plugins/outputs/outputs_test.go @@ -40,7 +40,7 @@ func TestTOML(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 + ## urls exp: http://127.0.0.1:8086 urls = [] ## Token for authentication. @@ -68,8 +68,8 @@ func TestTOML(t *testing.T) { `, &InfluxDBV2{ URLs: []string{ - "http://192.168.1.10:9999", - "http://192.168.1.11:9999", + "http://192.168.1.10:8086", + "http://192.168.1.11:8086", }, Token: "tok1", Organization: "org1", @@ -79,8 +79,8 @@ func TestTOML(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 - urls = ["http://192.168.1.10:9999", "http://192.168.1.11:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://192.168.1.10:8086", "http://192.168.1.11:8086"] ## Token for authentication. 
token = "tok1" @@ -161,16 +161,16 @@ func TestDecodeTOML(t *testing.T) { name: "influxdb_v2 missing token", want: &InfluxDBV2{ URLs: []string{ - "http://localhost:9999", - "http://192.168.0.1:9999", + "http://localhost:8086", + "http://192.168.0.1:8086", }, }, wantErr: errors.New("token is missing for influxdb_v2 output plugin"), output: &InfluxDBV2{}, data: map[string]interface{}{ "urls": []interface{}{ - "http://localhost:9999", - "http://192.168.0.1:9999", + "http://localhost:8086", + "http://192.168.0.1:8086", }, }, }, @@ -178,8 +178,8 @@ func TestDecodeTOML(t *testing.T) { name: "influxdb_v2 missing org", want: &InfluxDBV2{ URLs: []string{ - "http://localhost:9999", - "http://192.168.0.1:9999", + "http://localhost:8086", + "http://192.168.0.1:8086", }, Token: "token1", }, @@ -187,8 +187,8 @@ func TestDecodeTOML(t *testing.T) { output: &InfluxDBV2{}, data: map[string]interface{}{ "urls": []interface{}{ - "http://localhost:9999", - "http://192.168.0.1:9999", + "http://localhost:8086", + "http://192.168.0.1:8086", }, "token": "token1", }, @@ -197,8 +197,8 @@ func TestDecodeTOML(t *testing.T) { name: "influxdb_v2 missing bucket", want: &InfluxDBV2{ URLs: []string{ - "http://localhost:9999", - "http://192.168.0.1:9999", + "http://localhost:8086", + "http://192.168.0.1:8086", }, Token: "token1", Organization: "org1", @@ -207,8 +207,8 @@ func TestDecodeTOML(t *testing.T) { output: &InfluxDBV2{}, data: map[string]interface{}{ "urls": []interface{}{ - "http://localhost:9999", - "http://192.168.0.1:9999", + "http://localhost:8086", + "http://192.168.0.1:8086", }, "token": "token1", "organization": "org1", @@ -218,8 +218,8 @@ func TestDecodeTOML(t *testing.T) { name: "influxdb_v2", want: &InfluxDBV2{ URLs: []string{ - "http://localhost:9999", - "http://192.168.0.1:9999", + "http://localhost:8086", + "http://192.168.0.1:8086", }, Token: "token1", Organization: "org1", @@ -228,8 +228,8 @@ func TestDecodeTOML(t *testing.T) { output: &InfluxDBV2{}, data: map[string]interface{}{ "urls": []interface{}{ - "http://localhost:9999", - "http://192.168.0.1:9999", + "http://localhost:8086", + "http://192.168.0.1:8086", }, "token": "token1", "organization": "org1", diff --git a/telegraf/plugins/plugins.go b/telegraf/plugins/plugins.go index 00a838d01b5..445193f723b 100644 --- a/telegraf/plugins/plugins.go +++ b/telegraf/plugins/plugins.go @@ -908,7 +908,7 @@ var availableInputs = `{ "type": "input", "name": "httpjson", "description": "Read flattened metrics from one or more JSON HTTP endpoints", - "config": "# Read flattened metrics from one or more JSON HTTP endpoints\n[[inputs.httpjson]]\n # alias=\"httpjson\"\n ## NOTE This plugin only reads numerical measurements, strings and booleans\n ## will be ignored.\n\n ## Name for the service being polled. Will be appended to the name of the\n ## measurement e.g. 
httpjson_webserver_stats\n ##\n ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.\n name = \"webserver_stats\"\n\n ## URL of each server in the service's cluster\n servers = [\n \"http://localhost:9999/stats/\",\n \"http://localhost:9998/stats/\",\n ]\n ## Set response_timeout (default 5 seconds)\n response_timeout = \"5s\"\n\n ## HTTP method to use: GET or POST (case-sensitive)\n method = \"GET\"\n\n ## List of tag names to extract from top-level of JSON server response\n # tag_keys = [\n # \"my_tag_1\",\n # \"my_tag_2\"\n # ]\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## HTTP parameters (all values must be strings). For \"GET\" requests, data\n ## will be included in the query. For \"POST\" requests, data will be included\n ## in the request body as \"x-www-form-urlencoded\".\n # [inputs.httpjson.parameters]\n # event_type = \"cpu_spike\"\n # threshold = \"0.75\"\n\n ## HTTP Headers (all values must be strings)\n # [inputs.httpjson.headers]\n # X-Auth-Token = \"my-xauth-token\"\n # apiVersion = \"v1\"\n\n" + "config": "# Read flattened metrics from one or more JSON HTTP endpoints\n[[inputs.httpjson]]\n # alias=\"httpjson\"\n ## NOTE This plugin only reads numerical measurements, strings and booleans\n ## will be ignored.\n\n ## Name for the service being polled. Will be appended to the name of the\n ## measurement e.g. httpjson_webserver_stats\n ##\n ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.\n name = \"webserver_stats\"\n\n ## URL of each server in the service's cluster\n servers = [\n \"http://localhost:8086/stats/\",\n \"http://localhost:9998/stats/\",\n ]\n ## Set response_timeout (default 5 seconds)\n response_timeout = \"5s\"\n\n ## HTTP method to use: GET or POST (case-sensitive)\n method = \"GET\"\n\n ## List of tag names to extract from top-level of JSON server response\n # tag_keys = [\n # \"my_tag_1\",\n # \"my_tag_2\"\n # ]\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## HTTP parameters (all values must be strings). For \"GET\" requests, data\n ## will be included in the query. 
For \"POST\" requests, data will be included\n ## in the request body as \"x-www-form-urlencoded\".\n # [inputs.httpjson.parameters]\n # event_type = \"cpu_spike\"\n # threshold = \"0.75\"\n\n ## HTTP Headers (all values must be strings)\n # [inputs.httpjson.headers]\n # X-Auth-Token = \"my-xauth-token\"\n # apiVersion = \"v1\"\n\n" }, { "type": "input", @@ -1040,7 +1040,7 @@ var availableInputs = `{ "type": "input", "name": "jolokia2_proxy", "description": "Read JMX metrics from a Jolokia REST proxy endpoint", - "config": "# Read JMX metrics from a Jolokia REST proxy endpoint\n[[inputs.jolokia2_proxy]]\n # alias=\"jolokia2_proxy\"\n # default_tag_prefix = \"\"\n # default_field_prefix = \"\"\n # default_field_separator = \".\"\n\n ## Proxy agent\n url = \"http://localhost:8080/jolokia\"\n # username = \"\"\n # password = \"\"\n # response_timeout = \"5s\"\n\n ## Optional TLS config\n # tls_ca = \"/var/private/ca.pem\"\n # tls_cert = \"/var/private/client.pem\"\n # tls_key = \"/var/private/client-key.pem\"\n # insecure_skip_verify = false\n\n ## Add proxy targets to query\n # default_target_username = \"\"\n # default_target_password = \"\"\n [[inputs.jolokia2_proxy.target]]\n url = \"service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi\"\n # username = \"\"\n # password = \"\"\n\n ## Add metrics to read\n [[inputs.jolokia2_proxy.metric]]\n name = \"java_runtime\"\n mbean = \"java.lang:type=Runtime\"\n paths = [\"Uptime\"]\n\n" + "config": "# Read JMX metrics from a Jolokia REST proxy endpoint\n[[inputs.jolokia2_proxy]]\n # alias=\"jolokia2_proxy\"\n # default_tag_prefix = \"\"\n # default_field_prefix = \"\"\n # default_field_separator = \".\"\n\n ## Proxy agent\n url = \"http://localhost:8080/jolokia\"\n # username = \"\"\n # password = \"\"\n # response_timeout = \"5s\"\n\n ## Optional TLS config\n # tls_ca = \"/var/private/ca.pem\"\n # tls_cert = \"/var/private/client.pem\"\n # tls_key = \"/var/private/client-key.pem\"\n # insecure_skip_verify = false\n\n ## Add proxy targets to query\n # default_target_username = \"\"\n # default_target_password = \"\"\n [[inputs.jolokia2_proxy.target]]\n url = \"service:jmx:rmi:///jndi/rmi://targethost:8086/jmxrmi\"\n # username = \"\"\n # password = \"\"\n\n ## Add metrics to read\n [[inputs.jolokia2_proxy.metric]]\n name = \"java_runtime\"\n mbean = \"java.lang:type=Runtime\"\n paths = [\"Uptime\"]\n\n" }, { "type": "input", @@ -1437,7 +1437,7 @@ var availableOutputs = `{ "type": "output", "name": "influxdb_v2", "description": "Configuration for sending metrics to InfluxDB", - "config": "# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" + "config": "# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" }, { "type": "output", diff --git a/telegraf_test.go b/telegraf_test.go index a9235c02014..4b2b510d877 100644 --- a/telegraf_test.go +++ b/telegraf_test.go @@ -63,7 +63,7 @@ func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) { "comment": "3", "config": { "urls": [ - "http://127.0.0.1:9999" + "http://127.0.0.1:8086" ], "token": "token1", "organization": "org", @@ -147,8 +147,8 @@ func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. 
- ## urls exp: http://127.0.0.1:9999 - urls = ["http://127.0.0.1:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://127.0.0.1:8086"] ## Token for authentication. token = "token1" @@ -175,7 +175,7 @@ func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) { func TestTelegrafConfigJSONDecodeTOML(t *testing.T) { s := `{ "name": "config 2", - "config": "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. 
The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n[[inputs.kernel]]\n[[inputs.kubernetes]]\n ## URL for the kubelet\n ## exp: http://1.1.1.1:10255\n url = \"http://1.1.1.1:12\"\n[[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:9999\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"token1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"org\"\n\n ## Destination bucket to write into.\n bucket = \"bucket\"\n" + "config": "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. 
The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n[[inputs.kernel]]\n[[inputs.kubernetes]]\n ## URL for the kubelet\n ## exp: http://1.1.1.1:10255\n url = \"http://1.1.1.1:12\"\n[[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:8086\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"token1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"org\"\n\n ## Destination bucket to write into.\n bucket = \"bucket\"\n" }` want := &TelegrafConfig{ @@ -253,8 +253,8 @@ func TestTelegrafConfigJSONDecodeTOML(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 - urls = ["http://127.0.0.1:9999"] + ## urls exp: http://127.0.0.1:8086 + urls = ["http://127.0.0.1:8086"] ## Token for authentication. token = "token1" @@ -294,7 +294,7 @@ func TestTelegrafConfigJSONCompatibleMode(t *testing.T) { cfg: &TelegrafConfig{ ID: *id1, OrgID: *id2, - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. 
It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", + Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. 
Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", }, expMeta: map[string]interface{}{"buckets": []string{}}, }, @@ -304,7 +304,7 @@ func TestTelegrafConfigJSONCompatibleMode(t *testing.T) { cfg: &TelegrafConfig{ ID: *id1, OrgID: *id2, - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. 
The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", + Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. 
This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", }, expMeta: map[string]interface{}{"buckets": []string{}}, }, @@ -314,7 +314,7 @@ func TestTelegrafConfigJSONCompatibleMode(t *testing.T) { cfg: &TelegrafConfig{ ID: *id1, OrgID: *id3, - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. 
The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", + Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. 
This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", }, expMeta: map[string]interface{}{"buckets": []string{}}, }, @@ -396,7 +396,7 @@ func TestTelegrafConfigJSON(t *testing.T) { ID: *id1, OrgID: *id2, Name: "n1", - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. 
The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.file]]\t\n ## Files to parse each interval.\n ## These accept standard unix glob matching rules, but with the addition of\n ## ** as a \"super asterisk\". ie:\n ## /var/log/**.log -> recursively find all .log files in /var/log\n ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log\n ## /var/log/apache.log -> only read the apache log file\n files = [\"f1\", \"f2\"]\n\n ## The dataformat to be read from files\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n[[outputs.file]]\n ## Files to write to, \"stdout\" is a specially handled file.\n files = [\"stdout\"]\n[[outputs.influxdb_v2]]\t\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:9999\n urls = [\"url1\", \"url2\"]\n\n ## Token for authentication.\n token = \"tok1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n", + Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. 
This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.file]]\t\n ## Files to parse each interval.\n ## These accept standard unix glob matching rules, but with the addition of\n ## ** as a \"super asterisk\". ie:\n ## /var/log/**.log -> recursively find all .log files in /var/log\n ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log\n ## /var/log/apache.log -> only read the apache log file\n files = [\"f1\", \"f2\"]\n\n ## The dataformat to be read from files\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n[[outputs.file]]\n ## Files to write to, \"stdout\" is a specially handled file.\n files = [\"stdout\"]\n[[outputs.influxdb_v2]]\t\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:8086\n urls = [\"url1\", \"url2\"]\n\n ## Token for authentication.\n token = \"tok1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n", Metadata: map[string]interface{}{"buckets": []string{}}, }, }, @@ -591,7 +591,7 @@ func TestLegacyStruct(t *testing.T) { ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 + ## urls exp: http://127.0.0.1:8086 urls = ["url1", "url2"] ## Token for authentication. 
diff --git a/ui/cypress/e2e/telegrafs.test.ts b/ui/cypress/e2e/telegrafs.test.ts index 0ff347f090b..ac3d4162f2d 100644 --- a/ui/cypress/e2e/telegrafs.test.ts +++ b/ui/cypress/e2e/telegrafs.test.ts @@ -282,10 +282,10 @@ describe('Collectors', () => { }) it('can add and delete urls', () => { - cy.getByTestID('input-field').type('http://localhost:9999') + cy.getByTestID('input-field').type('http://localhost:8086') cy.contains('Add').click() - cy.contains('http://localhost:9999').should('exist', () => { + cy.contains('http://localhost:8086').should('exist', () => { cy.getByTestID('input-field').type('http://example.com') cy.contains('Add').click() diff --git a/ui/mocks/dummyData.ts b/ui/mocks/dummyData.ts index 1f1f9d949e2..0c7e3be967d 100644 --- a/ui/mocks/dummyData.ts +++ b/ui/mocks/dummyData.ts @@ -418,7 +418,7 @@ export const influxDB2Plugin = { type: 'output', comment: 'write to influxdb v2', config: { - urls: ['http://127.0.0.1:9999'], + urls: ['http://127.0.0.1:8086'], token, organization: 'default', bucket: 'defbuck', @@ -636,7 +636,7 @@ export const setSetupParamsResponse = { status: 201, statusText: 'Created', headers: { - 'access-control-allow-origin': 'http://localhost:9999', + 'access-control-allow-origin': 'http://localhost:8086', date: 'Fri, 11 Jan 2019 22:49:33 GMT', 'access-control-allow-headers': 'Accept, Content-Type, Content-Length, Accept-Encoding, Authorization', @@ -685,7 +685,7 @@ export const scraperTargets = [ orgID: '03636a0aabb51000', organization: 'a', type: 'prometheus', - url: 'http://localhost:9999/metrics', + url: 'http://localhost:8086/metrics', }, { bucket: 'a', @@ -695,7 +695,7 @@ export const scraperTargets = [ orgID: '03636a0aabb51000', organization: 'a', type: 'prometheus', - url: 'http://localhost:9999/metrics', + url: 'http://localhost:8086/metrics', }, ] diff --git a/ui/package.json b/ui/package.json index a4b3352164a..a692105c473 100644 --- a/ui/package.json +++ b/ui/package.json @@ -29,7 +29,7 @@ "test:watch": "jest --watch --verbose false", "test:update": "jest --updateSnapshot", "test:debug": "node --inspect-brk $(npm bin)/jest --runInBand --watch --verbose false", - "test:e2e": "CYPRESS_baseUrl=http://localhost:9999 cypress run --browser chrome --reporter junit --reporter-options 'mochaFile=junit-results/test-output-[hash].xml'", + "test:e2e": "CYPRESS_baseUrl=http://localhost:8086 cypress run --browser chrome --reporter junit --reporter-options 'mochaFile=junit-results/test-output-[hash].xml'", "test:e2e:report": "junit-viewer --results=junit-results --save-file=cypress/site/junit-report.html", "test:e2e:clean": "rm junit-results/*.xml", "test:e2e:all": "yarn test:e2e:clean && yarn test:e2e; yarn test:e2e:report;", @@ -43,7 +43,7 @@ "prettier:fix": "prettier --config .prettierrc.json --write '{src,cypress}/**/*.{ts,tsx}'", "tsc": "tsc -p ./tsconfig.json --noEmit --pretty --skipLibCheck", "tsc:cypress": "tsc -p ./cypress/tsconfig.json --noEmit --pretty --skipLibCheck", - "cy": "CYPRESS_baseUrl=http://localhost:9999 cypress open", + "cy": "CYPRESS_baseUrl=http://localhost:8086 cypress open", "cy:dev": "CYPRESS_baseUrl=http://localhost:8080 cypress open", "generate": "oats ../http/swagger.yml > ./src/client/generatedRoutes.ts" }, diff --git a/ui/src/telegrafs/components/TelegrafOutputOverlay.tsx b/ui/src/telegrafs/components/TelegrafOutputOverlay.tsx index 67194b3f883..94d30c966eb 100644 --- a/ui/src/telegrafs/components/TelegrafOutputOverlay.tsx +++ b/ui/src/telegrafs/components/TelegrafOutputOverlay.tsx @@ -37,7 +37,7 @@ const TELEGRAF_OUTPUT = ` 
[[outputs.influxdb_v2]] ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:9999 + ## urls exp: http://127.0.0.1:8086 urls = ["<%= server %>"] ## Token for authentication. @@ -51,7 +51,7 @@ const TELEGRAF_OUTPUT = ` [[outputs.influxdb_v2]] ` const OUTPUT_DEFAULTS = { - server: 'http://127.0.0.1:9999', + server: 'http://127.0.0.1:8086', token: '$INFLUX_TOKEN', org: 'orgID', bucket: 'bucketID', diff --git a/ui/src/writeData/components/telegrafPlugins/httpjson.md b/ui/src/writeData/components/telegrafPlugins/httpjson.md index 19fe0144577..7f60f4ca156 100644 --- a/ui/src/writeData/components/telegrafPlugins/httpjson.md +++ b/ui/src/writeData/components/telegrafPlugins/httpjson.md @@ -19,7 +19,7 @@ Deprecated (1.6): use the [http](../http) input. ## URL of each server in the service's cluster servers = [ - "http://localhost:9999/stats/", + "http://localhost:8086/stats/", "http://localhost:9998/stats/", ] ## Set response_timeout (default 5 seconds) @@ -89,7 +89,7 @@ Given the following response body: ``` The following metric is produced: -`httpjson,server=http://localhost:9999/stats/ b_d=0.1,a=0.5,b_e=5,response_time=0.001` +`httpjson,server=http://localhost:8086/stats/ b_d=0.1,a=0.5,b_e=5,response_time=0.001` Note that only numerical values are extracted and the type is float. @@ -102,7 +102,7 @@ If `tag_keys` is included in the configuration: Then the `service` tag will also be added: -`httpjson,server=http://localhost:9999/stats/,service=service01 b_d=0.1,a=0.5,b_e=5,response_time=0.001` +`httpjson,server=http://localhost:8086/stats/,service=service01 b_d=0.1,a=0.5,b_e=5,response_time=0.001` **Array Output:** @@ -131,5 +131,5 @@ If the service returns an array of objects, one metric is be created for each ob ] ``` -`httpjson,server=http://localhost:9999/stats/,service=service01 a=0.5,b_d=0.1,b_e=5,response_time=0.003` -`httpjson,server=http://localhost:9999/stats/,service=service02 a=0.6,b_d=0.2,b_e=6,response_time=0.003` +`httpjson,server=http://localhost:8086/stats/,service=service01 a=0.5,b_d=0.1,b_e=5,response_time=0.003` +`httpjson,server=http://localhost:8086/stats/,service=service02 a=0.6,b_d=0.2,b_e=6,response_time=0.003` diff --git a/ui/webpack.dev.ts b/ui/webpack.dev.ts index 9bff4c709ba..6e27dd6d219 100644 --- a/ui/webpack.dev.ts +++ b/ui/webpack.dev.ts @@ -27,9 +27,9 @@ module.exports = merge(common, { }, compress: true, proxy: { - '/api/v2': 'http://localhost:9999', - '/debug/flush': 'http://localhost:9999', - '/oauth': 'http://localhost:9999', + '/api/v2': 'http://localhost:8086', + '/debug/flush': 'http://localhost:8086', + '/oauth': 'http://localhost:8086', }, disableHostCheck: true, host: '0.0.0.0', diff --git a/vault/README.md b/vault/README.md index e3996de9082..9063805031b 100644 --- a/vault/README.md +++ b/vault/README.md @@ -37,7 +37,7 @@ Once the vault and influxdb servers have been started and initialized, you may t ```sh curl --request GET \ - --url http://localhost:9999/api/v2/orgs//secrets \ + --url http://localhost:8086/api/v2/orgs//secrets \ --header 'authorization: Token # should return @@ -53,7 +53,7 @@ curl --request GET \ ```sh curl --request PATCH \ - --url http://localhost:9999/api/v2/orgs//secrets \ + --url http://localhost:8086/api/v2/orgs//secrets \ --header 'authorization: Token \ --header 'content-type: application/json' \ --data '{ @@ -66,7 +66,7 @@ curl --request PATCH \ ```sh curl --request GET \ - --url http://localhost:9999/api/v2/orgs//secrets \ + 
--url http://localhost:8086/api/v2/orgs/<org-id>/secrets \ --header 'authorization: Token <token>' # should return