diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml
new file mode 100644
index 0000000000000..7dc9f439d0460
--- /dev/null
+++ b/.github/workflows/semantic.yml
@@ -0,0 +1,15 @@
+---
+name: "Semantic PR and Commit Messages"
+
+on:
+  pull_request:
+    types: [opened, reopened, synchronize, edited]
+    branches:
+      - master
+
+jobs:
+  semantic:
+    uses: influxdata/validate-semantic-github-messages/.github/workflows/semantic.yml@main
+    with:
+      CHECK_PR_TITLE_OR_ONE_COMMIT: true
+
diff --git a/internal/process/process.go b/internal/process/process.go
index 3bfc3bb7e44e6..88da25168b63e 100644
--- a/internal/process/process.go
+++ b/internal/process/process.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"os"
 	"os/exec"
 	"sync"
 	"sync/atomic"
@@ -26,13 +27,14 @@ type Process struct {
 
 	name       string
 	args       []string
+	envs       []string
 	pid        int32
 	cancel     context.CancelFunc
 	mainLoopWg sync.WaitGroup
}
 
 // New creates a new process wrapper
-func New(command []string) (*Process, error) {
+func New(command []string, envs []string) (*Process, error) {
 	if len(command) == 0 {
 		return nil, errors.New("no command")
 	}
@@ -41,6 +43,7 @@ func New(command []string) (*Process, error) {
 		RestartDelay: 5 * time.Second,
 		name:         command[0],
 		args:         []string{},
+		envs:         envs,
 	}
 
 	if len(command) > 1 {
@@ -85,6 +88,10 @@ func (p *Process) Stop() {
 func (p *Process) cmdStart() error {
 	p.Cmd = exec.Command(p.name, p.args...)
 
+	if len(p.envs) > 0 {
+		p.Cmd.Env = append(os.Environ(), p.envs...)
+	}
+
 	var err error
 	p.Stdin, err = p.Cmd.StdinPipe()
 	if err != nil {
diff --git a/internal/process/process_test.go b/internal/process/process_test.go
index e07d6a46ee276..d453c73804e7e 100644
--- a/internal/process/process_test.go
+++ b/internal/process/process_test.go
@@ -27,7 +27,7 @@ func TestRestartingRebindsPipes(t *testing.T) {
 	exe, err := os.Executable()
 	require.NoError(t, err)
 
-	p, err := New([]string{exe, "-external"})
+	p, err := New([]string{exe, "-external"}, []string{"INTERNAL_PROCESS_MODE=application"})
 	p.RestartDelay = 100 * time.Nanosecond
 	p.Log = testutil.Logger{}
 	require.NoError(t, err)
@@ -62,7 +62,8 @@ var external = flag.Bool("external", false,
 
 func TestMain(m *testing.M) {
 	flag.Parse()
-	if *external {
+	runMode := os.Getenv("INTERNAL_PROCESS_MODE")
+	if *external && runMode == "application" {
 		externalProcess()
 		os.Exit(0)
 	}
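The process wrapper change above is the core of this PR: callers can now hand `New` a list of "key=value" pairs that `cmdStart` appends to the inherited environment. A minimal stand-alone sketch of that pattern follows; the helper name and example values are illustrative, not part of the PR:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// runWithEnv mirrors the cmdStart pattern: the child inherits the parent's
// environment, with extra "key=value" pairs appended. os/exec documents that
// when Env contains duplicate keys, only the last value in the slice is used,
// so appended entries override inherited ones.
func runWithEnv(name string, args, extraEnv []string) ([]byte, error) {
	cmd := exec.Command(name, args...)
	if len(extraEnv) > 0 {
		cmd.Env = append(os.Environ(), extraEnv...)
	}
	return cmd.Output()
}

func main() {
	out, err := runWithEnv("sh", []string{"-c", "echo $GREETING"},
		[]string{"GREETING=hello from the child"})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Print(string(out)) // hello from the child
}
```

The same append-to-`os.Environ()` idiom reappears in the exec plugin at the end of this diff, which is also how the test above can gate its `externalProcess` branch on `INTERNAL_PROCESS_MODE`.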
diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go
index b8abcbcd177c6..50c1f2306beab 100644
--- a/plugins/inputs/aerospike/aerospike.go
+++ b/plugins/inputs/aerospike/aerospike.go
@@ -201,7 +201,7 @@ func (a *Aerospike) parseNodeInfo(acc telegraf.Accumulator, stats map[string]str
 		if len(parts) < 2 {
 			continue
 		}
-		key := strings.Replace(parts[0], "-", "_", -1)
+		key := strings.ReplaceAll(parts[0], "-", "_")
 		nFields[key] = parseAerospikeValue(key, parts[1])
 	}
 	acc.AddFields("aerospike_node", nFields, nTags, time.Now())
@@ -244,7 +244,7 @@ func (a *Aerospike) parseNamespaceInfo(acc telegraf.Accumulator, stats map[strin
 		if len(parts) < 2 {
 			continue
 		}
-		key := strings.Replace(parts[0], "-", "_", -1)
+		key := strings.ReplaceAll(parts[0], "-", "_")
 		nFields[key] = parseAerospikeValue(key, parts[1])
 	}
 	acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
@@ -311,7 +311,7 @@ func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]stri
 			continue
 		}
 
-		key := strings.Replace(pieces[0], "-", "_", -1)
+		key := strings.ReplaceAll(pieces[0], "-", "_")
 		nFields[key] = parseAerospikeValue(key, pieces[1])
 	}
 	acc.AddFields("aerospike_set", nFields, nTags, time.Now())
@@ -403,7 +403,7 @@ func (a *Aerospike) parseHistogram(acc telegraf.Accumulator, stats map[string]st
 		}
 	}
 
-	acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.Replace(histogramType, "-", "_", -1)), nFields, nTags, time.Now())
+	acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.ReplaceAll(histogramType, "-", "_")), nFields, nTags, time.Now())
 }
 
 func splitNamespaceSet(namespaceSet string) (namespace string, set string) {
diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go
index 20cde2157d20b..9dfd8dd3284a5 100644
--- a/plugins/inputs/aliyuncms/aliyuncms.go
+++ b/plugins/inputs/aliyuncms/aliyuncms.go
@@ -473,14 +473,14 @@ func formatField(metricName string, statistic string) string {
 }
 
 func formatMeasurement(project string) string {
-	project = strings.Replace(project, "/", "_", -1)
+	project = strings.ReplaceAll(project, "/", "_")
 	project = snakeCase(project)
 	return fmt.Sprintf("aliyuncms_%s", project)
 }
 
 func snakeCase(s string) string {
 	s = internal.SnakeCase(s)
-	s = strings.Replace(s, "__", "_", -1)
+	s = strings.ReplaceAll(s, "__", "_")
 	return s
 }
diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go
index 41647539a93bf..f613a7017ac83 100644
--- a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go
+++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go
@@ -141,7 +141,7 @@ func genTagsFields(gpus map[string]GPU, system map[string]sysInfo) []metric {
 			setTagIfUsed(tags, "gpu_id", payload.GpuID)
 			setTagIfUsed(tags, "gpu_unique_id", payload.GpuUniqueID)
 
-			setIfUsed("int", fields, "driver_version", strings.Replace(system["system"].DriverVersion, ".", "", -1))
+			setIfUsed("int", fields, "driver_version", strings.ReplaceAll(system["system"].DriverVersion, ".", ""))
 			setIfUsed("int", fields, "fan_speed", payload.GpuFanSpeedPercentage)
 			setIfUsed("int64", fields, "memory_total", payload.GpuVRAMTotalMemory)
 			setIfUsed("int64", fields, "memory_used", payload.GpuVRAMTotalUsedMemory)
diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go
index 7eaf059974615..d03d22c324b43 100644
--- a/plugins/inputs/apache/apache.go
+++ b/plugins/inputs/apache/apache.go
@@ -107,7 +107,7 @@ func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
 		line := sc.Text()
 		if strings.Contains(line, ":") {
 			parts := strings.SplitN(line, ":", 2)
-			key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1])
+			key, part := strings.ReplaceAll(parts[0], " ", ""), strings.TrimSpace(parts[1])
 
 			switch key {
 			case "Scoreboard":
diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go
index db58df6fc94e8..2bf9c75fb0a1e 100644
--- a/plugins/inputs/burrow/burrow_test.go
+++ b/plugins/inputs/burrow/burrow_test.go
@@ -16,7 +16,7 @@ import (
 // remap uri to json file, eg: /v3/kafka -> ./testdata/v3_kafka.json
 func getResponseJSON(requestURI string) ([]byte, int) {
 	uri := strings.TrimLeft(requestURI, "/")
-	mappedFile := strings.Replace(uri, "/", "_", -1)
+	mappedFile := strings.ReplaceAll(uri, "/", "_")
 	jsonFile := fmt.Sprintf("./testdata/%s.json", mappedFile)
 
 	code := 200
diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go
index 6569b133fe56c..99440bdf9ae15 100644
--- a/plugins/inputs/ceph/ceph.go
+++ b/plugins/inputs/ceph/ceph.go
@@ -299,8 +299,8 @@ func (c *Ceph) execute(command string) (string, error) {
 
 	// Ceph doesn't sanitize its output, and may return invalid JSON. Patch this
 	// up for them, as having some inaccurate data is better than none.
-	output = strings.Replace(output, "-inf", "0", -1)
-	output = strings.Replace(output, "inf", "0", -1)
+	output = strings.ReplaceAll(output, "-inf", "0")
+	output = strings.ReplaceAll(output, "inf", "0")
 
 	return output, nil
 }
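The long tail of this PR is a mechanical cleanup: every `strings.Replace(s, old, new, -1)` becomes `strings.ReplaceAll(s, old, new)`. The two are equivalent by definition — in the standard library, `ReplaceAll` is implemented as `Replace` with a limit of -1, meaning "no limit" — so the swap is purely for readability. A quick sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.ReplaceAll(s, old, new) is defined as strings.Replace(s, old, new, -1):
	// a limit of -1 means "replace every occurrence", so both calls agree.
	s := "batch-size-per-node"
	fmt.Println(strings.Replace(s, "-", "_", -1)) // batch_size_per_node
	fmt.Println(strings.ReplaceAll(s, "-", "_"))  // batch_size_per_node
}
```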
Patch this // up for them, as having some inaccurate data is better than none. - output = strings.Replace(output, "-inf", "0", -1) - output = strings.Replace(output, "inf", "0", -1) + output = strings.ReplaceAll(output, "-inf", "0") + output = strings.ReplaceAll(output, "inf", "0") return output, nil } diff --git a/plugins/inputs/chrony/chrony.go b/plugins/inputs/chrony/chrony.go index 2a3242ca8ed5d..1fbb4e2881692 100644 --- a/plugins/inputs/chrony/chrony.go +++ b/plugins/inputs/chrony/chrony.go @@ -82,7 +82,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string if len(stats) < 2 { return nil, nil, fmt.Errorf("unexpected output from chronyc, expected ':' in %s", out) } - name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1)) + name := strings.ToLower(strings.ReplaceAll(strings.TrimSpace(stats[0]), " ", "_")) // ignore reference time if strings.Contains(name, "ref_time") { continue diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 0194fcb1b6898..547237bd98d4a 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -136,7 +136,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { // Fill extra tags c.extraTags = make(map[string]map[string]struct{}) for _, tag := range c.EmbeddedTags { - dir := strings.Replace(path.Dir(tag), "-", "_", -1) + dir := strings.ReplaceAll(path.Dir(tag), "-", "_") if _, hasKey := c.extraTags[dir]; !hasKey { c.extraTags[dir] = make(map[string]struct{}) } @@ -441,7 +441,7 @@ func decodeTag(field *telemetry.TelemetryField) string { // Recursively parse tag fields func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemetry.TelemetryField, prefix string) { - localname := strings.Replace(field.Name, "-", "_", -1) + localname := strings.ReplaceAll(field.Name, "-", "_") name := localname if len(localname) == 0 { name = prefix @@ -529,7 +529,7 @@ func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGroup func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, encodingPath string, tags map[string]string, timestamp time.Time) { - name := strings.Replace(field.Name, "-", "_", -1) + name := strings.ReplaceAll(field.Name, "-", "_") if (name == "modTs" || name == "createTs") && decodeValue(field) == "never" { return @@ -540,7 +540,7 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie name = prefix + "/" + name } - extraTags := c.extraTags[strings.Replace(encodingPath, "-", "_", -1)+"/"+name] + extraTags := c.extraTags[strings.ReplaceAll(encodingPath, "-", "_")+"/"+name] if value := decodeValue(field); value != nil { // Do alias lookup, to shorten measurement names @@ -571,7 +571,7 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie if len(extraTags) > 0 { for _, subfield := range field.Fields { if _, isExtraTag := extraTags[subfield.Name]; isExtraTag { - tags[name+"/"+strings.Replace(subfield.Name, "-", "_", -1)] = decodeTag(subfield) + tags[name+"/"+strings.ReplaceAll(subfield.Name, "-", "_")] = decodeTag(subfield) } } } diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 7c0215ebbcac1..b75561cbf0266 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -522,15 
diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go
index 2d34a4c6d10a2..46dc2261ba255 100644
--- a/plugins/inputs/couchbase/couchbase.go
+++ b/plugins/inputs/couchbase/couchbase.go
@@ -105,222 +105,220 @@ func (cb *Couchbase) gatherDetailedBucketStats(server, bucket string, fields map
 		return err
 	}
 
-	// Use length of any set of metrics, they will all be the same length.
-	lastEntry := len(extendedBucketStats.Op.Samples.CouchTotalDiskSize) - 1
-	cb.addBucketFieldChecked(fields, "couch_total_disk_size", extendedBucketStats.Op.Samples.CouchTotalDiskSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_docs_fragmentation", extendedBucketStats.Op.Samples.CouchDocsFragmentation, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_views_fragmentation", extendedBucketStats.Op.Samples.CouchViewsFragmentation, lastEntry)
-	cb.addBucketFieldChecked(fields, "hit_ratio", extendedBucketStats.Op.Samples.HitRatio, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_cache_miss_rate", extendedBucketStats.Op.Samples.EpCacheMissRate, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_resident_items_rate", extendedBucketStats.Op.Samples.EpResidentItemsRate, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_avg_active_queue_age", extendedBucketStats.Op.Samples.VbAvgActiveQueueAge, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_avg_replica_queue_age", extendedBucketStats.Op.Samples.VbAvgReplicaQueueAge, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_avg_pending_queue_age", extendedBucketStats.Op.Samples.VbAvgPendingQueueAge, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_avg_total_queue_age", extendedBucketStats.Op.Samples.VbAvgTotalQueueAge, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_resident_items_ratio", extendedBucketStats.Op.Samples.VbActiveResidentItemsRatio, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_resident_items_ratio", extendedBucketStats.Op.Samples.VbReplicaResidentItemsRatio, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_resident_items_ratio", extendedBucketStats.Op.Samples.VbPendingResidentItemsRatio, lastEntry)
-	cb.addBucketFieldChecked(fields, "avg_disk_update_time", extendedBucketStats.Op.Samples.AvgDiskUpdateTime, lastEntry)
-	cb.addBucketFieldChecked(fields, "avg_disk_commit_time", extendedBucketStats.Op.Samples.AvgDiskCommitTime, lastEntry)
-	cb.addBucketFieldChecked(fields, "avg_bg_wait_time", extendedBucketStats.Op.Samples.AvgBgWaitTime, lastEntry)
-	cb.addBucketFieldChecked(fields, "avg_active_timestamp_drift", extendedBucketStats.Op.Samples.AvgActiveTimestampDrift, lastEntry)
-	cb.addBucketFieldChecked(fields, "avg_replica_timestamp_drift", extendedBucketStats.Op.Samples.AvgReplicaTimestampDrift, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsRemaining, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesProducerCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBacklogSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsSent, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBytes, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_backoff", extendedBucketStats.Op.Samples.EpDcpViewsIndexesBackoff, lastEntry)
-	cb.addBucketFieldChecked(fields, "bg_wait_count", extendedBucketStats.Op.Samples.BgWaitCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "bg_wait_total", extendedBucketStats.Op.Samples.BgWaitTotal, lastEntry)
-	cb.addBucketFieldChecked(fields, "bytes_read", extendedBucketStats.Op.Samples.BytesRead, lastEntry)
-	cb.addBucketFieldChecked(fields, "bytes_written", extendedBucketStats.Op.Samples.BytesWritten, lastEntry)
-	cb.addBucketFieldChecked(fields, "cas_badval", extendedBucketStats.Op.Samples.CasBadval, lastEntry)
-	cb.addBucketFieldChecked(fields, "cas_hits", extendedBucketStats.Op.Samples.CasHits, lastEntry)
-	cb.addBucketFieldChecked(fields, "cas_misses", extendedBucketStats.Op.Samples.CasMisses, lastEntry)
-	cb.addBucketFieldChecked(fields, "cmd_get", extendedBucketStats.Op.Samples.CmdGet, lastEntry)
-	cb.addBucketFieldChecked(fields, "cmd_lookup", extendedBucketStats.Op.Samples.CmdLookup, lastEntry)
-	cb.addBucketFieldChecked(fields, "cmd_set", extendedBucketStats.Op.Samples.CmdSet, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_docs_actual_disk_size", extendedBucketStats.Op.Samples.CouchDocsActualDiskSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_docs_data_size", extendedBucketStats.Op.Samples.CouchDocsDataSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_docs_disk_size", extendedBucketStats.Op.Samples.CouchDocsDiskSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_spatial_data_size", extendedBucketStats.Op.Samples.CouchSpatialDataSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_spatial_disk_size", extendedBucketStats.Op.Samples.CouchSpatialDiskSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_spatial_ops", extendedBucketStats.Op.Samples.CouchSpatialOps, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_views_actual_disk_size", extendedBucketStats.Op.Samples.CouchViewsActualDiskSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_views_data_size", extendedBucketStats.Op.Samples.CouchViewsDataSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_views_disk_size", extendedBucketStats.Op.Samples.CouchViewsDiskSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "couch_views_ops", extendedBucketStats.Op.Samples.CouchViewsOps, lastEntry)
-	cb.addBucketFieldChecked(fields, "curr_connections", extendedBucketStats.Op.Samples.CurrConnections, lastEntry)
-	cb.addBucketFieldChecked(fields, "curr_items", extendedBucketStats.Op.Samples.CurrItems, lastEntry)
-	cb.addBucketFieldChecked(fields, "curr_items_tot", extendedBucketStats.Op.Samples.CurrItemsTot, lastEntry)
-	cb.addBucketFieldChecked(fields, "decr_hits", extendedBucketStats.Op.Samples.DecrHits, lastEntry)
-	cb.addBucketFieldChecked(fields, "decr_misses", extendedBucketStats.Op.Samples.DecrMisses, lastEntry)
-	cb.addBucketFieldChecked(fields, "delete_hits", extendedBucketStats.Op.Samples.DeleteHits, lastEntry)
-	cb.addBucketFieldChecked(fields, "delete_misses", extendedBucketStats.Op.Samples.DeleteMisses, lastEntry)
-	cb.addBucketFieldChecked(fields, "disk_commit_count", extendedBucketStats.Op.Samples.DiskCommitCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "disk_commit_total", extendedBucketStats.Op.Samples.DiskCommitTotal, lastEntry)
-	cb.addBucketFieldChecked(fields, "disk_update_count", extendedBucketStats.Op.Samples.DiskUpdateCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "disk_update_total", extendedBucketStats.Op.Samples.DiskUpdateTotal, lastEntry)
-	cb.addBucketFieldChecked(fields, "disk_write_queue", extendedBucketStats.Op.Samples.DiskWriteQueue, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_active_ahead_exceptions", extendedBucketStats.Op.Samples.EpActiveAheadExceptions, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_active_hlc_drift", extendedBucketStats.Op.Samples.EpActiveHlcDrift, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_active_hlc_drift_count", extendedBucketStats.Op.Samples.EpActiveHlcDriftCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_bg_fetched", extendedBucketStats.Op.Samples.EpBgFetched, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_clock_cas_drift_threshold_exceeded", extendedBucketStats.Op.Samples.EpClockCasDriftThresholdExceeded, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_data_read_failed", extendedBucketStats.Op.Samples.EpDataReadFailed, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_data_write_failed", extendedBucketStats.Op.Samples.EpDataWriteFailed, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_2i_backoff", extendedBucketStats.Op.Samples.EpDcp2IBackoff, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_2i_count", extendedBucketStats.Op.Samples.EpDcp2ICount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_remaining", extendedBucketStats.Op.Samples.EpDcp2IItemsRemaining, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_sent", extendedBucketStats.Op.Samples.EpDcp2IItemsSent, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_2i_producer_count", extendedBucketStats.Op.Samples.EpDcp2IProducerCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_backlog_size", extendedBucketStats.Op.Samples.EpDcp2ITotalBacklogSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_bytes", extendedBucketStats.Op.Samples.EpDcp2ITotalBytes, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_backoff", extendedBucketStats.Op.Samples.EpDcpCbasBackoff, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_count", extendedBucketStats.Op.Samples.EpDcpCbasCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_remaining", extendedBucketStats.Op.Samples.EpDcpCbasItemsRemaining, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_sent", extendedBucketStats.Op.Samples.EpDcpCbasItemsSent, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_producer_count", extendedBucketStats.Op.Samples.EpDcpCbasProducerCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpCbasTotalBacklogSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_bytes", extendedBucketStats.Op.Samples.EpDcpCbasTotalBytes, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_backoff", extendedBucketStats.Op.Samples.EpDcpEventingBackoff, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_count", extendedBucketStats.Op.Samples.EpDcpEventingCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_remaining", extendedBucketStats.Op.Samples.EpDcpEventingItemsRemaining, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_sent", extendedBucketStats.Op.Samples.EpDcpEventingItemsSent, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_producer_count", extendedBucketStats.Op.Samples.EpDcpEventingProducerCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpEventingTotalBacklogSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_bytes", extendedBucketStats.Op.Samples.EpDcpEventingTotalBytes, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_fts_backoff", extendedBucketStats.Op.Samples.EpDcpFtsBackoff, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_fts_count", extendedBucketStats.Op.Samples.EpDcpFtsCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_remaining", extendedBucketStats.Op.Samples.EpDcpFtsItemsRemaining, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_sent", extendedBucketStats.Op.Samples.EpDcpFtsItemsSent, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_fts_producer_count", extendedBucketStats.Op.Samples.EpDcpFtsProducerCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpFtsTotalBacklogSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_bytes", extendedBucketStats.Op.Samples.EpDcpFtsTotalBytes, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_other_backoff", extendedBucketStats.Op.Samples.EpDcpOtherBackoff, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_other_count", extendedBucketStats.Op.Samples.EpDcpOtherCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_other_items_remaining", extendedBucketStats.Op.Samples.EpDcpOtherItemsRemaining, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_other_items_sent", extendedBucketStats.Op.Samples.EpDcpOtherItemsSent, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_other_producer_count", extendedBucketStats.Op.Samples.EpDcpOtherProducerCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_other_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpOtherTotalBacklogSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_other_total_bytes", extendedBucketStats.Op.Samples.EpDcpOtherTotalBytes, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_replica_backoff", extendedBucketStats.Op.Samples.EpDcpReplicaBackoff, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_replica_count", extendedBucketStats.Op.Samples.EpDcpReplicaCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_remaining", extendedBucketStats.Op.Samples.EpDcpReplicaItemsRemaining, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_sent", extendedBucketStats.Op.Samples.EpDcpReplicaItemsSent, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_replica_producer_count", extendedBucketStats.Op.Samples.EpDcpReplicaProducerCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBacklogSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_bytes", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBytes, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views_backoff", extendedBucketStats.Op.Samples.EpDcpViewsBackoff, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views_count", extendedBucketStats.Op.Samples.EpDcpViewsCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsItemsRemaining, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsItemsSent, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsProducerCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsTotalBacklogSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_views_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsTotalBytes, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_backoff", extendedBucketStats.Op.Samples.EpDcpXdcrBackoff, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_count", extendedBucketStats.Op.Samples.EpDcpXdcrCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_remaining", extendedBucketStats.Op.Samples.EpDcpXdcrItemsRemaining, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_sent", extendedBucketStats.Op.Samples.EpDcpXdcrItemsSent, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_producer_count", extendedBucketStats.Op.Samples.EpDcpXdcrProducerCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBacklogSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_bytes", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBytes, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_diskqueue_drain", extendedBucketStats.Op.Samples.EpDiskqueueDrain, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_diskqueue_fill", extendedBucketStats.Op.Samples.EpDiskqueueFill, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_diskqueue_items", extendedBucketStats.Op.Samples.EpDiskqueueItems, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_flusher_todo", extendedBucketStats.Op.Samples.EpFlusherTodo, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_item_commit_failed", extendedBucketStats.Op.Samples.EpItemCommitFailed, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_kv_size", extendedBucketStats.Op.Samples.EpKvSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_max_size", extendedBucketStats.Op.Samples.EpMaxSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_mem_high_wat", extendedBucketStats.Op.Samples.EpMemHighWat, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_mem_low_wat", extendedBucketStats.Op.Samples.EpMemLowWat, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_meta_data_memory", extendedBucketStats.Op.Samples.EpMetaDataMemory, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_num_non_resident", extendedBucketStats.Op.Samples.EpNumNonResident, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_num_ops_del_meta", extendedBucketStats.Op.Samples.EpNumOpsDelMeta, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_num_ops_del_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsDelRetMeta, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_num_ops_get_meta", extendedBucketStats.Op.Samples.EpNumOpsGetMeta, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_num_ops_set_meta", extendedBucketStats.Op.Samples.EpNumOpsSetMeta, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_num_ops_set_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsSetRetMeta, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_num_value_ejects", extendedBucketStats.Op.Samples.EpNumValueEjects, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_oom_errors", extendedBucketStats.Op.Samples.EpOomErrors, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_ops_create", extendedBucketStats.Op.Samples.EpOpsCreate, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_ops_update", extendedBucketStats.Op.Samples.EpOpsUpdate, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_overhead", extendedBucketStats.Op.Samples.EpOverhead, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_queue_size", extendedBucketStats.Op.Samples.EpQueueSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_replica_ahead_exceptions", extendedBucketStats.Op.Samples.EpReplicaAheadExceptions, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift", extendedBucketStats.Op.Samples.EpReplicaHlcDrift, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift_count", extendedBucketStats.Op.Samples.EpReplicaHlcDriftCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_tmp_oom_errors", extendedBucketStats.Op.Samples.EpTmpOomErrors, lastEntry)
-	cb.addBucketFieldChecked(fields, "ep_vb_total", extendedBucketStats.Op.Samples.EpVbTotal, lastEntry)
-	cb.addBucketFieldChecked(fields, "evictions", extendedBucketStats.Op.Samples.Evictions, lastEntry)
-	cb.addBucketFieldChecked(fields, "get_hits", extendedBucketStats.Op.Samples.GetHits, lastEntry)
-	cb.addBucketFieldChecked(fields, "get_misses", extendedBucketStats.Op.Samples.GetMisses, lastEntry)
-	cb.addBucketFieldChecked(fields, "incr_hits", extendedBucketStats.Op.Samples.IncrHits, lastEntry)
-	cb.addBucketFieldChecked(fields, "incr_misses", extendedBucketStats.Op.Samples.IncrMisses, lastEntry)
-	cb.addBucketFieldChecked(fields, "misses", extendedBucketStats.Op.Samples.Misses, lastEntry)
-	cb.addBucketFieldChecked(fields, "ops", extendedBucketStats.Op.Samples.Ops, lastEntry)
-	cb.addBucketFieldChecked(fields, "timestamp", extendedBucketStats.Op.Samples.Timestamp, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_eject", extendedBucketStats.Op.Samples.VbActiveEject, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_itm_memory", extendedBucketStats.Op.Samples.VbActiveItmMemory, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_meta_data_memory", extendedBucketStats.Op.Samples.VbActiveMetaDataMemory, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_num", extendedBucketStats.Op.Samples.VbActiveNum, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_num_non_resident", extendedBucketStats.Op.Samples.VbActiveNumNonResident, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_ops_create", extendedBucketStats.Op.Samples.VbActiveOpsCreate, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_ops_update", extendedBucketStats.Op.Samples.VbActiveOpsUpdate, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_queue_age", extendedBucketStats.Op.Samples.VbActiveQueueAge, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_queue_drain", extendedBucketStats.Op.Samples.VbActiveQueueDrain, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_queue_fill", extendedBucketStats.Op.Samples.VbActiveQueueFill, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_queue_size", extendedBucketStats.Op.Samples.VbActiveQueueSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_sync_write_aborted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAbortedCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_sync_write_accepted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAcceptedCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_active_sync_write_committed_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteCommittedCount, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_curr_items", extendedBucketStats.Op.Samples.VbPendingCurrItems, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_eject", extendedBucketStats.Op.Samples.VbPendingEject, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_itm_memory", extendedBucketStats.Op.Samples.VbPendingItmMemory, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_meta_data_memory", extendedBucketStats.Op.Samples.VbPendingMetaDataMemory, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_num", extendedBucketStats.Op.Samples.VbPendingNum, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_num_non_resident", extendedBucketStats.Op.Samples.VbPendingNumNonResident, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_ops_create", extendedBucketStats.Op.Samples.VbPendingOpsCreate, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_ops_update", extendedBucketStats.Op.Samples.VbPendingOpsUpdate, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_queue_age", extendedBucketStats.Op.Samples.VbPendingQueueAge, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_queue_drain", extendedBucketStats.Op.Samples.VbPendingQueueDrain, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_queue_fill", extendedBucketStats.Op.Samples.VbPendingQueueFill, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_pending_queue_size", extendedBucketStats.Op.Samples.VbPendingQueueSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_curr_items", extendedBucketStats.Op.Samples.VbReplicaCurrItems, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_eject", extendedBucketStats.Op.Samples.VbReplicaEject, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_itm_memory", extendedBucketStats.Op.Samples.VbReplicaItmMemory, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_meta_data_memory", extendedBucketStats.Op.Samples.VbReplicaMetaDataMemory, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_num", extendedBucketStats.Op.Samples.VbReplicaNum, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_num_non_resident", extendedBucketStats.Op.Samples.VbReplicaNumNonResident, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_ops_create", extendedBucketStats.Op.Samples.VbReplicaOpsCreate, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_ops_update", extendedBucketStats.Op.Samples.VbReplicaOpsUpdate, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_queue_age", extendedBucketStats.Op.Samples.VbReplicaQueueAge, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_queue_drain", extendedBucketStats.Op.Samples.VbReplicaQueueDrain, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_queue_fill", extendedBucketStats.Op.Samples.VbReplicaQueueFill, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_replica_queue_size", extendedBucketStats.Op.Samples.VbReplicaQueueSize, lastEntry)
-	cb.addBucketFieldChecked(fields, "vb_total_queue_age", extendedBucketStats.Op.Samples.VbTotalQueueAge, lastEntry)
-	cb.addBucketFieldChecked(fields, "xdc_ops", extendedBucketStats.Op.Samples.XdcOps, lastEntry)
-	cb.addBucketFieldChecked(fields, "allocstall", extendedBucketStats.Op.Samples.Allocstall, lastEntry)
-	cb.addBucketFieldChecked(fields, "cpu_cores_available", extendedBucketStats.Op.Samples.CPUCoresAvailable, lastEntry)
-	cb.addBucketFieldChecked(fields, "cpu_irq_rate", extendedBucketStats.Op.Samples.CPUIrqRate, lastEntry)
-	cb.addBucketFieldChecked(fields, "cpu_stolen_rate", extendedBucketStats.Op.Samples.CPUStolenRate, lastEntry)
-	cb.addBucketFieldChecked(fields, "cpu_sys_rate", extendedBucketStats.Op.Samples.CPUSysRate, lastEntry)
-	cb.addBucketFieldChecked(fields, "cpu_user_rate", extendedBucketStats.Op.Samples.CPUUserRate, lastEntry)
-	cb.addBucketFieldChecked(fields, "cpu_utilization_rate", extendedBucketStats.Op.Samples.CPUUtilizationRate, lastEntry)
-	cb.addBucketFieldChecked(fields, "hibernated_requests", extendedBucketStats.Op.Samples.HibernatedRequests, lastEntry)
-	cb.addBucketFieldChecked(fields, "hibernated_waked", extendedBucketStats.Op.Samples.HibernatedWaked, lastEntry)
-	cb.addBucketFieldChecked(fields, "mem_actual_free", extendedBucketStats.Op.Samples.MemActualFree, lastEntry)
-	cb.addBucketFieldChecked(fields, "mem_actual_used", extendedBucketStats.Op.Samples.MemActualUsed, lastEntry)
-	cb.addBucketFieldChecked(fields, "mem_free", extendedBucketStats.Op.Samples.MemFree, lastEntry)
-	cb.addBucketFieldChecked(fields, "mem_limit", extendedBucketStats.Op.Samples.MemLimit, lastEntry)
-	cb.addBucketFieldChecked(fields, "mem_total", extendedBucketStats.Op.Samples.MemTotal, lastEntry)
-	cb.addBucketFieldChecked(fields, "mem_used_sys", extendedBucketStats.Op.Samples.MemUsedSys, lastEntry)
-	cb.addBucketFieldChecked(fields, "odp_report_failed", extendedBucketStats.Op.Samples.OdpReportFailed, lastEntry)
-	cb.addBucketFieldChecked(fields, "rest_requests", extendedBucketStats.Op.Samples.RestRequests, lastEntry)
-	cb.addBucketFieldChecked(fields, "swap_total", extendedBucketStats.Op.Samples.SwapTotal, lastEntry)
-	cb.addBucketFieldChecked(fields, "swap_used", extendedBucketStats.Op.Samples.SwapUsed, lastEntry)
+	cb.addBucketFieldChecked(fields, "couch_total_disk_size", extendedBucketStats.Op.Samples.CouchTotalDiskSize)
+	cb.addBucketFieldChecked(fields, "couch_docs_fragmentation", extendedBucketStats.Op.Samples.CouchDocsFragmentation)
+	cb.addBucketFieldChecked(fields, "couch_views_fragmentation", extendedBucketStats.Op.Samples.CouchViewsFragmentation)
+	cb.addBucketFieldChecked(fields, "hit_ratio", extendedBucketStats.Op.Samples.HitRatio)
+	cb.addBucketFieldChecked(fields, "ep_cache_miss_rate", extendedBucketStats.Op.Samples.EpCacheMissRate)
+	cb.addBucketFieldChecked(fields, "ep_resident_items_rate", extendedBucketStats.Op.Samples.EpResidentItemsRate)
+	cb.addBucketFieldChecked(fields, "vb_avg_active_queue_age", extendedBucketStats.Op.Samples.VbAvgActiveQueueAge)
+	cb.addBucketFieldChecked(fields, "vb_avg_replica_queue_age", extendedBucketStats.Op.Samples.VbAvgReplicaQueueAge)
+	cb.addBucketFieldChecked(fields, "vb_avg_pending_queue_age", extendedBucketStats.Op.Samples.VbAvgPendingQueueAge)
+	cb.addBucketFieldChecked(fields, "vb_avg_total_queue_age", extendedBucketStats.Op.Samples.VbAvgTotalQueueAge)
+	cb.addBucketFieldChecked(fields, "vb_active_resident_items_ratio", extendedBucketStats.Op.Samples.VbActiveResidentItemsRatio)
+	cb.addBucketFieldChecked(fields, "vb_replica_resident_items_ratio", extendedBucketStats.Op.Samples.VbReplicaResidentItemsRatio)
+	cb.addBucketFieldChecked(fields, "vb_pending_resident_items_ratio", extendedBucketStats.Op.Samples.VbPendingResidentItemsRatio)
+	cb.addBucketFieldChecked(fields, "avg_disk_update_time", extendedBucketStats.Op.Samples.AvgDiskUpdateTime)
+	cb.addBucketFieldChecked(fields, "avg_disk_commit_time", extendedBucketStats.Op.Samples.AvgDiskCommitTime)
+	cb.addBucketFieldChecked(fields, "avg_bg_wait_time", extendedBucketStats.Op.Samples.AvgBgWaitTime)
+	cb.addBucketFieldChecked(fields, "avg_active_timestamp_drift", extendedBucketStats.Op.Samples.AvgActiveTimestampDrift)
+	cb.addBucketFieldChecked(fields, "avg_replica_timestamp_drift", extendedBucketStats.Op.Samples.AvgReplicaTimestampDrift)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsRemaining)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesProducerCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBacklogSize)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsSent)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBytes)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_backoff", extendedBucketStats.Op.Samples.EpDcpViewsIndexesBackoff)
+	cb.addBucketFieldChecked(fields, "bg_wait_count", extendedBucketStats.Op.Samples.BgWaitCount)
+	cb.addBucketFieldChecked(fields, "bg_wait_total", extendedBucketStats.Op.Samples.BgWaitTotal)
+	cb.addBucketFieldChecked(fields, "bytes_read", extendedBucketStats.Op.Samples.BytesRead)
+	cb.addBucketFieldChecked(fields, "bytes_written", extendedBucketStats.Op.Samples.BytesWritten)
+	cb.addBucketFieldChecked(fields, "cas_badval", extendedBucketStats.Op.Samples.CasBadval)
+	cb.addBucketFieldChecked(fields, "cas_hits", extendedBucketStats.Op.Samples.CasHits)
+	cb.addBucketFieldChecked(fields, "cas_misses", extendedBucketStats.Op.Samples.CasMisses)
+	cb.addBucketFieldChecked(fields, "cmd_get", extendedBucketStats.Op.Samples.CmdGet)
+	cb.addBucketFieldChecked(fields, "cmd_lookup", extendedBucketStats.Op.Samples.CmdLookup)
+	cb.addBucketFieldChecked(fields, "cmd_set", extendedBucketStats.Op.Samples.CmdSet)
+	cb.addBucketFieldChecked(fields, "couch_docs_actual_disk_size", extendedBucketStats.Op.Samples.CouchDocsActualDiskSize)
+	cb.addBucketFieldChecked(fields, "couch_docs_data_size", extendedBucketStats.Op.Samples.CouchDocsDataSize)
+	cb.addBucketFieldChecked(fields, "couch_docs_disk_size", extendedBucketStats.Op.Samples.CouchDocsDiskSize)
+	cb.addBucketFieldChecked(fields, "couch_spatial_data_size", extendedBucketStats.Op.Samples.CouchSpatialDataSize)
+	cb.addBucketFieldChecked(fields, "couch_spatial_disk_size", extendedBucketStats.Op.Samples.CouchSpatialDiskSize)
+	cb.addBucketFieldChecked(fields, "couch_spatial_ops", extendedBucketStats.Op.Samples.CouchSpatialOps)
+	cb.addBucketFieldChecked(fields, "couch_views_actual_disk_size", extendedBucketStats.Op.Samples.CouchViewsActualDiskSize)
+	cb.addBucketFieldChecked(fields, "couch_views_data_size", extendedBucketStats.Op.Samples.CouchViewsDataSize)
+	cb.addBucketFieldChecked(fields, "couch_views_disk_size", extendedBucketStats.Op.Samples.CouchViewsDiskSize)
+	cb.addBucketFieldChecked(fields, "couch_views_ops", extendedBucketStats.Op.Samples.CouchViewsOps)
+	cb.addBucketFieldChecked(fields, "curr_connections", extendedBucketStats.Op.Samples.CurrConnections)
+	cb.addBucketFieldChecked(fields, "curr_items", extendedBucketStats.Op.Samples.CurrItems)
+	cb.addBucketFieldChecked(fields, "curr_items_tot", extendedBucketStats.Op.Samples.CurrItemsTot)
+	cb.addBucketFieldChecked(fields, "decr_hits", extendedBucketStats.Op.Samples.DecrHits)
+	cb.addBucketFieldChecked(fields, "decr_misses", extendedBucketStats.Op.Samples.DecrMisses)
+	cb.addBucketFieldChecked(fields, "delete_hits", extendedBucketStats.Op.Samples.DeleteHits)
+	cb.addBucketFieldChecked(fields, "delete_misses", extendedBucketStats.Op.Samples.DeleteMisses)
+	cb.addBucketFieldChecked(fields, "disk_commit_count", extendedBucketStats.Op.Samples.DiskCommitCount)
+	cb.addBucketFieldChecked(fields, "disk_commit_total", extendedBucketStats.Op.Samples.DiskCommitTotal)
+	cb.addBucketFieldChecked(fields, "disk_update_count", extendedBucketStats.Op.Samples.DiskUpdateCount)
+	cb.addBucketFieldChecked(fields, "disk_update_total", extendedBucketStats.Op.Samples.DiskUpdateTotal)
+	cb.addBucketFieldChecked(fields, "disk_write_queue", extendedBucketStats.Op.Samples.DiskWriteQueue)
+	cb.addBucketFieldChecked(fields, "ep_active_ahead_exceptions", extendedBucketStats.Op.Samples.EpActiveAheadExceptions)
+	cb.addBucketFieldChecked(fields, "ep_active_hlc_drift", extendedBucketStats.Op.Samples.EpActiveHlcDrift)
+	cb.addBucketFieldChecked(fields, "ep_active_hlc_drift_count", extendedBucketStats.Op.Samples.EpActiveHlcDriftCount)
+	cb.addBucketFieldChecked(fields, "ep_bg_fetched", extendedBucketStats.Op.Samples.EpBgFetched)
+	cb.addBucketFieldChecked(fields, "ep_clock_cas_drift_threshold_exceeded", extendedBucketStats.Op.Samples.EpClockCasDriftThresholdExceeded)
+	cb.addBucketFieldChecked(fields, "ep_data_read_failed", extendedBucketStats.Op.Samples.EpDataReadFailed)
+	cb.addBucketFieldChecked(fields, "ep_data_write_failed", extendedBucketStats.Op.Samples.EpDataWriteFailed)
+	cb.addBucketFieldChecked(fields, "ep_dcp_2i_backoff", extendedBucketStats.Op.Samples.EpDcp2IBackoff)
+	cb.addBucketFieldChecked(fields, "ep_dcp_2i_count", extendedBucketStats.Op.Samples.EpDcp2ICount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_remaining", extendedBucketStats.Op.Samples.EpDcp2IItemsRemaining)
+	cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_sent", extendedBucketStats.Op.Samples.EpDcp2IItemsSent)
+	cb.addBucketFieldChecked(fields, "ep_dcp_2i_producer_count", extendedBucketStats.Op.Samples.EpDcp2IProducerCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_backlog_size", extendedBucketStats.Op.Samples.EpDcp2ITotalBacklogSize)
+	cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_bytes", extendedBucketStats.Op.Samples.EpDcp2ITotalBytes)
+	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_backoff", extendedBucketStats.Op.Samples.EpDcpCbasBackoff)
+	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_count", extendedBucketStats.Op.Samples.EpDcpCbasCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_remaining", extendedBucketStats.Op.Samples.EpDcpCbasItemsRemaining)
+	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_sent", extendedBucketStats.Op.Samples.EpDcpCbasItemsSent)
+	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_producer_count", extendedBucketStats.Op.Samples.EpDcpCbasProducerCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpCbasTotalBacklogSize)
+	cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_bytes", extendedBucketStats.Op.Samples.EpDcpCbasTotalBytes)
+	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_backoff", extendedBucketStats.Op.Samples.EpDcpEventingBackoff)
+	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_count", extendedBucketStats.Op.Samples.EpDcpEventingCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_remaining", extendedBucketStats.Op.Samples.EpDcpEventingItemsRemaining)
+	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_sent", extendedBucketStats.Op.Samples.EpDcpEventingItemsSent)
+	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_producer_count", extendedBucketStats.Op.Samples.EpDcpEventingProducerCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpEventingTotalBacklogSize)
+	cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_bytes", extendedBucketStats.Op.Samples.EpDcpEventingTotalBytes)
+	cb.addBucketFieldChecked(fields, "ep_dcp_fts_backoff", extendedBucketStats.Op.Samples.EpDcpFtsBackoff)
+	cb.addBucketFieldChecked(fields, "ep_dcp_fts_count", extendedBucketStats.Op.Samples.EpDcpFtsCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_remaining", extendedBucketStats.Op.Samples.EpDcpFtsItemsRemaining)
+	cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_sent", extendedBucketStats.Op.Samples.EpDcpFtsItemsSent)
+	cb.addBucketFieldChecked(fields, "ep_dcp_fts_producer_count", extendedBucketStats.Op.Samples.EpDcpFtsProducerCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpFtsTotalBacklogSize)
+	cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_bytes", extendedBucketStats.Op.Samples.EpDcpFtsTotalBytes)
+	cb.addBucketFieldChecked(fields, "ep_dcp_other_backoff", extendedBucketStats.Op.Samples.EpDcpOtherBackoff)
+	cb.addBucketFieldChecked(fields, "ep_dcp_other_count", extendedBucketStats.Op.Samples.EpDcpOtherCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_other_items_remaining", extendedBucketStats.Op.Samples.EpDcpOtherItemsRemaining)
+	cb.addBucketFieldChecked(fields, "ep_dcp_other_items_sent", extendedBucketStats.Op.Samples.EpDcpOtherItemsSent)
+	cb.addBucketFieldChecked(fields, "ep_dcp_other_producer_count", extendedBucketStats.Op.Samples.EpDcpOtherProducerCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_other_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpOtherTotalBacklogSize)
+	cb.addBucketFieldChecked(fields, "ep_dcp_other_total_bytes", extendedBucketStats.Op.Samples.EpDcpOtherTotalBytes)
+	cb.addBucketFieldChecked(fields, "ep_dcp_replica_backoff", extendedBucketStats.Op.Samples.EpDcpReplicaBackoff)
+	cb.addBucketFieldChecked(fields, "ep_dcp_replica_count", extendedBucketStats.Op.Samples.EpDcpReplicaCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_remaining", extendedBucketStats.Op.Samples.EpDcpReplicaItemsRemaining)
+	cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_sent", extendedBucketStats.Op.Samples.EpDcpReplicaItemsSent)
+	cb.addBucketFieldChecked(fields, "ep_dcp_replica_producer_count", extendedBucketStats.Op.Samples.EpDcpReplicaProducerCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBacklogSize)
+	cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_bytes", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBytes)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views_backoff", extendedBucketStats.Op.Samples.EpDcpViewsBackoff)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views_count", extendedBucketStats.Op.Samples.EpDcpViewsCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsItemsRemaining)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsItemsSent)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsProducerCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsTotalBacklogSize)
+	cb.addBucketFieldChecked(fields, "ep_dcp_views_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsTotalBytes)
+	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_backoff", extendedBucketStats.Op.Samples.EpDcpXdcrBackoff)
+	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_count", extendedBucketStats.Op.Samples.EpDcpXdcrCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_remaining", extendedBucketStats.Op.Samples.EpDcpXdcrItemsRemaining)
+	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_sent", extendedBucketStats.Op.Samples.EpDcpXdcrItemsSent)
+	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_producer_count", extendedBucketStats.Op.Samples.EpDcpXdcrProducerCount)
+	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBacklogSize)
+	cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_bytes", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBytes)
+	cb.addBucketFieldChecked(fields, "ep_diskqueue_drain", extendedBucketStats.Op.Samples.EpDiskqueueDrain)
+	cb.addBucketFieldChecked(fields, "ep_diskqueue_fill", extendedBucketStats.Op.Samples.EpDiskqueueFill)
+	cb.addBucketFieldChecked(fields, "ep_diskqueue_items", extendedBucketStats.Op.Samples.EpDiskqueueItems)
+	cb.addBucketFieldChecked(fields, "ep_flusher_todo", extendedBucketStats.Op.Samples.EpFlusherTodo)
+	cb.addBucketFieldChecked(fields, "ep_item_commit_failed", extendedBucketStats.Op.Samples.EpItemCommitFailed)
+	cb.addBucketFieldChecked(fields, "ep_kv_size", extendedBucketStats.Op.Samples.EpKvSize)
+	cb.addBucketFieldChecked(fields, "ep_max_size", extendedBucketStats.Op.Samples.EpMaxSize)
+	cb.addBucketFieldChecked(fields, "ep_mem_high_wat", extendedBucketStats.Op.Samples.EpMemHighWat)
+	cb.addBucketFieldChecked(fields, "ep_mem_low_wat", extendedBucketStats.Op.Samples.EpMemLowWat)
+	cb.addBucketFieldChecked(fields, "ep_meta_data_memory", extendedBucketStats.Op.Samples.EpMetaDataMemory)
+	cb.addBucketFieldChecked(fields, "ep_num_non_resident", extendedBucketStats.Op.Samples.EpNumNonResident)
+	cb.addBucketFieldChecked(fields, "ep_num_ops_del_meta", extendedBucketStats.Op.Samples.EpNumOpsDelMeta)
+	cb.addBucketFieldChecked(fields, "ep_num_ops_del_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsDelRetMeta)
+	cb.addBucketFieldChecked(fields, "ep_num_ops_get_meta", extendedBucketStats.Op.Samples.EpNumOpsGetMeta)
+	cb.addBucketFieldChecked(fields, "ep_num_ops_set_meta", extendedBucketStats.Op.Samples.EpNumOpsSetMeta)
+	cb.addBucketFieldChecked(fields, "ep_num_ops_set_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsSetRetMeta)
+	cb.addBucketFieldChecked(fields, "ep_num_value_ejects", extendedBucketStats.Op.Samples.EpNumValueEjects)
+	cb.addBucketFieldChecked(fields, "ep_oom_errors", extendedBucketStats.Op.Samples.EpOomErrors)
+	cb.addBucketFieldChecked(fields, "ep_ops_create", extendedBucketStats.Op.Samples.EpOpsCreate)
+	cb.addBucketFieldChecked(fields, "ep_ops_update", extendedBucketStats.Op.Samples.EpOpsUpdate)
+	cb.addBucketFieldChecked(fields, "ep_overhead", extendedBucketStats.Op.Samples.EpOverhead)
+	cb.addBucketFieldChecked(fields, "ep_queue_size", extendedBucketStats.Op.Samples.EpQueueSize)
+	cb.addBucketFieldChecked(fields, "ep_replica_ahead_exceptions", extendedBucketStats.Op.Samples.EpReplicaAheadExceptions)
+	cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift", extendedBucketStats.Op.Samples.EpReplicaHlcDrift)
+	cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift_count", extendedBucketStats.Op.Samples.EpReplicaHlcDriftCount)
+	cb.addBucketFieldChecked(fields, "ep_tmp_oom_errors", extendedBucketStats.Op.Samples.EpTmpOomErrors)
+	cb.addBucketFieldChecked(fields, "ep_vb_total", extendedBucketStats.Op.Samples.EpVbTotal)
+	cb.addBucketFieldChecked(fields, "evictions", extendedBucketStats.Op.Samples.Evictions)
+	cb.addBucketFieldChecked(fields, "get_hits", extendedBucketStats.Op.Samples.GetHits)
+	cb.addBucketFieldChecked(fields, "get_misses", extendedBucketStats.Op.Samples.GetMisses)
+	cb.addBucketFieldChecked(fields, "incr_hits", extendedBucketStats.Op.Samples.IncrHits)
+	cb.addBucketFieldChecked(fields, "incr_misses", extendedBucketStats.Op.Samples.IncrMisses)
+	cb.addBucketFieldChecked(fields, "misses", extendedBucketStats.Op.Samples.Misses)
+	cb.addBucketFieldChecked(fields, "ops", extendedBucketStats.Op.Samples.Ops)
+	cb.addBucketFieldChecked(fields, "timestamp", extendedBucketStats.Op.Samples.Timestamp)
+	cb.addBucketFieldChecked(fields, "vb_active_eject", extendedBucketStats.Op.Samples.VbActiveEject)
+	cb.addBucketFieldChecked(fields, "vb_active_itm_memory", extendedBucketStats.Op.Samples.VbActiveItmMemory)
+	cb.addBucketFieldChecked(fields, "vb_active_meta_data_memory", extendedBucketStats.Op.Samples.VbActiveMetaDataMemory)
+	cb.addBucketFieldChecked(fields, "vb_active_num", extendedBucketStats.Op.Samples.VbActiveNum)
+	cb.addBucketFieldChecked(fields, "vb_active_num_non_resident", extendedBucketStats.Op.Samples.VbActiveNumNonResident)
+	cb.addBucketFieldChecked(fields, "vb_active_ops_create", extendedBucketStats.Op.Samples.VbActiveOpsCreate)
+	cb.addBucketFieldChecked(fields, "vb_active_ops_update", extendedBucketStats.Op.Samples.VbActiveOpsUpdate)
+	cb.addBucketFieldChecked(fields, "vb_active_queue_age", extendedBucketStats.Op.Samples.VbActiveQueueAge)
+	cb.addBucketFieldChecked(fields, "vb_active_queue_drain", extendedBucketStats.Op.Samples.VbActiveQueueDrain)
+	cb.addBucketFieldChecked(fields, "vb_active_queue_fill", extendedBucketStats.Op.Samples.VbActiveQueueFill)
+	cb.addBucketFieldChecked(fields, "vb_active_queue_size", extendedBucketStats.Op.Samples.VbActiveQueueSize)
+	cb.addBucketFieldChecked(fields, "vb_active_sync_write_aborted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAbortedCount)
+	cb.addBucketFieldChecked(fields, "vb_active_sync_write_accepted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAcceptedCount)
+	cb.addBucketFieldChecked(fields, "vb_active_sync_write_committed_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteCommittedCount)
+	cb.addBucketFieldChecked(fields, "vb_pending_curr_items", extendedBucketStats.Op.Samples.VbPendingCurrItems)
+	cb.addBucketFieldChecked(fields, "vb_pending_eject", extendedBucketStats.Op.Samples.VbPendingEject)
+	cb.addBucketFieldChecked(fields, "vb_pending_itm_memory", extendedBucketStats.Op.Samples.VbPendingItmMemory)
+	cb.addBucketFieldChecked(fields, "vb_pending_meta_data_memory", extendedBucketStats.Op.Samples.VbPendingMetaDataMemory)
+	cb.addBucketFieldChecked(fields, "vb_pending_num", extendedBucketStats.Op.Samples.VbPendingNum)
+	cb.addBucketFieldChecked(fields, "vb_pending_num_non_resident", extendedBucketStats.Op.Samples.VbPendingNumNonResident)
+	cb.addBucketFieldChecked(fields, "vb_pending_ops_create", extendedBucketStats.Op.Samples.VbPendingOpsCreate)
+	cb.addBucketFieldChecked(fields, "vb_pending_ops_update", extendedBucketStats.Op.Samples.VbPendingOpsUpdate)
+	cb.addBucketFieldChecked(fields, "vb_pending_queue_age", extendedBucketStats.Op.Samples.VbPendingQueueAge)
+	cb.addBucketFieldChecked(fields, "vb_pending_queue_drain", extendedBucketStats.Op.Samples.VbPendingQueueDrain)
+	cb.addBucketFieldChecked(fields, "vb_pending_queue_fill", extendedBucketStats.Op.Samples.VbPendingQueueFill)
+	cb.addBucketFieldChecked(fields, "vb_pending_queue_size", extendedBucketStats.Op.Samples.VbPendingQueueSize)
+	cb.addBucketFieldChecked(fields, "vb_replica_curr_items", extendedBucketStats.Op.Samples.VbReplicaCurrItems)
+	cb.addBucketFieldChecked(fields, "vb_replica_eject", extendedBucketStats.Op.Samples.VbReplicaEject)
+	cb.addBucketFieldChecked(fields, "vb_replica_itm_memory", extendedBucketStats.Op.Samples.VbReplicaItmMemory)
+	cb.addBucketFieldChecked(fields, "vb_replica_meta_data_memory", extendedBucketStats.Op.Samples.VbReplicaMetaDataMemory)
+	cb.addBucketFieldChecked(fields, "vb_replica_num", extendedBucketStats.Op.Samples.VbReplicaNum)
+	cb.addBucketFieldChecked(fields, "vb_replica_num_non_resident", extendedBucketStats.Op.Samples.VbReplicaNumNonResident)
+	cb.addBucketFieldChecked(fields, "vb_replica_ops_create", extendedBucketStats.Op.Samples.VbReplicaOpsCreate)
+	cb.addBucketFieldChecked(fields, "vb_replica_ops_update", extendedBucketStats.Op.Samples.VbReplicaOpsUpdate)
+	cb.addBucketFieldChecked(fields, "vb_replica_queue_age", extendedBucketStats.Op.Samples.VbReplicaQueueAge)
+	cb.addBucketFieldChecked(fields, "vb_replica_queue_drain", extendedBucketStats.Op.Samples.VbReplicaQueueDrain)
+	cb.addBucketFieldChecked(fields, "vb_replica_queue_fill", extendedBucketStats.Op.Samples.VbReplicaQueueFill)
+	cb.addBucketFieldChecked(fields, "vb_replica_queue_size", extendedBucketStats.Op.Samples.VbReplicaQueueSize)
+	cb.addBucketFieldChecked(fields, "vb_total_queue_age", extendedBucketStats.Op.Samples.VbTotalQueueAge)
+	cb.addBucketFieldChecked(fields, "xdc_ops", extendedBucketStats.Op.Samples.XdcOps)
+	cb.addBucketFieldChecked(fields, "allocstall", extendedBucketStats.Op.Samples.Allocstall)
+	cb.addBucketFieldChecked(fields, "cpu_cores_available", extendedBucketStats.Op.Samples.CPUCoresAvailable)
+	cb.addBucketFieldChecked(fields, "cpu_irq_rate", extendedBucketStats.Op.Samples.CPUIrqRate)
+	cb.addBucketFieldChecked(fields, "cpu_stolen_rate", extendedBucketStats.Op.Samples.CPUStolenRate)
+	cb.addBucketFieldChecked(fields, "cpu_sys_rate", extendedBucketStats.Op.Samples.CPUSysRate)
+	cb.addBucketFieldChecked(fields, "cpu_user_rate", extendedBucketStats.Op.Samples.CPUUserRate)
+	cb.addBucketFieldChecked(fields, "cpu_utilization_rate", extendedBucketStats.Op.Samples.CPUUtilizationRate)
+	cb.addBucketFieldChecked(fields, "hibernated_requests", extendedBucketStats.Op.Samples.HibernatedRequests)
+	cb.addBucketFieldChecked(fields, "hibernated_waked", extendedBucketStats.Op.Samples.HibernatedWaked)
+	cb.addBucketFieldChecked(fields, "mem_actual_free", extendedBucketStats.Op.Samples.MemActualFree)
+	cb.addBucketFieldChecked(fields, "mem_actual_used", extendedBucketStats.Op.Samples.MemActualUsed)
+	cb.addBucketFieldChecked(fields, "mem_free", extendedBucketStats.Op.Samples.MemFree)
+	cb.addBucketFieldChecked(fields, "mem_limit", extendedBucketStats.Op.Samples.MemLimit)
+	cb.addBucketFieldChecked(fields, "mem_total", extendedBucketStats.Op.Samples.MemTotal)
+	cb.addBucketFieldChecked(fields, "mem_used_sys", extendedBucketStats.Op.Samples.MemUsedSys)
+	cb.addBucketFieldChecked(fields, "odp_report_failed", extendedBucketStats.Op.Samples.OdpReportFailed)
+	cb.addBucketFieldChecked(fields, "rest_requests", extendedBucketStats.Op.Samples.RestRequests)
+	cb.addBucketFieldChecked(fields, "swap_total", extendedBucketStats.Op.Samples.SwapTotal)
+	cb.addBucketFieldChecked(fields, "swap_used", extendedBucketStats.Op.Samples.SwapUsed)
 
 	return nil
 }
@@ -333,12 +331,12 @@ func (cb *Couchbase) addBucketField(fields map[string]interface{}, fieldKey stri
 	fields[fieldKey] = value
 }
 
-func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldKey string, values []float64, index int) {
+func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldKey string, values []float64) {
 	if values == nil {
 		return
 	}
 
-	cb.addBucketField(fields, fieldKey, values[index])
+	cb.addBucketField(fields, fieldKey, values[len(values)-1])
 }
 
 func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, bucketStats *BucketStats) error {
extendedBucketStats.Op.Samples.MemTotal) + cb.addBucketFieldChecked(fields, "mem_used_sys", extendedBucketStats.Op.Samples.MemUsedSys) + cb.addBucketFieldChecked(fields, "odp_report_failed", extendedBucketStats.Op.Samples.OdpReportFailed) + cb.addBucketFieldChecked(fields, "rest_requests", extendedBucketStats.Op.Samples.RestRequests) + cb.addBucketFieldChecked(fields, "swap_total", extendedBucketStats.Op.Samples.SwapTotal) + cb.addBucketFieldChecked(fields, "swap_used", extendedBucketStats.Op.Samples.SwapUsed) return nil } @@ -333,12 +331,12 @@ func (cb *Couchbase) addBucketField(fields map[string]interface{}, fieldKey stri fields[fieldKey] = value } -func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldKey string, values []float64, index int) { +func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldKey string, values []float64) { if values == nil { return } - cb.addBucketField(fields, fieldKey, values[index]) + cb.addBucketField(fields, fieldKey, values[len(values)-1]) } func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, bucketStats *BucketStats) error { diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 05e1e8f097600..f3a6a6d3f6c64 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -175,7 +175,7 @@ type point struct { func (d *DCOS) createPoints(m *Metrics) []*point { points := make(map[string]*point) for _, dp := range m.Datapoints { - fieldKey := strings.Replace(dp.Name, ".", "_", -1) + fieldKey := strings.ReplaceAll(dp.Name, ".", "_") tags := dp.Tags if tags == nil { diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index 461f4bdc9bf44..62945ef11c1d4 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -12,8 +12,7 @@ import ( type DiskStats struct { ps system.PS - // Legacy support - LegacyMountPoints []string `toml:"mountpoints"` + LegacyMountPoints []string `toml:"mountpoints" deprecated:"0.10.2;2.0.0;use 'mount_points' instead"` MountPoints []string `toml:"mount_points"` IgnoreFS []string `toml:"ignore_fs"` @@ -47,7 +46,7 @@ func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { mountOpts := MountOptions(partitions[i].Opts) tags := map[string]string{ "path": du.Path, - "device": strings.Replace(partitions[i].Device, "/dev/", "", -1), + "device": strings.ReplaceAll(partitions[i].Device, "/dev/", ""), "fstype": du.Fstype, "mode": mountOpts.Mode(), } diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index d8850dbfc2386..3f0c73bf1a149 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -328,7 +328,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { ) for _, rawData := range info.DriverStatus { - name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1)) + name := strings.ToLower(strings.ReplaceAll(rawData[0], " ", "_")) if name == "pool_name" { poolName = rawData[1] continue diff --git a/plugins/inputs/elasticsearch_query/aggregation_query.go b/plugins/inputs/elasticsearch_query/aggregation_query.go index aff67d2baa884..ab97277f4e119 100644 --- a/plugins/inputs/elasticsearch_query/aggregation_query.go +++ b/plugins/inputs/elasticsearch_query/aggregation_query.go @@ -153,7 +153,7 @@ func (aggregation *esAggregation) buildAggregationQuery() error { measurement: aggregation.MeasurementName, function: aggregation.MetricFunction, field: k, - name: strings.Replace(k, ".", "_", -1) + "_" + aggregation.MetricFunction, + name: 
strings.ReplaceAll(k, ".", "_") + "_" + aggregation.MetricFunction, }, isParent: true, aggregation: agg, @@ -185,7 +185,7 @@ func (aggregation *esAggregation) buildAggregationQuery() error { measurement: aggregation.MeasurementName, function: "terms", field: term, - name: strings.Replace(term, ".", "_", -1), + name: strings.ReplaceAll(term, ".", "_"), }, isParent: true, aggregation: agg, diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go index e017681b7c58d..cccebc3ba3253 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go @@ -546,9 +546,9 @@ func setupIntegrationTest() error { logline := nginxlog{ IPaddress: parts[0], Timestamp: time.Now().UTC(), - Method: strings.Replace(parts[5], `"`, "", -1), + Method: strings.ReplaceAll(parts[5], `"`, ""), URI: parts[6], - Httpversion: strings.Replace(parts[7], `"`, "", -1), + Httpversion: strings.ReplaceAll(parts[7], `"`, ""), Response: parts[8], Size: float64(size), ResponseTime: float64(responseTime), diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index a7266c98ff271..3d188bbbcf7d6 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -17,6 +17,12 @@ This plugin can be used to poll for custom metrics from any source. "/tmp/collect_*.sh" ] + ## Environment variables + ## Array of "key=value" pairs to pass as environment variables + ## e.g. "KEY=value", "USERNAME=John Doe", + ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" + # environment = [] + ## Timeout for each command to complete. timeout = "5s" @@ -55,7 +61,7 @@ It can be paired with the following configuration and will be run at the `interv ### My script works when I run it by hand, but not when Telegraf is running as a service -This may be related to the Telegraf service running as a different user. The +This may be related to the Telegraf service running as a different user. The official packages run Telegraf as the `telegraf` user and group on Linux systems. diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 1cb9433db0b96..3369979eafb55 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "os" osExec "os/exec" "path/filepath" "runtime" @@ -24,9 +25,10 @@ import ( const MaxStderrBytes int = 512 type Exec struct { - Commands []string `toml:"commands"` - Command string `toml:"command"` - Timeout config.Duration `toml:"timeout"` + Commands []string `toml:"commands"` + Command string `toml:"command"` + Environment []string `toml:"environment"` + Timeout config.Duration `toml:"timeout"` parser parsers.Parser @@ -42,13 +44,14 @@ func NewExec() *Exec { } type Runner interface { - Run(string, time.Duration) ([]byte, []byte, error) + Run(string, []string, time.Duration) ([]byte, []byte, error) } type CommandRunner struct{} func (c CommandRunner) Run( command string, + environments []string, timeout time.Duration, ) ([]byte, []byte, error) { splitCmd, err := shellquote.Split(command) @@ -58,6 +61,10 @@ func (c CommandRunner) Run( cmd := osExec.Command(splitCmd[0], splitCmd[1:]...) + if len(environments) > 0 { + cmd.Env = append(os.Environ(), environments...) 
+ } + var ( out bytes.Buffer stderr bytes.Buffer @@ -120,7 +127,7 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync defer wg.Done() _, isNagios := e.parser.(*nagios.NagiosParser) - out, errBuf, runErr := e.runner.Run(command, time.Duration(e.Timeout)) + out, errBuf, runErr := e.runner.Run(command, e.Environment, time.Duration(e.Timeout)) if !isNagios && runErr != nil { err := fmt.Errorf("exec: %s for command '%s': %s", runErr, command, string(errBuf)) acc.AddError(err) diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 22465318bbe71..85c1db3da7a45 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -75,7 +75,7 @@ func newRunnerMock(out []byte, errout []byte, err error) Runner { } } -func (r runnerMock) Run(_ string, _ time.Duration) ([]byte, []byte, error) { +func (r runnerMock) Run(_ string, _ []string, _ time.Duration) ([]byte, []byte, error) { return r.out, r.errout, r.err } @@ -191,6 +191,23 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) { acc.AssertContainsFields(t, "metric", fields) } +func TestExecCommandWithEnv(t *testing.T) { + parser, _ := parsers.NewValueParser("metric", "string", "", nil) + e := NewExec() + e.Commands = []string{"/bin/sh -c 'echo ${METRIC_NAME}'"} + e.Environment = []string{"METRIC_NAME=metric_value"} + e.SetParser(parser) + + var acc testutil.Accumulator + err := acc.GatherError(e.Gather) + require.NoError(t, err) + + fields := map[string]interface{}{ + "value": "metric_value", + } + acc.AssertContainsFields(t, "metric", fields) +} + func TestTruncate(t *testing.T) { tests := []struct { name string diff --git a/plugins/inputs/execd/README.md b/plugins/inputs/execd/README.md index 42acbc36e8aa0..31997e9587eb5 100644 --- a/plugins/inputs/execd/README.md +++ b/plugins/inputs/execd/README.md @@ -22,6 +22,12 @@ STDERR from the process will be relayed to Telegraf as errors in the logs. ## NOTE: process and each argument should each be their own string command = ["telegraf-smartctl", "-d", "/dev/sda"] + ## Environment variables + ## Array of "key=value" pairs to pass as environment variables + ## e.g. "KEY=value", "USERNAME=John Doe", + ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" + # environment = [] + ## Define how the process is signaled on each collection interval. ## Valid values are: ## "none" : Do not signal anything. 
(Recommended for service inputs) diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go index afd0214a6aac9..08ef50ef7394c 100644 --- a/plugins/inputs/execd/execd.go +++ b/plugins/inputs/execd/execd.go @@ -19,6 +19,7 @@ import ( type Execd struct { Command []string `toml:"command"` + Environment []string `toml:"environment"` Signal string `toml:"signal"` RestartDelay config.Duration `toml:"restart_delay"` Log telegraf.Logger `toml:"-"` @@ -35,7 +36,7 @@ func (e *Execd) SetParser(parser parsers.Parser) { func (e *Execd) Start(acc telegraf.Accumulator) error { e.acc = acc var err error - e.process, err = process.New(e.Command) + e.process, err = process.New(e.Command, e.Environment) if err != nil { return fmt.Errorf("error creating new process: %w", err) } diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index a8c8364394480..729db3785f03e 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -25,6 +25,7 @@ func TestSettingConfigWorks(t *testing.T) { cfg := ` [[inputs.execd]] command = ["a", "b", "c"] + environment = ["d=e", "f=1"] restart_delay = "1m" signal = "SIGHUP" ` @@ -35,6 +36,7 @@ func TestSettingConfigWorks(t *testing.T) { inp, ok := conf.Inputs[0].Input.(*Execd) require.True(t, ok) require.EqualValues(t, []string{"a", "b", "c"}, inp.Command) + require.EqualValues(t, []string{"d=e", "f=1"}, inp.Environment) require.EqualValues(t, 1*time.Minute, inp.RestartDelay) require.EqualValues(t, "SIGHUP", inp.Signal) } @@ -48,6 +50,7 @@ func TestExternalInputWorks(t *testing.T) { e := &Execd{ Command: []string{exe, "-counter"}, + Environment: []string{"PLUGINS_INPUTS_EXECD_MODE=application", "METRIC_NAME=counter"}, RestartDelay: config.Duration(5 * time.Second), parser: influxParser, Signal: "STDIN", @@ -152,7 +155,8 @@ var counter = flag.Bool("counter", false, func TestMain(m *testing.M) { flag.Parse() - if *counter { + runMode := os.Getenv("PLUGINS_INPUTS_EXECD_MODE") + if *counter && runMode == "application" { if err := runCounterProgram(); err != nil { os.Exit(1) } @@ -163,6 +167,7 @@ func TestMain(m *testing.M) { } func runCounterProgram() error { + envMetricName := os.Getenv("METRIC_NAME") i := 0 serializer, err := serializers.NewInfluxSerializer() if err != nil { @@ -173,7 +178,7 @@ func runCounterProgram() error { scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { - m := metric.New("counter", + m := metric.New(envMetricName, map[string]string{}, map[string]interface{}{ "count": i, diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 42754348e8c4b..f45bf0d8e4c65 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -413,7 +413,7 @@ func (c *GNMI) handleTelemetryField(update *gnmiLib.Update, tags map[string]stri jsondata = val.JsonVal } - name := strings.Replace(gpath, "-", "_", -1) + name := strings.ReplaceAll(gpath, "-", "_") fields := make(map[string]interface{}) if value != nil { fields[name] = value @@ -462,7 +462,7 @@ func (c *GNMI) handlePath(gnmiPath *gnmiLib.Path, tags map[string]string, prefix if tags != nil { for key, val := range elem.Key { - key = strings.Replace(key, "-", "_", -1) + key = strings.ReplaceAll(key, "-", "_") // Use short-form of key if possible if _, exists := tags[key]; exists { diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index 8b8d95fbfa7e4..27de16916a63a 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -188,6 +188,7 @@ func 
makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) } return rc, nil } + return io.NopCloser(reader), nil } diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index a7044428f7449..39e201c9b79d2 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -233,7 +233,7 @@ func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAss if r.UseSudo { // run pqos with `/bin/sh -c "sudo /path/to/pqos ..."` - args = []string{"-c", fmt.Sprintf("sudo %s %s", r.PqosPath, strings.Replace(strings.Join(args, " "), ";", "\\;", -1))} + args = []string{"-c", fmt.Sprintf("sudo %s %s", r.PqosPath, strings.ReplaceAll(strings.Join(args, " "), ";", "\\;"))} cmd = exec.Command("/bin/sh", args...) } diff --git a/plugins/inputs/ipmi_sensor/ipmi_sensor.go b/plugins/inputs/ipmi_sensor/ipmi_sensor.go index d79b4d5225ab2..134e889fe4bbf 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_sensor.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor.go @@ -278,7 +278,7 @@ func trim(s string) string { func transform(s string) string { s = trim(s) s = strings.ToLower(s) - return strings.Replace(s, " ", "_", -1) + return strings.ReplaceAll(s, " ", "_") } func init() { diff --git a/plugins/inputs/jolokia2/common/point_builder.go b/plugins/inputs/jolokia2/common/point_builder.go index feba74b11620a..ae45fdaa744c1 100644 --- a/plugins/inputs/jolokia2/common/point_builder.go +++ b/plugins/inputs/jolokia2/common/point_builder.go @@ -143,7 +143,7 @@ func (pb *pointBuilder) formatFieldName(attribute, path string) string { } if path != "" { - fieldName = fieldName + fieldSeparator + strings.Replace(path, "/", fieldSeparator, -1) + fieldName = fieldName + fieldSeparator + strings.ReplaceAll(path, "/", fieldSeparator) } return fieldName @@ -200,7 +200,7 @@ func (pb *pointBuilder) applySubstitutions(mbean string, fieldMap map[string]int substitution := properties[subKey] for fieldName, fieldValue := range fieldMap { - newFieldName := strings.Replace(fieldName, symbol, substitution, -1) + newFieldName := strings.ReplaceAll(fieldName, symbol, substitution) if fieldName != newFieldName { fieldMap[newFieldName] = fieldValue delete(fieldMap, fieldName) diff --git a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go index b4ca80468432b..9d12ad46c8254 100644 --- a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go @@ -74,7 +74,7 @@ func spitTagsNPath(xmlpath string) (string, map[string]string) { // we must emit multiple tags for _, kv := range strings.Split(sub[2], " and ") { key := tagKey + strings.TrimSpace(strings.Split(kv, "=")[0]) - tagValue := strings.Replace(strings.Split(kv, "=")[1], "'", "", -1) + tagValue := strings.ReplaceAll(strings.Split(kv, "=")[1], "'", "") tags[key] = tagValue } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 79c48f7c11518..88eb029f004ec 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -1875,8 +1875,8 @@ func (m *Mysql) parseValueByDatabaseTypeName(value sql.RawBytes, databaseTypeNam func findThreadState(rawCommand, rawState string) string { var ( // replace '_' symbol with space - command = strings.Replace(strings.ToLower(rawCommand), "_", " ", -1) - state = strings.Replace(strings.ToLower(rawState), "_", " ", -1) + command = 
strings.ReplaceAll(strings.ToLower(rawCommand), "_", " ") + state = strings.ReplaceAll(strings.ToLower(rawState), "_", " ") ) // if the state is already valid, then return it if _, ok := generalThreadStates[state]; ok { @@ -1909,7 +1909,7 @@ func findThreadState(rawCommand, rawState string) string { // newNamespace can be used to make a namespace func newNamespace(words ...string) string { - return strings.Replace(strings.Join(words, "_"), " ", "_", -1) + return strings.ReplaceAll(strings.Join(words, "_"), " ", "_") } func copyTags(in map[string]string) map[string]string { diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index 948f205953a80..4b633148c63b1 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -110,7 +110,7 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { } } } else { - field := strings.Replace(stat, ".", "_", -1) + field := strings.ReplaceAll(stat, ".", "_") fields[field] = fieldValue } } diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index 4853ff19d11da..45ae42a350b0b 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -170,8 +170,8 @@ func dnToMetric(dn string, o *Openldap) string { var metricParts []string dn = strings.Trim(dn, " ") - dn = strings.Replace(dn, " ", "_", -1) - dn = strings.Replace(dn, "cn=", "", -1) + dn = strings.ReplaceAll(dn, " ", "_") + dn = strings.ReplaceAll(dn, "cn=", "") dn = strings.ToLower(dn) metricParts = strings.Split(dn, ",") for i, j := 0, len(metricParts)-1; i < j; i, j = i+1, j-1 { @@ -181,12 +181,12 @@ func dnToMetric(dn string, o *Openldap) string { } metricName := strings.Trim(dn, " ") - metricName = strings.Replace(metricName, " ", "_", -1) + metricName = strings.ReplaceAll(metricName, " ", "_") metricName = strings.ToLower(metricName) metricName = strings.TrimPrefix(metricName, "cn=") - metricName = strings.Replace(metricName, strings.ToLower("cn=Monitor"), "", -1) - metricName = strings.Replace(metricName, "cn=", "_", -1) - return strings.Replace(metricName, ",", "", -1) + metricName = strings.ReplaceAll(metricName, strings.ToLower("cn=Monitor"), "") + metricName = strings.ReplaceAll(metricName, "cn=", "_") + return strings.ReplaceAll(metricName, ",", "") } func init() { diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index 2b670c03de92c..7159e47a60111 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -87,7 +87,7 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { continue } - field := strings.Replace(stat, ".", "_", -1) + field := strings.ReplaceAll(stat, ".", "_") fields[field], err = strconv.ParseFloat(value, 64) if err != nil { diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 09ebde883dfb4..52ef1f7f8bc78 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -227,7 +227,7 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { } fields := make(map[string]interface{}) for k, v := range stats[pool] { - fields[strings.Replace(k, " ", "_", -1)] = v + fields[strings.ReplaceAll(k, " ", "_")] = v } acc.AddFields("phpfpm", fields, tags) } diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index af4813325f73a..79789f8a51e99 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -28,8 +28,9 @@ The example below 
specifies two queries, with the following parameters: # to grab metrics for. # address = "host=localhost user=postgres sslmode=disable" - # A list of databases to pull metrics about. If not specified, metrics for all - # databases are gathered. + + ## A list of databases to pull metrics about. + ## deprecated in 1.22.4; use the sqlquery option to specify database to use # databases = ["app_production", "testing"] ## Whether to use prepared statements when connecting to the database. @@ -38,20 +39,6 @@ The example below specifies two queries, with the following parameters: prepared_statements = true # Define the toml config where the sql queries are stored - # New queries can be added, if the withdbname is set to true and there is no - # databases defined in the 'databases field', the sql query is ended by a 'is - # not null' in order to make the query succeed. - # Be careful that the sqlquery must contain the where clause with a part of - # the filtering, the plugin will add a 'IN (dbname list)' clause if the - # withdbname is set to true - # Example : - # The sqlquery : "SELECT * FROM pg_stat_database where datname" become - # "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" - # because the databases variable was set to ['postgres', 'pgbench' ] and the - # withdbname was true. - # Be careful that if the withdbname is set to false you don't have to define - # the where clause (aka with the dbname) - # # The script option can be used to specify the .sql file path. # If script and sqlquery options are specified at the same time, sqlquery will be used # diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 7ee05b1045149..bbd647e08c406 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -18,7 +18,7 @@ import ( type Postgresql struct { postgresql.Service - Databases []string + Databases []string `deprecated:"1.22.4;use the sqlquery option to specify database to use"` AdditionalTags []string Timestamp string Query query @@ -32,7 +32,7 @@ type query []struct { Sqlquery string Script string Version int - Withdbname bool + Withdbname bool `deprecated:"1.22.4;use the sqlquery option to specify database to use"` Tagvalue string Measurement string Timestamp string diff --git a/plugins/inputs/redis_sentinel/redis_sentinel.go b/plugins/inputs/redis_sentinel/redis_sentinel.go index be80ebe2ba69b..b425fcd94d3b1 100644 --- a/plugins/inputs/redis_sentinel/redis_sentinel.go +++ b/plugins/inputs/redis_sentinel/redis_sentinel.go @@ -138,7 +138,7 @@ func prepareFieldValues(fields map[string]string, typeMap map[string]configField preparedFields := make(map[string]interface{}) for key, val := range fields { - key = strings.Replace(key, "-", "_", -1) + key = strings.ReplaceAll(key, "-", "_") valType, ok := typeMap[key] if !ok { diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 82aaf20d265fd..d73c543e9ef58 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -92,7 +92,7 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error { // snake converts string to snake case func snake(input string) string { - return strings.ToLower(strings.Replace(strings.TrimSpace(input), " ", "_", -1)) + return strings.ToLower(strings.ReplaceAll(strings.TrimSpace(input), " ", "_")) } func init() { diff --git a/plugins/inputs/smart/smart.go
b/plugins/inputs/smart/smart.go index 4064c0ffbb928..383fca60d4321 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -730,12 +730,12 @@ func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.Wai wwn := wwnInfo.FindStringSubmatch(line) if len(wwn) > 1 { - deviceTags["wwn"] = strings.Replace(wwn[1], " ", "", -1) + deviceTags["wwn"] = strings.ReplaceAll(wwn[1], " ", "") } capacity := userCapacityInfo.FindStringSubmatch(line) if len(capacity) > 1 { - deviceTags["capacity"] = strings.Replace(capacity[1], ",", "", -1) + deviceTags["capacity"] = strings.ReplaceAll(capacity[1], ",", "") } enabled := smartEnabledInfo.FindStringSubmatch(line) @@ -1004,7 +1004,7 @@ func parseDataUnits(fields, deviceFields map[string]interface{}, str string) err } func parseCommaSeparatedIntWithAccumulator(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error { - i, err := strconv.ParseInt(strings.Replace(str, ",", "", -1), 10, 64) + i, err := strconv.ParseInt(strings.ReplaceAll(str, ",", ""), 10, 64) if err != nil { return err } diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index c21cc11c5e5a7..fbbdd03f00c58 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -395,7 +395,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { return nil } -// tcpListen() starts listening for udp packets on the configured port. +// tcpListen() starts listening for TCP packets on the configured port. func (s *Statsd) tcpListen(listener *net.TCPListener) error { for { select { @@ -436,7 +436,7 @@ func (s *Statsd) tcpListen(listener *net.TCPListener) error { } } -// udpListen starts listening for udp packets on the configured port. +// udpListen starts listening for UDP packets on the configured port. 
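+// It returns nil, rather than the read error, when the connection is closed during shutdown, so stopping the plugin is not reported as a failure.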
func (s *Statsd) udpListen(conn *net.UDPConn) error { if s.ReadBufferSize > 0 { if err := s.UDPlistener.SetReadBuffer(s.ReadBufferSize); err != nil { @@ -456,7 +456,7 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { s.Log.Errorf("Error reading: %s", err.Error()) continue } - return err + return nil } s.UDPPacketsRecv.Incr(1) s.UDPBytesRecv.Incr(int64(n)) @@ -713,8 +713,8 @@ func (s *Statsd) parseName(bucket string) (name string, field string, tags map[s } if s.ConvertNames { - name = strings.Replace(name, ".", "_", -1) - name = strings.Replace(name, "-", "__", -1) + name = strings.ReplaceAll(name, ".", "_") + name = strings.ReplaceAll(name, "-", "__") } if field == "" { field = defaultFieldName diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index d5592e45878de..1dd6ca85be31c 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -151,7 +151,7 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error { } } } else { - field := strings.Replace(stat, ".", "_", -1) + field := strings.ReplaceAll(stat, ".", "_") fields[field] = fieldValue } } diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 79dcca290d248..c29b9e9062111 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -55,11 +55,11 @@ func (a *Amon) Write(metrics []telegraf.Metric) error { metricCounter := 0 for _, m := range metrics { - mname := strings.Replace(m.Name(), "_", ".", -1) + mname := strings.ReplaceAll(m.Name(), "_", ".") if amonPts, err := buildMetrics(m); err == nil { for fieldName, amonPt := range amonPts { metric := &Metric{ - Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1), + Metric: mname + "_" + strings.ReplaceAll(fieldName, "_", "."), } metric.Points[0] = amonPt tempSeries = append(tempSeries, metric) diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index 6b0637cac383e..462f75cf01815 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -3,12 +3,14 @@ package azure_monitor import ( "bytes" "compress/gzip" + "context" "encoding/binary" "encoding/json" "fmt" "hash/fnv" "io" "net/http" + "net/url" "regexp" "strings" "time" @@ -109,12 +111,7 @@ func (a *AzureMonitor) Connect() error { a.Timeout = config.Duration(defaultRequestTimeout) } - a.client = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - }, - Timeout: time.Duration(a.Timeout), - } + a.initHTTPClient() var err error var region string @@ -168,6 +165,15 @@ func (a *AzureMonitor) Connect() error { return nil } +func (a *AzureMonitor) initHTTPClient() { + a.client = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + Timeout: time.Duration(a.Timeout), + } +} + // vmMetadata retrieves metadata about the current Azure VM func vmInstanceMetadata(c *http.Client) (region string, resourceID string, err error) { req, err := http.NewRequest("GET", vmInstanceMetadataURL, nil) @@ -313,6 +319,10 @@ func (a *AzureMonitor) send(body []byte) error { resp, err := a.client.Do(req) if err != nil { + if err.(*url.Error).Unwrap() == context.DeadlineExceeded { + a.initHTTPClient() + } + return err } defer resp.Body.Close() diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index 60b41a69bda5a..283421e971240 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -183,7 +183,7 @@ func 
escapeObject(m map[string]interface{}, keyReplacement string) (string, erro // escapeString wraps s in the given quote string and replaces all occurrences // of it inside of s with a double quote. func escapeString(s string, quote string) string { - return quote + strings.Replace(s, quote, quote+quote, -1) + quote + return quote + strings.ReplaceAll(s, quote, quote+quote) + quote } // hashID returns a cryptographic hash int64 hash that includes the metric name diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 682be05070b66..5419c4da40e81 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -148,13 +148,13 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { } if err != nil { - return fmt.Errorf("unable to create http.Request, %s", strings.Replace(err.Error(), d.Apikey, redactedAPIKey, -1)) + return fmt.Errorf("unable to create http.Request, %s", strings.ReplaceAll(err.Error(), d.Apikey, redactedAPIKey)) } req.Header.Add("Content-Type", "application/json") resp, err := d.client.Do(req) if err != nil { - return fmt.Errorf("error POSTing metrics, %s", strings.Replace(err.Error(), d.Apikey, redactedAPIKey, -1)) + return fmt.Errorf("error POSTing metrics, %s", strings.ReplaceAll(err.Error(), d.Apikey, redactedAPIKey)) } defer resp.Body.Close() diff --git a/plugins/outputs/exec/README.md b/plugins/outputs/exec/README.md index 1a08d884ed064..99875b38ef847 100644 --- a/plugins/outputs/exec/README.md +++ b/plugins/outputs/exec/README.md @@ -20,6 +20,12 @@ For better performance, consider execd, which runs continuously. ## Command to ingest metrics via stdin. command = ["tee", "-a", "/dev/null"] + ## Environment variables + ## Array of "key=value" pairs to pass as environment variables + ## e.g. "KEY=value", "USERNAME=John Doe", + ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" + # environment = [] + ## Timeout for command to complete. # timeout = "5s" diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 395e493467111..c8346adc100c6 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "os" "os/exec" "runtime" "time" @@ -19,9 +20,10 @@ const maxStderrBytes = 512 // Exec defines the exec output plugin. type Exec struct { - Command []string `toml:"command"` - Timeout config.Duration `toml:"timeout"` - Log telegraf.Logger `toml:"-"` + Command []string `toml:"command"` + Environment []string `toml:"environment"` + Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` runner Runner serializer serializers.Serializer @@ -61,12 +63,12 @@ func (e *Exec) Write(metrics []telegraf.Metric) error { return nil } - return e.runner.Run(time.Duration(e.Timeout), e.Command, &buffer) + return e.runner.Run(time.Duration(e.Timeout), e.Command, e.Environment, &buffer) } // Runner provides an interface for running exec.Cmd. type Runner interface { - Run(time.Duration, []string, io.Reader) error + Run(time.Duration, []string, []string, io.Reader) error } // CommandRunner runs a command with the ability to kill the process before the timeout. @@ -76,8 +78,11 @@ type CommandRunner struct { } // Run runs the command. -func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.Reader) error { +func (c *CommandRunner) Run(timeout time.Duration, command []string, environments []string, buffer io.Reader) error { cmd := exec.Command(command[0], command[1:]...) 
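+ // Merge the user-configured "key=value" pairs into the environment inherited from the Telegraf process before the command starts.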
+ if len(environments) > 0 { + cmd.Env = append(os.Environ(), environments...) + } cmd.Stdin = buffer var stderr bytes.Buffer cmd.Stderr = &stderr diff --git a/plugins/outputs/execd/README.md b/plugins/outputs/execd/README.md index ef4943492ceeb..59be1de03eecc 100644 --- a/plugins/outputs/execd/README.md +++ b/plugins/outputs/execd/README.md @@ -13,6 +13,12 @@ Telegraf minimum version: Telegraf 1.15.0 ## NOTE: process and each argument should each be their own string command = ["my-telegraf-output", "--some-flag", "value"] + ## Environment variables + ## Array of "key=value" pairs to pass as environment variables + ## e.g. "KEY=value", "USERNAME=John Doe", + ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" + # environment = [] + ## Delay before the process is restarted after an unexpected termination restart_delay = "10s" diff --git a/plugins/outputs/execd/execd.go b/plugins/outputs/execd/execd.go index 4f9f52da27ce5..e880c3f1d8b15 100644 --- a/plugins/outputs/execd/execd.go +++ b/plugins/outputs/execd/execd.go @@ -16,6 +16,7 @@ import ( type Execd struct { Command []string `toml:"command"` + Environment []string `toml:"environment"` RestartDelay config.Duration `toml:"restart_delay"` Log telegraf.Logger @@ -34,7 +35,7 @@ func (e *Execd) Init() error { var err error - e.process, err = process.New(e.Command) + e.process, err = process.New(e.Command, e.Environment) if err != nil { return fmt.Errorf("error creating process %s: %w", e.Command, err) } diff --git a/plugins/outputs/execd/execd_test.go b/plugins/outputs/execd/execd_test.go index 66bc28561a625..ff2ef5b92e4f7 100644 --- a/plugins/outputs/execd/execd_test.go +++ b/plugins/outputs/execd/execd_test.go @@ -32,6 +32,7 @@ func TestExternalOutputWorks(t *testing.T) { e := &Execd{ Command: []string{exe, "-testoutput"}, + Environment: []string{"PLUGINS_OUTPUTS_EXECD_MODE=application", "METRIC_NAME=cpu"}, RestartDelay: config.Duration(5 * time.Second), serializer: influxSerializer, Log: testutil.Logger{}, @@ -74,7 +75,8 @@ var testoutput = flag.Bool("testoutput", false, func TestMain(m *testing.M) { flag.Parse() - if *testoutput { + runMode := os.Getenv("PLUGINS_OUTPUTS_EXECD_MODE") + if *testoutput && runMode == "application" { runOutputConsumerProgram() os.Exit(0) } @@ -83,6 +85,7 @@ func TestMain(m *testing.M) { } func runOutputConsumerProgram() { + metricName := os.Getenv("METRIC_NAME") parser := influx.NewStreamParser(os.Stdin) for { @@ -103,7 +106,7 @@ func runOutputConsumerProgram() { os.Exit(1) } - expected := testutil.MustMetric("cpu", + expected := testutil.MustMetric(metricName, map[string]string{"name": "cpu1"}, map[string]interface{}{"idle": 50, "sys": 30}, now, diff --git a/plugins/outputs/sql/sql.go b/plugins/outputs/sql/sql.go index 4340f04c29d43..fb2d072736982 100644 --- a/plugins/outputs/sql/sql.go +++ b/plugins/outputs/sql/sql.go @@ -71,12 +71,12 @@ func (p *SQL) Close() error { // Quote an identifier (table or column name) func quoteIdent(name string) string { - return `"` + strings.Replace(sanitizeQuoted(name), `"`, `""`, -1) + `"` + return `"` + strings.ReplaceAll(sanitizeQuoted(name), `"`, `""`) + `"` } // Quote a string literal func quoteStr(name string) string { - return "'" + strings.Replace(name, "'", "''", -1) + "'" + return "'" + strings.ReplaceAll(name, "'", "''") + "'" } func sanitizeQuoted(in string) string { @@ -143,10 +143,10 @@ func (p *SQL) generateCreateTable(metric telegraf.Metric) string { } query := p.TableTemplate - query = strings.Replace(query, "{TABLE}", quoteIdent(metric.Name()), -1) - query 
= strings.Replace(query, "{TABLELITERAL}", quoteStr(metric.Name()), -1) - query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) - //query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) + query = strings.ReplaceAll(query, "{TABLE}", quoteIdent(metric.Name())) + query = strings.ReplaceAll(query, "{TABLELITERAL}", quoteStr(metric.Name())) + query = strings.ReplaceAll(query, "{COLUMNS}", strings.Join(columns, ",")) + //query = strings.ReplaceAll(query, "{KEY_COLUMNS}", strings.Join(pk, ",")) return query } @@ -175,7 +175,7 @@ func (p *SQL) generateInsert(tablename string, columns []string) string { } func (p *SQL) tableExists(tableName string) bool { - stmt := strings.Replace(p.TableExistsTemplate, "{TABLE}", quoteIdent(tableName), -1) + stmt := strings.ReplaceAll(p.TableExistsTemplate, "{TABLE}", quoteIdent(tableName)) _, err := p.db.Exec(stmt) return err == nil diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index bfe4ef3569207..06d6a5af6d9ba 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -162,7 +162,7 @@ func buildValue(v interface{}) (string, error) { case int64: retv = intToString(p) case string: - retv = fmt.Sprintf("'%s'", strings.Replace(p, "'", "\\'", -1)) + retv = fmt.Sprintf("'%s'", strings.ReplaceAll(p, "'", "\\'")) case bool: retv = boolToString(p) case uint64: diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index f869b3039c483..026785f034889 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -291,7 +291,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { if len(parts) == 2 { padded := fmt.Sprintf("%-9s", parts[1]) - nsString := strings.Replace(padded[:9], " ", "0", -1) + nsString := strings.ReplaceAll(padded[:9], " ", "0") nanosec, err := strconv.ParseInt(nsString, 10, 64) if err != nil { p.Log.Errorf("Error parsing %s to timestamp: %s", v, err) @@ -357,7 +357,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case Drop: // goodbye! default: - v = strings.Replace(v, ",", ".", -1) + v = strings.ReplaceAll(v, ",", ".") ts, err := time.ParseInLocation(t, v, p.loc) if err == nil { if ts.Year() == 0 { diff --git a/plugins/processors/execd/README.md b/plugins/processors/execd/README.md index b77857fe5c32b..7a3e0cc1a874b 100644 --- a/plugins/processors/execd/README.md +++ b/plugins/processors/execd/README.md @@ -30,6 +30,12 @@ Telegraf minimum version: Telegraf 1.15.0 ## eg: command = ["/path/to/your_program", "arg1", "arg2"] command = ["cat"] + ## Environment variables + ## Array of "key=value" pairs to pass as environment variables + ## e.g. 
"KEY=value", "USERNAME=John Doe", + ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" + # environment = [] + ## Delay before the process is restarted after an unexpected termination # restart_delay = "10s" ``` diff --git a/plugins/processors/execd/execd.go b/plugins/processors/execd/execd.go index bf1519e46d914..4deba08d6d703 100644 --- a/plugins/processors/execd/execd.go +++ b/plugins/processors/execd/execd.go @@ -19,6 +19,7 @@ import ( type Execd struct { Command []string `toml:"command"` + Environment []string `toml:"environment"` RestartDelay config.Duration `toml:"restart_delay"` Log telegraf.Logger @@ -54,7 +55,7 @@ func (e *Execd) Start(acc telegraf.Accumulator) error { } e.acc = acc - e.process, err = process.New(e.Command) + e.process, err = process.New(e.Command, e.Environment) if err != nil { return fmt.Errorf("error creating new process: %w", err) } diff --git a/plugins/processors/execd/execd_test.go b/plugins/processors/execd/execd_test.go index 26af720132959..48fd83d3682b6 100644 --- a/plugins/processors/execd/execd_test.go +++ b/plugins/processors/execd/execd_test.go @@ -24,6 +24,7 @@ func TestExternalProcessorWorks(t *testing.T) { require.NoError(t, err) t.Log(exe) e.Command = []string{exe, "-countmultiplier"} + e.Environment = []string{"PLUGINS_PROCESSORS_EXECD_MODE=application", "FIELD_NAME=count"} e.RestartDelay = config.Duration(5 * time.Second) acc := &testutil.Accumulator{} @@ -84,6 +85,7 @@ func TestParseLinesWithNewLines(t *testing.T) { require.NoError(t, err) t.Log(exe) e.Command = []string{exe, "-countmultiplier"} + e.Environment = []string{"PLUGINS_PROCESSORS_EXECD_MODE=application", "FIELD_NAME=count"} e.RestartDelay = config.Duration(5 * time.Second) acc := &testutil.Accumulator{} @@ -129,7 +131,8 @@ var countmultiplier = flag.Bool("countmultiplier", false, func TestMain(m *testing.M) { flag.Parse() - if *countmultiplier { + runMode := os.Getenv("PLUGINS_PROCESSORS_EXECD_MODE") + if *countmultiplier && runMode == "application" { runCountMultiplierProgram() os.Exit(0) } @@ -138,6 +141,7 @@ func TestMain(m *testing.M) { } func runCountMultiplierProgram() { + fieldName := os.Getenv("FIELD_NAME") parser := influx.NewStreamParser(os.Stdin) serializer, _ := serializers.NewInfluxSerializer() @@ -159,23 +163,23 @@ func runCountMultiplierProgram() { os.Exit(1) } - c, found := m.GetField("count") + c, found := m.GetField(fieldName) if !found { //nolint:errcheck,revive // Test will fail anyway - fmt.Fprintf(os.Stderr, "metric has no count field\n") + fmt.Fprintf(os.Stderr, "metric has no %s field\n", fieldName) //nolint:revive // os.Exit called intentionally os.Exit(1) } switch t := c.(type) { case float64: t *= 2 - m.AddField("count", t) + m.AddField(fieldName, t) case int64: t *= 2 - m.AddField("count", t) + m.AddField(fieldName, t) default: //nolint:errcheck,revive // Test will fail anyway - fmt.Fprintf(os.Stderr, "count is not an unknown type, it's a %T\n", c) + fmt.Fprintf(os.Stderr, "%s is not an unknown type, it's a %T\n", fieldName, c) //nolint:revive // os.Exit called intentionally os.Exit(1) } diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index a93bac9e26cb8..8cc3d5ba9c3d0 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -224,7 +224,7 @@ func (s *Strings) initOnce() { for _, c := range s.Replace { c := c c.fn = func(s string) string { - newString := strings.Replace(s, c.Old, c.New, -1) + newString := strings.ReplaceAll(s, c.Old, c.New) if newString == "" { return s 
} diff --git a/plugins/serializers/carbon2/carbon2.go b/plugins/serializers/carbon2/carbon2.go index db8c38222972e..58400c618601b 100644 --- a/plugins/serializers/carbon2/carbon2.go +++ b/plugins/serializers/carbon2/carbon2.go @@ -90,14 +90,14 @@ func (s *Serializer) createObject(metric telegraf.Metric) []byte { } for _, tag := range metric.TagList() { - m.WriteString(strings.Replace(tag.Key, " ", "_", -1)) //nolint:revive // from buffer.go: "err is always nil" - m.WriteString("=") //nolint:revive // from buffer.go: "err is always nil" + m.WriteString(strings.ReplaceAll(tag.Key, " ", "_")) //nolint:revive // from buffer.go: "err is always nil" + m.WriteString("=") //nolint:revive // from buffer.go: "err is always nil" value := tag.Value if len(value) == 0 { value = "null" } - m.WriteString(strings.Replace(value, " ", "_", -1)) //nolint:revive // from buffer.go: "err is always nil" - m.WriteString(" ") //nolint:revive // from buffer.go: "err is always nil" + m.WriteString(strings.ReplaceAll(value, " ", "_")) //nolint:revive // from buffer.go: "err is always nil" + m.WriteString(" ") //nolint:revive // from buffer.go: "err is always nil" } m.WriteString(" ") //nolint:revive // from buffer.go: "err is always nil" m.WriteString(formatValue(fieldValue)) //nolint:revive // from buffer.go: "err is always nil" @@ -122,15 +122,15 @@ func (s *Serializer) IsMetricsFormatUnset() bool { func serializeMetricFieldSeparate(name, fieldName string) string { return fmt.Sprintf("metric=%s field=%s ", - strings.Replace(name, " ", "_", -1), - strings.Replace(fieldName, " ", "_", -1), + strings.ReplaceAll(name, " ", "_"), + strings.ReplaceAll(fieldName, " ", "_"), ) } func serializeMetricIncludeField(name, fieldName string) string { return fmt.Sprintf("metric=%s_%s ", - strings.Replace(name, " ", "_", -1), - strings.Replace(fieldName, " ", "_", -1), + strings.ReplaceAll(name, " ", "_"), + strings.ReplaceAll(fieldName, " ", "_"), ) } diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index c6130c7b7c4b4..074b36e82376b 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -180,7 +180,7 @@ func SerializeBucketName( default: // This is a tag being applied if tagvalue, ok := tagsCopy[templatePart]; ok { - out = append(out, strings.Replace(tagvalue, ".", "_", -1)) + out = append(out, strings.ReplaceAll(tagvalue, ".", "_")) delete(tagsCopy, templatePart) } } @@ -307,7 +307,7 @@ func buildTags(tags map[string]string) string { var tagStr string for i, k := range keys { - tagValue := strings.Replace(tags[k], ".", "_", -1) + tagValue := strings.ReplaceAll(tags[k], ".", "_") if i == 0 { tagStr += tagValue } else { diff --git a/scripts/telegraf.service b/scripts/telegraf.service index c4eed38ea79f8..1726913b02b8b 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -1,7 +1,8 @@ [Unit] Description=The plugin-driven server agent for reporting metrics into InfluxDB Documentation=https://github.com/influxdata/telegraf -After=network.target +After=network-online.target +Wants=network-online.target [Service] Type=notify
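Taken together, the process, exec, and execd changes in this patch all thread the new `environment` option through to the spawned child the same way: inherit the Telegraf process environment via `os.Environ()` and append the user-configured `"key=value"` pairs after it. Below is a minimal, self-contained sketch of that pattern; it assumes a Unix-like system with `/bin/sh`, and the `METRIC_NAME` variable mirrors the test fixtures above for illustration only.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// What a plugin receives from a config such as:
	//   environment = ["METRIC_NAME=counter"]
	environment := []string{"METRIC_NAME=counter"}

	cmd := exec.Command("/bin/sh", "-c", "echo metric name: $METRIC_NAME")

	// Same guard the patch uses: only set cmd.Env when the user configured
	// something; a nil cmd.Env already means "inherit the parent environment".
	if len(environment) > 0 {
		cmd.Env = append(os.Environ(), environment...)
	}

	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Print(string(out)) // prints: metric name: counter
}
```

Appending after `os.Environ()` rather than replacing it keeps `PATH`, `HOME`, and similar variables intact, which is why the plugins above can pass `LD_LIBRARY_PATH` or test-mode switches to the child without breaking it.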