diff --git a/.github/workflows/main.yml b/.github/workflows/goreleaser.yml
similarity index 100%
rename from .github/workflows/main.yml
rename to .github/workflows/goreleaser.yml
diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml
new file mode 100644
index 00000000..e3fb54cc
--- /dev/null
+++ b/.github/workflows/linter.yml
@@ -0,0 +1,22 @@
+name: linter
+
+on: push
+
+jobs:
+ linter:
+ runs-on: ubuntu-latest
+ steps:
+ - name: checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: set up go
+ uses: actions/setup-go@v4
+ with:
+ go-version: ">=1.20"
+
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v3
+ with:
+ args: --config .golangci.yml
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 00000000..fd7d6a8a
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,35 @@
+run:
+ timeout: 30m
+output:
+ format: line-number
+linters:
+ disable-all: false
+ enable:
+ - bodyclose
+ - depguard
+ - dogsled
+ #- dupl
+ - errcheck
+ - exportloopref
+ #- funlen
+ - gocognit
+ - goconst
+ - gocritic
+ - godox
+ - gofmt
+ - goimports
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - nakedret
+ - revive
+ - staticcheck
+ - stylecheck
+ - typecheck
+ - unconvert
+ - unparam
+ - unused
+ - whitespace
+ - reassign
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..19d0837f
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,12 @@
+all: test
+
+test:
+ @go test -v ./...
+
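+# check runs the same golangci-lint configuration used by the linter workflow; golangci-lint must be installed locally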
+check:
+ @golangci-lint run --config .golangci.yml
+
+coverage:
+ @go test -cover -coverprofile cover.out -v ./...
+ @go tool cover -func=cover.out
+ @rm -f cover.out
diff --git a/collectors/raw_metrics_collector.go b/collectors/raw_metrics_collector.go
index 1adbfe06..ac89c379 100644
--- a/collectors/raw_metrics_collector.go
+++ b/collectors/raw_metrics_collector.go
@@ -2,17 +2,18 @@ package collectors
import (
"compress/gzip"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
"github.com/bosh-prometheus/firehose_exporter/metrics"
"github.com/gogo/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
log "github.com/sirupsen/logrus"
- "io"
- "net/http"
- "strings"
- "sync"
- "time"
)
var gzipPool = sync.Pool{
@@ -32,7 +33,6 @@ func NewRawMetricsCollector(
pointBuffer chan []*metrics.RawMetric,
metricExpireIn time.Duration,
) *RawMetricsCollector {
-
return &RawMetricsCollector{
pointBuffer: pointBuffer,
metricStore: &sync.Map{},
@@ -46,7 +46,7 @@ func (c *RawMetricsCollector) Collect() {
for _, point := range points {
smapMetric, _ := c.metricStore.LoadOrStore(point.MetricName(), &sync.Map{})
point.ExpireIn(c.metricExpireIn)
- smapMetric.(*sync.Map).Store(point.Id(), point)
+ smapMetric.(*sync.Map).Store(point.ID(), point)
}
}
}
diff --git a/firehose_exporter.go b/firehose_exporter.go
index e93128f0..91436f75 100644
--- a/firehose_exporter.go
+++ b/firehose_exporter.go
@@ -8,21 +8,19 @@ import (
"time"
"code.cloudfoundry.org/go-loggregator/v8"
+ "github.com/alecthomas/kingpin/v2"
"github.com/bosh-prometheus/firehose_exporter/collectors"
"github.com/bosh-prometheus/firehose_exporter/metricmaker"
"github.com/bosh-prometheus/firehose_exporter/metrics"
"github.com/bosh-prometheus/firehose_exporter/nozzle"
"github.com/prometheus/common/version"
log "github.com/sirupsen/logrus"
- "github.com/alecthomas/kingpin/v2"
)
var (
- retroCompatDisable = kingpin.Flag("retro_compat.disable", "Disable retro compatibility",
- ).Envar("FIREHOSE_EXPORTER_RETRO_COMPAT_DISABLE").Default("false").Bool()
+ retroCompatDisable = kingpin.Flag("retro_compat.disable", "Disable retro compatibility").Envar("FIREHOSE_EXPORTER_RETRO_COMPAT_DISABLE").Default("false").Bool()
- enableRetroCompatDelta = kingpin.Flag("retro_compat.enable_delta", "Enable retro compatibility delta in counter",
- ).Envar("FIREHOSE_EXPORTER_RETRO_COMPAT_ENABLE_DELTA").Default("false").Bool()
+ enableRetroCompatDelta = kingpin.Flag("retro_compat.enable_delta", "Enable retro compatibility delta in counter").Envar("FIREHOSE_EXPORTER_RETRO_COMPAT_ENABLE_DELTA").Default("false").Bool()
loggingURL = kingpin.Flag(
"logging.url", "Cloud Foundry Logging endpoint ($FIREHOSE_EXPORTER_LOGGING_URL)",
@@ -48,7 +46,7 @@ var (
"metrics.batch_size", "Batch size for nozzle envelop buffer ($FIREHOSE_EXPORTER_METRICS_NAMESPACE)",
).Envar("FIREHOSE_EXPORTER_METRICS_BATCH_SIZE").Default("-1").Int()
- metricsShardId = kingpin.Flag(
+ metricsShardID = kingpin.Flag(
"metrics.shard_id", "The sharding group name to use for egress from RLP ($FIREHOSE_EXPORTER_SHARD_ID)",
).Envar("FIREHOSE_EXPORTER_SHARD_ID").Default("firehose_exporter").String()
@@ -104,14 +102,11 @@ var (
"web.tls.key_file", "Path to a file that contains the TLS private key (PEM format) ($FIREHOSE_EXPORTER_WEB_TLS_KEYFILE)",
).Envar("FIREHOSE_EXPORTER_WEB_TLS_KEYFILE").ExistingFile()
- enableProfiler = kingpin.Flag("profiler.enable", "Enable pprof profiling on app on /debug/pprof",
- ).Envar("FIREHOSE_EXPORTER_ENABLE_PROFILER").Default("false").Bool()
+ enableProfiler = kingpin.Flag("profiler.enable", "Enable pprof profiling on app on /debug/pprof").Envar("FIREHOSE_EXPORTER_ENABLE_PROFILER").Default("false").Bool()
- logLevel = kingpin.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
- ).Envar("FIREHOSE_EXPORTER_LOG_LEVEL").Default("info").String()
+ logLevel = kingpin.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]").Envar("FIREHOSE_EXPORTER_LOG_LEVEL").Default("info").String()
- logInJson = kingpin.Flag("log.in_json", "Log in json",
- ).Envar("FIREHOSE_EXPORTER_LOG_IN_JSON").Default("false").Bool()
+ logInJSON = kingpin.Flag("log.in_json", "Log in json").Envar("FIREHOSE_EXPORTER_LOG_IN_JSON").Default("false").Bool()
)
type basicAuthHandler struct {
@@ -129,7 +124,6 @@ func (h *basicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
h.handler(w, r)
- return
}
func initLog() {
@@ -138,7 +132,7 @@ func initLog() {
log.Panic(err.Error())
}
log.SetLevel(logLvl)
- if *logInJson {
+ if *logInJSON {
log.SetFormatter(&log.JSONFormatter{})
}
}
@@ -154,7 +148,6 @@ func initMetricMaker() {
} else {
metricmaker.PrependMetricConverter(metricmaker.SuffixCounterWithTotal)
}
-
}
func MakeStreamer() (*loggregator.EnvelopeStreamConnector, error) {
@@ -210,7 +203,7 @@ func main() {
im := metrics.NewInternalMetrics(*metricsNamespace, *metricsEnvironment)
nozz := nozzle.NewNozzle(
streamer,
- *metricsShardId,
+ *metricsShardID,
*metricsNodeIndex,
pointBuffer,
im,
@@ -249,7 +242,7 @@ func main() {
router.Handle("/debug/vars", expvar.Handler())
}
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(`
+ _, _ = w.Write([]byte(`
Cloud Foundry Firehose Exporter
Cloud Foundry Firehose Exporter
@@ -258,13 +251,21 @@ func main() {
`))
})
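+ // use an explicit http.Server so the router is attached and read timeouts are set (bare http.ListenAndServe offers no timeouts, which gosec flags)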
+ server := &http.Server{
+ Addr: *listenAddress,
+ Handler: router,
+ ReadTimeout: time.Second * 5,
+ ReadHeaderTimeout: time.Second * 10,
+ }
+
if *tlsCertFile != "" && *tlsKeyFile != "" {
log.Infoln("Listening TLS on", *listenAddress)
- log.Fatal(http.ListenAndServeTLS(*listenAddress, *tlsCertFile, *tlsKeyFile, router))
+ err = server.ListenAndServeTLS(*tlsCertFile, *tlsKeyFile)
} else {
log.Infoln("Listening on", *listenAddress)
- log.Fatal(http.ListenAndServe(*listenAddress, router))
+ err = server.ListenAndServe()
}
+
+ log.Fatal(err)
}
func prometheusHandler(collector *collectors.RawMetricsCollector) http.Handler {
diff --git a/metricmaker/converters.go b/metricmaker/converters.go
index 85574939..9702fb21 100644
--- a/metricmaker/converters.go
+++ b/metricmaker/converters.go
@@ -33,7 +33,7 @@ func OrderAndSanitizeLabels(metric *metrics.RawMetric) {
continue
}
if strings.Contains(label.GetName(), "-") {
- label.Name = proto.String(strings.Replace(label.GetName(), "-", "_", -1))
+ label.Name = proto.String(strings.ReplaceAll(label.GetName(), "-", "_"))
}
labels = append(labels, label)
}
diff --git a/metricmaker/metricmaker_suite_test.go b/metricmaker/metricmaker_suite_test.go
index 76d987f4..bf7932f9 100644
--- a/metricmaker/metricmaker_suite_test.go
+++ b/metricmaker/metricmaker_suite_test.go
@@ -11,4 +11,3 @@ func TestMetricmaker(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Metricate Suite")
}
-
diff --git a/metricmaker/metricmaker_test.go b/metricmaker/metricmaker_test.go
index 98f47406..b1732227 100644
--- a/metricmaker/metricmaker_test.go
+++ b/metricmaker/metricmaker_test.go
@@ -105,11 +105,11 @@ var _ = Describe("MetricMaker", func() {
Message: &loggregator_v2.Envelope_Gauge{
Gauge: &loggregator_v2.Gauge{
Metrics: map[string]*loggregator_v2.GaugeValue{
- "my_metric_1": &loggregator_v2.GaugeValue{
+ "my_metric_1": {
Unit: "bytes",
Value: 1,
},
- "my_metric_2": &loggregator_v2.GaugeValue{
+ "my_metric_2": {
Unit: "bytes",
Value: 1,
},
diff --git a/metrics/consts.go b/metrics/consts.go
index b0615e02..7e0d0247 100644
--- a/metrics/consts.go
+++ b/metrics/consts.go
@@ -1,8 +1,8 @@
package metrics
const (
- GorouterHttpMetricName = "http"
- GorouterHttpCounterMetricName = GorouterHttpMetricName + "_total"
- GorouterHttpHistogramMetricName = GorouterHttpMetricName + "_duration_seconds"
- GorouterHttpSummaryMetricName = GorouterHttpMetricName + "_response_size_bytes"
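+ // Names of the metrics produced from gorouter HTTP timer envelopes by the nozzle rollups.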
+ GorouterHTTPMetricName = "http"
+ GorouterHTTPCounterMetricName = GorouterHTTPMetricName + "_total"
+ GorouterHTTPHistogramMetricName = GorouterHTTPMetricName + "_duration_seconds"
+ GorouterHTTPSummaryMetricName = GorouterHTTPMetricName + "_response_size_bytes"
)
diff --git a/metrics/internal_metrics.go b/metrics/internal_metrics.go
index 84a6581e..a4897326 100644
--- a/metrics/internal_metrics.go
+++ b/metrics/internal_metrics.go
@@ -20,8 +20,8 @@ type InternalMetrics struct {
LastCounterEventReceivedTimestamp prometheus.Gauge
TotalValueMetricsReceived prometheus.Counter
LastValueMetricReceivedTimestamp prometheus.Gauge
- TotalHttpMetricsReceived prometheus.Counter
- LastHttpMetricReceivedTimestamp prometheus.Gauge
+ TotalHTTPMetricsReceived prometheus.Counter
+ LastHTTPMetricReceivedTimestamp prometheus.Gauge
}
func NewInternalMetrics(namespace string, environment string) *InternalMetrics {
@@ -109,7 +109,7 @@ func NewInternalMetrics(namespace string, environment string) *InternalMetrics {
},
)
- im.TotalHttpMetricsReceived = promauto.NewCounter(
+ im.TotalHTTPMetricsReceived = promauto.NewCounter(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: "",
@@ -118,7 +118,7 @@ func NewInternalMetrics(namespace string, environment string) *InternalMetrics {
ConstLabels: prometheus.Labels{"environment": environment},
},
)
- im.LastHttpMetricReceivedTimestamp = promauto.NewGauge(
+ im.LastHTTPMetricReceivedTimestamp = promauto.NewGauge(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: "",
diff --git a/metrics/raw_metric.go b/metrics/raw_metric.go
index 69a91b83..bb8d74ef 100644
--- a/metrics/raw_metric.go
+++ b/metrics/raw_metric.go
@@ -121,7 +121,7 @@ func (r *RawMetric) EstimateMetricSize() (size int) {
return size
}
-func (r *RawMetric) Id() uint64 {
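+// ID returns a hash identifying the metric by its label set (excluding the metric name label); it is computed once and cached.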
+func (r *RawMetric) ID() uint64 {
if r.id != 0 {
return r.id
}
@@ -131,8 +131,8 @@ func (r *RawMetric) Id() uint64 {
if label.GetName() == model.MetricNameLabel {
continue
}
- xxh.WriteString("$" + label.GetName() + "$" + label.GetValue())
- xxh.Write(separatorByteSlice)
+ _, _ = xxh.WriteString("$" + label.GetName() + "$" + label.GetValue())
+ _, _ = xxh.Write(separatorByteSlice)
}
r.id = xxh.Sum64()
return r.id
diff --git a/metrics/raw_metric_test.go b/metrics/raw_metric_test.go
index c474904f..797702a0 100644
--- a/metrics/raw_metric_test.go
+++ b/metrics/raw_metric_test.go
@@ -69,8 +69,8 @@ var _ = Describe("RawMetric", func() {
}),
})
- Expect(m1.Id()).To(Equal(m2.Id()))
- Expect(m1.Id()).ToNot(Equal(m3.Id()))
+ Expect(m1.ID()).To(Equal(m2.ID()))
+ Expect(m1.ID()).ToNot(Equal(m3.ID()))
})
})
})
diff --git a/nozzle/filter_selector.go b/nozzle/filter_selector.go
index 88ebfc42..21648380 100644
--- a/nozzle/filter_selector.go
+++ b/nozzle/filter_selector.go
@@ -9,13 +9,13 @@ import (
type FilterSelectorType int32
const (
- FilterSelectorType_CONTAINER_METRIC FilterSelectorType = 0
- FilterSelectorType_COUNTER_EVENT FilterSelectorType = 1
- FilterSelectorType_HTTP_START_STOP FilterSelectorType = 2
- FilterSelectorType_VALUE_METRIC FilterSelectorType = 3
+ FilterSelectorTypeContainerMetric FilterSelectorType = 0
+ FilterSelectorTypeCounterEvent FilterSelectorType = 1
+ FilterSelectorTypeHTTPStartStop FilterSelectorType = 2
+ FilterSelectorTypeValueMetric FilterSelectorType = 3
)
-var FilterSelectorType_value = map[string]int32{
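+// FilterSelectorTypeValue maps lower-cased selector names, as passed on the command line, to their FilterSelectorType values.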
+var FilterSelectorTypeValue = map[string]int32{
"containermetric": 0,
"counterevent": 1,
"httpstartstop": 2,
@@ -60,7 +60,7 @@ func (f FilterSelector) ValueMetricDisabled() bool {
return f.valueMetricDisabled
}
-func (f FilterSelector) HttpStartStopDisabled() bool {
+func (f FilterSelector) HTTPStartStopDisabled() bool {
return f.httpStartStopDisabled
}
@@ -79,13 +79,13 @@ func (f FilterSelector) AllGaugeDisabled() bool {
func (f *FilterSelector) Filters(filterSelectorTypes ...FilterSelectorType) {
for _, filterSelectorType := range filterSelectorTypes {
switch filterSelectorType {
- case FilterSelectorType_CONTAINER_METRIC:
+ case FilterSelectorTypeContainerMetric:
f.containerMetricDisabled = false
- case FilterSelectorType_COUNTER_EVENT:
+ case FilterSelectorTypeCounterEvent:
f.counterEventDisabled = false
- case FilterSelectorType_HTTP_START_STOP:
+ case FilterSelectorTypeHTTPStartStop:
f.httpStartStopDisabled = false
- case FilterSelectorType_VALUE_METRIC:
+ case FilterSelectorTypeValueMetric:
f.valueMetricDisabled = false
}
}
@@ -94,7 +94,7 @@ func (f *FilterSelector) Filters(filterSelectorTypes ...FilterSelectorType) {
func (f *FilterSelector) FiltersByNames(filterSelectorNames ...string) {
filterSelectorTypes := make([]FilterSelectorType, 0)
for _, filterSelectorName := range filterSelectorNames {
- if selectorType, ok := FilterSelectorType_value[strings.ToLower(filterSelectorName)]; ok {
+ if selectorType, ok := FilterSelectorTypeValue[strings.ToLower(filterSelectorName)]; ok {
filterSelectorTypes = append(filterSelectorTypes, FilterSelectorType(selectorType))
}
}
@@ -117,7 +117,7 @@ func (f *FilterSelector) ToSelectorTypes() []*loggregator_v2.Selector {
},
})
}
- if !f.HttpStartStopDisabled() {
+ if !f.HTTPStartStopDisabled() {
selectors = append(selectors, &loggregator_v2.Selector{
Message: &loggregator_v2.Selector_Timer{
Timer: &loggregator_v2.TimerSelector{},
diff --git a/nozzle/nozzle.go b/nozzle/nozzle.go
index d8ba79de..e72a82fc 100644
--- a/nozzle/nozzle.go
+++ b/nozzle/nozzle.go
@@ -21,19 +21,19 @@ import (
const (
MaxBatchSizeInBytes = 32 * 1024
- lenGuid = 36
+ lenGUID = 36
)
-var regexGuid = regexp.MustCompile(`(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}`)
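+// regexGUID matches a canonical 8-4-4-4-12 hex GUID, optionally wrapped in braces.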
+var regexGUID = regexp.MustCompile(`(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}`)
// Nozzle reads envelopes and writes points to firehose_exporter.
type Nozzle struct {
internalMetrics *metrics.InternalMetrics
- s StreamConnector
- shardId string
- nodeIndex int
- ingressBuffer *diodes.OneToOne
+ s StreamConnector
+ shardID string
+ nodeIndex int
+ ingressBuffer *diodes.OneToOne
timerBuffer *diodes.OneToOne
timerRollupBufferSize uint
@@ -55,11 +55,11 @@ type StreamConnector interface {
}
const (
- BATCH_FLUSH_INTERVAL = 500 * time.Millisecond
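+ // BatchFlushInterval is the maximum time buffered points wait before being flushed to the point channel.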
+ BatchFlushInterval = 500 * time.Millisecond
)
func NewNozzle(c StreamConnector,
- shardId string,
+ shardID string,
nodeIndex int,
pointBuffer chan []*metrics.RawMetric,
internalMetrics *metrics.InternalMetrics,
@@ -67,7 +67,7 @@ func NewNozzle(c StreamConnector,
n := &Nozzle{
internalMetrics: internalMetrics,
s: c,
- shardId: shardId,
+ shardID: shardID,
nodeIndex: nodeIndex,
timerRollupBufferSize: 4096,
totalRollup: rollup.NewNullRollup(),
@@ -122,7 +122,6 @@ func WithNozzleTimerRollup(interval time.Duration, totalResponseSizeRollupTags,
nodeIndex := strconv.Itoa(n.nodeIndex)
n.totalRollup = rollup.NewCounterRollup(nodeIndex, totalResponseSizeRollupTags)
n.responseSizeRollup = rollup.NewSummaryRollup(nodeIndex, totalResponseSizeRollupTags)
- // TODO: rename HistogramRollup
n.durationRollup = rollup.NewHistogramRollup(nodeIndex, durationRollupTags)
}
}
@@ -130,7 +129,6 @@ func WithNozzleTimerRollup(interval time.Duration, totalResponseSizeRollupTags,
// Start() starts reading envelopes from the logs provider and writes them to
// firehose_exporter.
func (n *Nozzle) Start() {
-
rx := n.s.Stream(context.Background(), n.buildBatchReq())
go n.timerProcessor()
@@ -145,7 +143,7 @@ func (n *Nozzle) pointBatcher() {
poller := diodes.NewPoller(n.ingressBuffer)
points := make([]*metrics.RawMetric, 0)
- t := time.NewTimer(BATCH_FLUSH_INTERVAL)
+ t := time.NewTimer(BatchFlushInterval)
for {
data, found := poller.TryNext()
@@ -161,7 +159,7 @@ func (n *Nozzle) pointBatcher() {
if len(points) > 0 {
points = n.writeToChannelOrDiscard(points)
}
- t.Reset(BATCH_FLUSH_INTERVAL)
+ t.Reset(BatchFlushInterval)
size = 0
default:
// Do we care if one envelope produces multiple points, in which a
@@ -170,7 +168,7 @@ func (n *Nozzle) pointBatcher() {
// if len(points) >= BATCH_CHANNEL_SIZE {
if size >= MaxBatchSizeInBytes {
points = n.writeToChannelOrDiscard(points)
- t.Reset(BATCH_FLUSH_INTERVAL)
+ t.Reset(BatchFlushInterval)
size = 0
}
@@ -195,9 +193,9 @@ func (n *Nozzle) writeToChannelOrDiscard(points []*metrics.RawMetric) []*metrics
n.internalMetrics.LastContainerMetricReceivedTimestamp.Set(float64(time.Now().Unix()))
continue
}
- if utils.MetricIsHttpMetric(point) {
- n.internalMetrics.TotalHttpMetricsReceived.Inc()
- n.internalMetrics.LastHttpMetricReceivedTimestamp.Set(float64(time.Now().Unix()))
+ if utils.MetricIsHTTPMetric(point) {
+ n.internalMetrics.TotalHTTPMetricsReceived.Inc()
+ n.internalMetrics.LastHTTPMetricReceivedTimestamp.Set(float64(time.Now().Unix()))
continue
}
if *point.MetricType() == dto.MetricType_GAUGE {
@@ -221,7 +219,6 @@ func (n *Nozzle) writeToChannelOrDiscard(points []*metrics.RawMetric) []*metrics
}
func (n *Nozzle) envelopeReader(rx loggregator.EnvelopeStream) {
-
for {
envelopeBatch := rx()
for _, envelope := range envelopeBatch {
@@ -246,7 +243,7 @@ func (n *Nozzle) timerProcessor() {
}
// we skip metric with source_id with a guid (guid means an app) to avoid duplicate with metric from cf_app
- if envelope.Tags["app_id"] == "" && len(envelope.GetSourceId()) == lenGuid && regexGuid.MatchString(envelope.GetSourceId()) {
+ if envelope.Tags["app_id"] == "" && len(envelope.GetSourceId()) == lenGUID && regexGUID.MatchString(envelope.GetSourceId()) {
continue
}
@@ -311,15 +308,15 @@ func (n *Nozzle) timerEmitter() {
}
if len(points) > 0 {
- points = n.writeToChannelOrDiscard(points)
+ n.writeToChannelOrDiscard(points)
}
}
}
-func (n *Nozzle) captureGorouterHttpTimerMetricsForRollup(envelope *loggregator_v2.Envelope) {
+func (n *Nozzle) captureGorouterHTTPTimerMetricsForRollup(envelope *loggregator_v2.Envelope) {
timer := envelope.GetTimer()
- if timer.GetName() != metrics.GorouterHttpMetricName {
+ if timer.GetName() != metrics.GorouterHTTPMetricName {
return
}
@@ -348,7 +345,7 @@ func (n *Nozzle) convertEnvelopeToPoints(envelope *loggregator_v2.Envelope) []*m
envelope.GetGauge().Metrics = metricsGauge
case *loggregator_v2.Envelope_Timer:
- n.captureGorouterHttpTimerMetricsForRollup(envelope)
+ n.captureGorouterHTTPTimerMetricsForRollup(envelope)
return []*metrics.RawMetric{}
}
return metricmaker.NewRawMetricsFromEnvelop(envelope)
@@ -356,7 +353,7 @@ func (n *Nozzle) convertEnvelopeToPoints(envelope *loggregator_v2.Envelope) []*m
func (n *Nozzle) buildBatchReq() *loggregator_v2.EgressBatchRequest {
return &loggregator_v2.EgressBatchRequest{
- ShardId: n.shardId,
+ ShardId: n.shardID,
UsePreferredTags: true,
Selectors: n.filterSelector.ToSelectorTypes(),
}
diff --git a/nozzle/nozzle_suite_test.go b/nozzle/nozzle_suite_test.go
index 248722ca..701345ac 100644
--- a/nozzle/nozzle_suite_test.go
+++ b/nozzle/nozzle_suite_test.go
@@ -23,10 +23,11 @@ func TestNozzle(t *testing.T) {
RunSpecs(t, "Nozzle Suite")
}
-func addEnvelope(total uint64, name, sourceId string, c *spyStreamConnector) {
+//nolint:unparam
+func addEnvelope(total uint64, name, sourceID string, c *spyStreamConnector) {
c.envelopes <- []*loggregator_v2.Envelope{
{
- SourceId: sourceId,
+ SourceId: sourceID,
Tags: map[string]string{},
Message: &loggregator_v2.Envelope_Counter{
Counter: &loggregator_v2.Counter{Name: name, Total: total},
@@ -36,9 +37,9 @@ func addEnvelope(total uint64, name, sourceId string, c *spyStreamConnector) {
}
type spyStreamConnector struct {
- mu sync.Mutex
- requests_ []*loggregator_v2.EgressBatchRequest
- envelopes chan []*loggregator_v2.Envelope
+ mu sync.Mutex
+ internalRequests []*loggregator_v2.EgressBatchRequest
+ envelopes chan []*loggregator_v2.Envelope
}
func newSpyStreamConnector() *spyStreamConnector {
@@ -50,11 +51,10 @@ func newSpyStreamConnector() *spyStreamConnector {
func (s *spyStreamConnector) Stream(_ context.Context, req *loggregator_v2.EgressBatchRequest) loggregator.EnvelopeStream {
s.mu.Lock()
defer s.mu.Unlock()
- s.requests_ = append(s.requests_, req)
+ s.internalRequests = append(s.internalRequests, req)
return func() []*loggregator_v2.Envelope {
select {
-
case ee := <-s.envelopes:
finalEnvelopes := make([]*loggregator_v2.Envelope, 0)
for _, e := range ee {
@@ -83,8 +83,8 @@ func (s *spyStreamConnector) requests() []*loggregator_v2.EgressBatchRequest {
s.mu.Lock()
defer s.mu.Unlock()
- reqs := make([]*loggregator_v2.EgressBatchRequest, len(s.requests_))
- copy(reqs, s.requests_)
+ reqs := make([]*loggregator_v2.EgressBatchRequest, len(s.internalRequests))
+ copy(reqs, s.internalRequests)
return reqs
}
diff --git a/nozzle/nozzle_test.go b/nozzle/nozzle_test.go
index cc753c7e..bf65c413 100644
--- a/nozzle/nozzle_test.go
+++ b/nozzle/nozzle_test.go
@@ -196,7 +196,7 @@ var _ = Describe("Nozzle", func() {
Message: &loggregator_v2.Envelope_Gauge{
Gauge: &loggregator_v2.Gauge{
Metrics: map[string]*loggregator_v2.GaugeValue{
- "cpu": &loggregator_v2.GaugeValue{
+ "cpu": {
Unit: "",
Value: 1,
},
@@ -211,7 +211,7 @@ var _ = Describe("Nozzle", func() {
Message: &loggregator_v2.Envelope_Gauge{
Gauge: &loggregator_v2.Gauge{
Metrics: map[string]*loggregator_v2.GaugeValue{
- "a_gauge": &loggregator_v2.GaugeValue{
+ "a_gauge": {
Unit: "",
Value: 1,
},
@@ -235,7 +235,7 @@ var _ = Describe("Nozzle", func() {
})
Describe("when selector CounterEvent", func() {
BeforeEach(func() {
- filterSelector.Filters(FilterSelectorType_COUNTER_EVENT)
+ filterSelector.Filters(FilterSelectorTypeCounterEvent)
})
It("should only take counter metric", func() {
Eventually(metricStore.GetPoints).Should(HaveLen(1))
@@ -247,7 +247,7 @@ var _ = Describe("Nozzle", func() {
})
Describe("when selector ContainerMetric", func() {
BeforeEach(func() {
- filterSelector.Filters(FilterSelectorType_CONTAINER_METRIC)
+ filterSelector.Filters(FilterSelectorTypeContainerMetric)
})
It("should only take container metric", func() {
Eventually(metricStore.GetPoints).Should(HaveLen(1))
@@ -259,7 +259,7 @@ var _ = Describe("Nozzle", func() {
})
Describe("when selector ValueMetric", func() {
BeforeEach(func() {
- filterSelector.Filters(FilterSelectorType_VALUE_METRIC)
+ filterSelector.Filters(FilterSelectorTypeValueMetric)
})
It("should only take gauge metric which is not container metric", func() {
Eventually(metricStore.GetPoints).Should(HaveLen(1))
@@ -271,7 +271,7 @@ var _ = Describe("Nozzle", func() {
})
Describe("when selector Http", func() {
BeforeEach(func() {
- filterSelector.Filters(FilterSelectorType_HTTP_START_STOP)
+ filterSelector.Filters(FilterSelectorTypeHTTPStartStop)
})
It("should only take timer metric", func() {
Eventually(metricStore.GetPoints).Should(HaveLen(2))
@@ -311,7 +311,7 @@ var _ = Describe("Nozzle", func() {
Message: &loggregator_v2.Envelope_Gauge{
Gauge: &loggregator_v2.Gauge{
Metrics: map[string]*loggregator_v2.GaugeValue{
- "memory": &loggregator_v2.GaugeValue{
+ "memory": {
Unit: "",
Value: 1,
},
@@ -328,7 +328,7 @@ var _ = Describe("Nozzle", func() {
Message: &loggregator_v2.Envelope_Gauge{
Gauge: &loggregator_v2.Gauge{
Metrics: map[string]*loggregator_v2.GaugeValue{
- "cpu": &loggregator_v2.GaugeValue{
+ "cpu": {
Unit: "",
Value: 1,
},
diff --git a/nozzle/rollup/counter.go b/nozzle/rollup/counter.go
index b176bc2c..38e873d7 100644
--- a/nozzle/rollup/counter.go
+++ b/nozzle/rollup/counter.go
@@ -1,15 +1,20 @@
package rollup
import (
+ "sync"
+ "time"
+
"github.com/bosh-prometheus/firehose_exporter/metricmaker"
"github.com/bosh-prometheus/firehose_exporter/metrics"
"github.com/bosh-prometheus/firehose_exporter/transform"
"github.com/gogo/protobuf/proto"
- "sync"
- "time"
)
-type counterRollup struct {
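+// OriginCfApp is the origin label value applied to rollup metrics that carry an app_id label.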
+var (
+ OriginCfApp = "cf_app"
+)
+
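+// CounterRollup accumulates recorded gorouter HTTP request counts into http_total counter points per rollup-tag combination.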
+type CounterRollup struct {
nodeIndex string
rollupTags []string
countersInInterval *sync.Map
@@ -20,17 +25,17 @@ type counterRollup struct {
cleanPeriodicDuration time.Duration
}
-type CounterOpt func(r *counterRollup)
+type CounterOpt func(r *CounterRollup)
func SetCounterCleaning(metricExpireIn time.Duration, cleanPeriodicDuration time.Duration) CounterOpt {
- return func(r *counterRollup) {
+ return func(r *CounterRollup) {
r.metricExpireIn = metricExpireIn
r.cleanPeriodicDuration = cleanPeriodicDuration
}
}
-func NewCounterRollup(nodeIndex string, rollupTags []string, opts ...CounterOpt) *counterRollup {
- cr := &counterRollup{
+func NewCounterRollup(nodeIndex string, rollupTags []string, opts ...CounterOpt) *CounterRollup {
+ cr := &CounterRollup{
nodeIndex: nodeIndex,
rollupTags: rollupTags,
countersInInterval: &sync.Map{},
@@ -46,7 +51,7 @@ func NewCounterRollup(nodeIndex string, rollupTags []string, opts ...CounterOpt)
return cr
}
-func (r *counterRollup) CleanPeriodic() {
+func (r *CounterRollup) CleanPeriodic() {
for {
time.Sleep(r.cleanPeriodicDuration)
now := time.Now()
@@ -66,8 +71,8 @@ func (r *counterRollup) CleanPeriodic() {
}
}
-func (r *counterRollup) Record(sourceId string, tags map[string]string, value int64) {
- key := keyFromTags(r.rollupTags, sourceId, tags)
+func (r *CounterRollup) Record(sourceID string, tags map[string]string, value int64) {
+ key := keyFromTags(r.rollupTags, sourceID, tags)
r.countersInInterval.Store(key, struct{}{})
@@ -79,19 +84,16 @@ func (r *counterRollup) Record(sourceId string, tags map[string]string, value in
r.keyCleaningTime.Store(key, time.Now())
}
-func (r *counterRollup) Rollup(timestamp int64) []*PointsBatch {
+func (r *CounterRollup) Rollup(timestamp int64) []*PointsBatch {
var batches []*PointsBatch
r.countersInInterval.Range(func(k, _ interface{}) bool {
- labels, err := labelsFromKey(k.(string), r.nodeIndex, r.rollupTags)
- if err != nil {
- return true
- }
+ labels := labelsFromKey(k.(string), r.nodeIndex, r.rollupTags)
if _, ok := labels["app_id"]; ok {
- labels["origin"] = "cf_app"
+ labels["origin"] = OriginCfApp
}
value, _ := r.counters.Load(k)
- metric := metricmaker.NewRawMetricCounter(metrics.GorouterHttpCounterMetricName, labels, float64(value.(int64)))
+ metric := metricmaker.NewRawMetricCounter(metrics.GorouterHTTPCounterMetricName, labels, float64(value.(int64)))
metric.Metric().TimestampMs = proto.Int64(transform.NanosecondsToMilliseconds(timestamp))
batches = append(batches, &PointsBatch{
Points: []*metrics.RawMetric{metric},
diff --git a/nozzle/rollup/counter_test.go b/nozzle/rollup/counter_test.go
index 49cc160b..b9f181b1 100644
--- a/nozzle/rollup/counter_test.go
+++ b/nozzle/rollup/counter_test.go
@@ -1,23 +1,20 @@
package rollup_test
import (
+ "time"
+
"github.com/bosh-prometheus/firehose_exporter/metrics"
. "github.com/bosh-prometheus/firehose_exporter/nozzle/rollup"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "time"
)
var _ = Describe("Counter Rollup", func() {
extract := func(batches []*PointsBatch) []*metrics.RawMetric {
var points []*metrics.RawMetric
-
for _, b := range batches {
- for _, p := range b.Points {
- points = append(points, p)
- }
+ points = append(points, b.Points...)
}
-
return points
}
diff --git a/nozzle/rollup/histogram.go b/nozzle/rollup/histogram.go
index 812656c7..58385159 100644
--- a/nozzle/rollup/histogram.go
+++ b/nozzle/rollup/histogram.go
@@ -1,17 +1,18 @@
package rollup
import (
+ "sync"
+ "time"
+
"github.com/bosh-prometheus/firehose_exporter/metricmaker"
"github.com/bosh-prometheus/firehose_exporter/metrics"
"github.com/bosh-prometheus/firehose_exporter/transform"
"github.com/gogo/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
- "sync"
- "time"
)
-type histogramRollup struct {
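+// HistogramRollup accumulates recorded gorouter HTTP request durations into http_duration_seconds histogram points per rollup-tag combination.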
+type HistogramRollup struct {
nodeIndex string
rollupTags []string
histogramsInInterval *sync.Map
@@ -22,17 +23,17 @@ type histogramRollup struct {
cleanPeriodicDuration time.Duration
}
-type HistogramOpt func(r *histogramRollup)
+type HistogramOpt func(r *HistogramRollup)
func SetHistogramCleaning(metricExpireIn time.Duration, cleanPeriodicDuration time.Duration) HistogramOpt {
- return func(r *histogramRollup) {
+ return func(r *HistogramRollup) {
r.metricExpireIn = metricExpireIn
r.cleanPeriodicDuration = cleanPeriodicDuration
}
}
-func NewHistogramRollup(nodeIndex string, rollupTags []string, opts ...HistogramOpt) *histogramRollup {
- hr := &histogramRollup{
+func NewHistogramRollup(nodeIndex string, rollupTags []string, opts ...HistogramOpt) *HistogramRollup {
+ hr := &HistogramRollup{
nodeIndex: nodeIndex,
rollupTags: rollupTags,
histogramsInInterval: &sync.Map{},
@@ -50,7 +51,7 @@ func NewHistogramRollup(nodeIndex string, rollupTags []string, opts ...Histogram
return hr
}
-func (r *histogramRollup) CleanPeriodic() {
+func (r *HistogramRollup) CleanPeriodic() {
for {
time.Sleep(r.cleanPeriodicDuration)
now := time.Now()
@@ -70,13 +71,13 @@ func (r *histogramRollup) CleanPeriodic() {
}
}
-func (r *histogramRollup) Record(sourceId string, tags map[string]string, value int64) {
- key := keyFromTags(r.rollupTags, sourceId, tags)
+func (r *HistogramRollup) Record(sourceID string, tags map[string]string, value int64) {
+ key := keyFromTags(r.rollupTags, sourceID, tags)
histo, found := r.histograms.Load(key)
if !found {
histo = prometheus.NewHistogram(prometheus.HistogramOpts{
- Name: metrics.GorouterHttpHistogramMetricName,
+ Name: metrics.GorouterHTTPHistogramMetricName,
})
r.histograms.Store(key, histo)
}
@@ -87,23 +88,20 @@ func (r *histogramRollup) Record(sourceId string, tags map[string]string, value
r.keyCleaningTime.Store(key, time.Now())
}
-func (r *histogramRollup) Rollup(timestamp int64) []*PointsBatch {
+func (r *HistogramRollup) Rollup(timestamp int64) []*PointsBatch {
var batches []*PointsBatch
r.histogramsInInterval.Range(func(k, _ interface{}) bool {
- labels, err := labelsFromKey(k.(string), r.nodeIndex, r.rollupTags)
- if err != nil {
- return true
- }
+ labels := labelsFromKey(k.(string), r.nodeIndex, r.rollupTags)
if _, ok := labels["app_id"]; ok {
- labels["origin"] = "cf_app"
+ labels["origin"] = OriginCfApp
}
m := &dto.Metric{}
histo, _ := r.histograms.Load(k)
_ = histo.(prometheus.Histogram).Write(m)
m.Label = transform.LabelsMapToLabelPairs(labels)
- metric := metricmaker.NewRawMetricFromMetric(metrics.GorouterHttpHistogramMetricName, m)
+ metric := metricmaker.NewRawMetricFromMetric(metrics.GorouterHTTPHistogramMetricName, m)
metric.Metric().TimestampMs = proto.Int64(transform.NanosecondsToMilliseconds(timestamp))
batches = append(batches, &PointsBatch{
Points: []*metrics.RawMetric{metric},
diff --git a/nozzle/rollup/histogram_test.go b/nozzle/rollup/histogram_test.go
index ccd677cc..c7fc1173 100644
--- a/nozzle/rollup/histogram_test.go
+++ b/nozzle/rollup/histogram_test.go
@@ -13,13 +13,17 @@ import (
. "github.com/onsi/gomega"
)
+const (
+ HTTPDurationName = "http_duration_seconds"
+)
+
type histogram struct {
points []*metrics.RawMetric
}
func (h *histogram) Count() int {
for _, p := range h.points {
- if p.MetricName() == "http_duration_seconds" {
+ if p.MetricName() == HTTPDurationName {
return int(*p.Metric().Histogram.SampleCount)
}
}
@@ -29,7 +33,7 @@ func (h *histogram) Count() int {
func (h *histogram) Sum() int {
for _, p := range h.points {
- if p.MetricName() == "http_duration_seconds" {
+ if p.MetricName() == HTTPDurationName {
return int(*p.Metric().Histogram.SampleSum)
}
}
@@ -43,7 +47,7 @@ func (h *histogram) Points() []*metrics.RawMetric {
func (h *histogram) Bucket(le string) *dto.Histogram {
for _, p := range h.points {
- if p.MetricName() != "http_duration_seconds" {
+ if p.MetricName() != HTTPDurationName {
continue
}
for _, label := range p.Metric().Label {
@@ -62,9 +66,7 @@ var _ = Describe("Histogram Rollup", func() {
for _, b := range batches {
h := &histogram{}
- for _, p := range b.Points {
- h.points = append(h.points, p)
- }
+ h.points = append(h.points, b.Points...)
histograms = append(histograms, h)
}
diff --git a/nozzle/rollup/null.go b/nozzle/rollup/null.go
index 24366aef..6a8f0840 100644
--- a/nozzle/rollup/null.go
+++ b/nozzle/rollup/null.go
@@ -1,15 +1,15 @@
package rollup
-type nullRollup struct {
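+// NullRollup is a no-op Rollup implementation, used as the default when timer rollup is not configured.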
+type NullRollup struct {
}
-func NewNullRollup() *nullRollup {
- return &nullRollup{}
+func NewNullRollup() *NullRollup {
+ return &NullRollup{}
}
-func (h *nullRollup) Record(string, map[string]string, int64) {
+func (h *NullRollup) Record(string, map[string]string, int64) {
}
-func (h *nullRollup) Rollup(_ int64) []*PointsBatch {
+func (h *NullRollup) Rollup(_ int64) []*PointsBatch {
return []*PointsBatch{}
}
diff --git a/nozzle/rollup/package.go b/nozzle/rollup/package.go
index bd0689bf..349a4e33 100644
--- a/nozzle/rollup/package.go
+++ b/nozzle/rollup/package.go
@@ -1,193 +1,194 @@
// code from: https://github.com/cloudfoundry/metric-store-release/tree/develop/src/internal/nozzle/rollup
// license:
-// Apache License
-// Version 2.0, January 2004
-// http://www.apache.org/licenses/
-//
-// TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-//
-// 1. Definitions.
-//
-// "License" shall mean the terms and conditions for use, reproduction,
-// and distribution as defined by Sections 1 through 9 of this document.
-//
-// "Licensor" shall mean the copyright owner or entity authorized by
-// the copyright owner that is granting the License.
-//
-// "Legal Entity" shall mean the union of the acting entity and all
-// other entities that control, are controlled by, or are under common
-// control with that entity. For the purposes of this definition,
-// "control" means (i) the power, direct or indirect, to cause the
-// direction or management of such entity, whether by contract or
-// otherwise, or (ii) ownership of fifty percent (50%) or more of the
-// outstanding shares, or (iii) beneficial ownership of such entity.
-//
-// "You" (or "Your") shall mean an individual or Legal Entity
-// exercising permissions granted by this License.
-//
-// "Source" form shall mean the preferred form for making modifications,
-// including but not limited to software source code, documentation
-// source, and configuration files.
-//
-// "Object" form shall mean any form resulting from mechanical
-// transformation or translation of a Source form, including but
-// not limited to compiled object code, generated documentation,
-// and conversions to other media types.
-//
-// "Work" shall mean the work of authorship, whether in Source or
-// Object form, made available under the License, as indicated by a
-// copyright notice that is included in or attached to the work
-// (an example is provided in the Appendix below).
-//
-// "Derivative Works" shall mean any work, whether in Source or Object
-// form, that is based on (or derived from) the Work and for which the
-// editorial revisions, annotations, elaborations, or other modifications
-// represent, as a whole, an original work of authorship. For the purposes
-// of this License, Derivative Works shall not include works that remain
-// separable from, or merely link (or bind by name) to the interfaces of,
-// the Work and Derivative Works thereof.
-//
-// "Contribution" shall mean any work of authorship, including
-// the original version of the Work and any modifications or additions
-// to that Work or Derivative Works thereof, that is intentionally
-// submitted to Licensor for inclusion in the Work by the copyright owner
-// or by an individual or Legal Entity authorized to submit on behalf of
-// the copyright owner. For the purposes of this definition, "submitted"
-// means any form of electronic, verbal, or written communication sent
-// to the Licensor or its representatives, including but not limited to
-// communication on electronic mailing lists, source code control systems,
-// and issue tracking systems that are managed by, or on behalf of, the
-// Licensor for the purpose of discussing and improving the Work, but
-// excluding communication that is conspicuously marked or otherwise
-// designated in writing by the copyright owner as "Not a Contribution."
-//
-// "Contributor" shall mean Licensor and any individual or Legal Entity
-// on behalf of whom a Contribution has been received by Licensor and
-// subsequently incorporated within the Work.
-//
-// 2. Grant of Copyright License. Subject to the terms and conditions of
-// this License, each Contributor hereby grants to You a perpetual,
-// worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-// copyright license to reproduce, prepare Derivative Works of,
-// publicly display, publicly perform, sublicense, and distribute the
-// Work and such Derivative Works in Source or Object form.
-//
-// 3. Grant of Patent License. Subject to the terms and conditions of
-// this License, each Contributor hereby grants to You a perpetual,
-// worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-// (except as stated in this section) patent license to make, have made,
-// use, offer to sell, sell, import, and otherwise transfer the Work,
-// where such license applies only to those patent claims licensable
-// by such Contributor that are necessarily infringed by their
-// Contribution(s) alone or by combination of their Contribution(s)
-// with the Work to which such Contribution(s) was submitted. If You
-// institute patent litigation against any entity (including a
-// cross-claim or counterclaim in a lawsuit) alleging that the Work
-// or a Contribution incorporated within the Work constitutes direct
-// or contributory patent infringement, then any patent licenses
-// granted to You under this License for that Work shall terminate
-// as of the date such litigation is filed.
-//
-// 4. Redistribution. You may reproduce and distribute copies of the
-// Work or Derivative Works thereof in any medium, with or without
-// modifications, and in Source or Object form, provided that You
-// meet the following conditions:
-//
-// (a) You must give any other recipients of the Work or
-// Derivative Works a copy of this License; and
-//
-// (b) You must cause any modified files to carry prominent notices
-// stating that You changed the files; and
-//
-// (c) You must retain, in the Source form of any Derivative Works
-// that You distribute, all copyright, patent, trademark, and
-// attribution notices from the Source form of the Work,
-// excluding those notices that do not pertain to any part of
-// the Derivative Works; and
-//
-// (d) If the Work includes a "NOTICE" text file as part of its
-// distribution, then any Derivative Works that You distribute must
-// include a readable copy of the attribution notices contained
-// within such NOTICE file, excluding those notices that do not
-// pertain to any part of the Derivative Works, in at least one
-// of the following places: within a NOTICE text file distributed
-// as part of the Derivative Works; within the Source form or
-// documentation, if provided along with the Derivative Works; or,
-// within a display generated by the Derivative Works, if and
-// wherever such third-party notices normally appear. The contents
-// of the NOTICE file are for informational purposes only and
-// do not modify the License. You may add Your own attribution
-// notices within Derivative Works that You distribute, alongside
-// or as an addendum to the NOTICE text from the Work, provided
-// that such additional attribution notices cannot be construed
-// as modifying the License.
-//
-// You may add Your own copyright statement to Your modifications and
-// may provide additional or different license terms and conditions
-// for use, reproduction, or distribution of Your modifications, or
-// for any such Derivative Works as a whole, provided Your use,
-// reproduction, and distribution of the Work otherwise complies with
-// the conditions stated in this License.
-//
-// 5. Submission of Contributions. Unless You explicitly state otherwise,
-// any Contribution intentionally submitted for inclusion in the Work
-// by You to the Licensor shall be under the terms and conditions of
-// this License, without any additional terms or conditions.
-// Notwithstanding the above, nothing herein shall supersede or modify
-// the terms of any separate license agreement you may have executed
-// with Licensor regarding such Contributions.
-//
-// 6. Trademarks. This License does not grant permission to use the trade
-// names, trademarks, service marks, or product names of the Licensor,
-// except as required for reasonable and customary use in describing the
-// origin of the Work and reproducing the content of the NOTICE file.
-//
-// 7. Disclaimer of Warranty. Unless required by applicable law or
-// agreed to in writing, Licensor provides the Work (and each
-// Contributor provides its Contributions) on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied, including, without limitation, any warranties or conditions
-// of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-// PARTICULAR PURPOSE. You are solely responsible for determining the
-// appropriateness of using or redistributing the Work and assume any
-// risks associated with Your exercise of permissions under this License.
-//
-// 8. Limitation of Liability. In no event and under no legal theory,
-// whether in tort (including negligence), contract, or otherwise,
-// unless required by applicable law (such as deliberate and grossly
-// negligent acts) or agreed to in writing, shall any Contributor be
-// liable to You for damages, including any direct, indirect, special,
-// incidental, or consequential damages of any character arising as a
-// result of this License or out of the use or inability to use the
-// Work (including but not limited to damages for loss of goodwill,
-// work stoppage, computer failure or malfunction, or any and all
-// other commercial damages or losses), even if such Contributor
-// has been advised of the possibility of such damages.
-//
-// 9. Accepting Warranty or Additional Liability. While redistributing
-// the Work or Derivative Works thereof, You may choose to offer,
-// and charge a fee for, acceptance of support, warranty, indemnity,
-// or other liability obligations and/or rights consistent with this
-// License. However, in accepting such obligations, You may act only
-// on Your own behalf and on Your sole responsibility, not on behalf
-// of any other Contributor, and only if You agree to indemnify,
-// defend, and hold each Contributor harmless for any liability
-// incurred by, or claims asserted against, such Contributor by reason
-// of your accepting any such warranty or additional liability.
-//
-// END OF TERMS AND CONDITIONS
-//
-// Copyright 2019 CloudFoundry.org Foundation, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+//
+// Apache License
+// Version 2.0, January 2004
+// http://www.apache.org/licenses/
+//
+// TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+//
+// 1. Definitions.
+//
+// "License" shall mean the terms and conditions for use, reproduction,
+// and distribution as defined by Sections 1 through 9 of this document.
+//
+// "Licensor" shall mean the copyright owner or entity authorized by
+// the copyright owner that is granting the License.
+//
+// "Legal Entity" shall mean the union of the acting entity and all
+// other entities that control, are controlled by, or are under common
+// control with that entity. For the purposes of this definition,
+// "control" means (i) the power, direct or indirect, to cause the
+// direction or management of such entity, whether by contract or
+// otherwise, or (ii) ownership of fifty percent (50%) or more of the
+// outstanding shares, or (iii) beneficial ownership of such entity.
+//
+// "You" (or "Your") shall mean an individual or Legal Entity
+// exercising permissions granted by this License.
+//
+// "Source" form shall mean the preferred form for making modifications,
+// including but not limited to software source code, documentation
+// source, and configuration files.
+//
+// "Object" form shall mean any form resulting from mechanical
+// transformation or translation of a Source form, including but
+// not limited to compiled object code, generated documentation,
+// and conversions to other media types.
+//
+// "Work" shall mean the work of authorship, whether in Source or
+// Object form, made available under the License, as indicated by a
+// copyright notice that is included in or attached to the work
+// (an example is provided in the Appendix below).
+//
+// "Derivative Works" shall mean any work, whether in Source or Object
+// form, that is based on (or derived from) the Work and for which the
+// editorial revisions, annotations, elaborations, or other modifications
+// represent, as a whole, an original work of authorship. For the purposes
+// of this License, Derivative Works shall not include works that remain
+// separable from, or merely link (or bind by name) to the interfaces of,
+// the Work and Derivative Works thereof.
+//
+// "Contribution" shall mean any work of authorship, including
+// the original version of the Work and any modifications or additions
+// to that Work or Derivative Works thereof, that is intentionally
+// submitted to Licensor for inclusion in the Work by the copyright owner
+// or by an individual or Legal Entity authorized to submit on behalf of
+// the copyright owner. For the purposes of this definition, "submitted"
+// means any form of electronic, verbal, or written communication sent
+// to the Licensor or its representatives, including but not limited to
+// communication on electronic mailing lists, source code control systems,
+// and issue tracking systems that are managed by, or on behalf of, the
+// Licensor for the purpose of discussing and improving the Work, but
+// excluding communication that is conspicuously marked or otherwise
+// designated in writing by the copyright owner as "Not a Contribution."
+//
+// "Contributor" shall mean Licensor and any individual or Legal Entity
+// on behalf of whom a Contribution has been received by Licensor and
+// subsequently incorporated within the Work.
+//
+// 2. Grant of Copyright License. Subject to the terms and conditions of
+// this License, each Contributor hereby grants to You a perpetual,
+// worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+// copyright license to reproduce, prepare Derivative Works of,
+// publicly display, publicly perform, sublicense, and distribute the
+// Work and such Derivative Works in Source or Object form.
+//
+// 3. Grant of Patent License. Subject to the terms and conditions of
+// this License, each Contributor hereby grants to You a perpetual,
+// worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+// (except as stated in this section) patent license to make, have made,
+// use, offer to sell, sell, import, and otherwise transfer the Work,
+// where such license applies only to those patent claims licensable
+// by such Contributor that are necessarily infringed by their
+// Contribution(s) alone or by combination of their Contribution(s)
+// with the Work to which such Contribution(s) was submitted. If You
+// institute patent litigation against any entity (including a
+// cross-claim or counterclaim in a lawsuit) alleging that the Work
+// or a Contribution incorporated within the Work constitutes direct
+// or contributory patent infringement, then any patent licenses
+// granted to You under this License for that Work shall terminate
+// as of the date such litigation is filed.
+//
+// 4. Redistribution. You may reproduce and distribute copies of the
+// Work or Derivative Works thereof in any medium, with or without
+// modifications, and in Source or Object form, provided that You
+// meet the following conditions:
+//
+// (a) You must give any other recipients of the Work or
+// Derivative Works a copy of this License; and
+//
+// (b) You must cause any modified files to carry prominent notices
+// stating that You changed the files; and
+//
+// (c) You must retain, in the Source form of any Derivative Works
+// that You distribute, all copyright, patent, trademark, and
+// attribution notices from the Source form of the Work,
+// excluding those notices that do not pertain to any part of
+// the Derivative Works; and
+//
+// (d) If the Work includes a "NOTICE" text file as part of its
+// distribution, then any Derivative Works that You distribute must
+// include a readable copy of the attribution notices contained
+// within such NOTICE file, excluding those notices that do not
+// pertain to any part of the Derivative Works, in at least one
+// of the following places: within a NOTICE text file distributed
+// as part of the Derivative Works; within the Source form or
+// documentation, if provided along with the Derivative Works; or,
+// within a display generated by the Derivative Works, if and
+// wherever such third-party notices normally appear. The contents
+// of the NOTICE file are for informational purposes only and
+// do not modify the License. You may add Your own attribution
+// notices within Derivative Works that You distribute, alongside
+// or as an addendum to the NOTICE text from the Work, provided
+// that such additional attribution notices cannot be construed
+// as modifying the License.
+//
+// You may add Your own copyright statement to Your modifications and
+// may provide additional or different license terms and conditions
+// for use, reproduction, or distribution of Your modifications, or
+// for any such Derivative Works as a whole, provided Your use,
+// reproduction, and distribution of the Work otherwise complies with
+// the conditions stated in this License.
+//
+// 5. Submission of Contributions. Unless You explicitly state otherwise,
+// any Contribution intentionally submitted for inclusion in the Work
+// by You to the Licensor shall be under the terms and conditions of
+// this License, without any additional terms or conditions.
+// Notwithstanding the above, nothing herein shall supersede or modify
+// the terms of any separate license agreement you may have executed
+// with Licensor regarding such Contributions.
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor,
+// except as required for reasonable and customary use in describing the
+// origin of the Work and reproducing the content of the NOTICE file.
+//
+// 7. Disclaimer of Warranty. Unless required by applicable law or
+// agreed to in writing, Licensor provides the Work (and each
+// Contributor provides its Contributions) on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied, including, without limitation, any warranties or conditions
+// of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+// PARTICULAR PURPOSE. You are solely responsible for determining the
+// appropriateness of using or redistributing the Work and assume any
+// risks associated with Your exercise of permissions under this License.
+//
+// 8. Limitation of Liability. In no event and under no legal theory,
+// whether in tort (including negligence), contract, or otherwise,
+// unless required by applicable law (such as deliberate and grossly
+// negligent acts) or agreed to in writing, shall any Contributor be
+// liable to You for damages, including any direct, indirect, special,
+// incidental, or consequential damages of any character arising as a
+// result of this License or out of the use or inability to use the
+// Work (including but not limited to damages for loss of goodwill,
+// work stoppage, computer failure or malfunction, or any and all
+// other commercial damages or losses), even if such Contributor
+// has been advised of the possibility of such damages.
+//
+// 9. Accepting Warranty or Additional Liability. While redistributing
+// the Work or Derivative Works thereof, You may choose to offer,
+// and charge a fee for, acceptance of support, warranty, indemnity,
+// or other liability obligations and/or rights consistent with this
+// License. However, in accepting such obligations, You may act only
+// on Your own behalf and on Your sole responsibility, not on behalf
+// of any other Contributor, and only if You agree to indemnify,
+// defend, and hold each Contributor harmless for any liability
+// incurred by, or claims asserted against, such Contributor by reason
+// of your accepting any such warranty or additional liability.
+//
+// END OF TERMS AND CONDITIONS
+//
+// Copyright 2019 CloudFoundry.org Foundation, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package rollup
diff --git a/nozzle/rollup/rollup.go b/nozzle/rollup/rollup.go
index 871768da..a719057a 100644
--- a/nozzle/rollup/rollup.go
+++ b/nozzle/rollup/rollup.go
@@ -1,9 +1,10 @@
package rollup
import (
+ "strings"
+
"github.com/bosh-prometheus/firehose_exporter/metrics"
log "github.com/sirupsen/logrus"
- "strings"
)
type PointsBatch struct {
@@ -12,12 +13,12 @@ type PointsBatch struct {
}
type Rollup interface {
- Record(sourceId string, tags map[string]string, value int64)
+ Record(sourceID string, tags map[string]string, value int64)
Rollup(timestamp int64) []*PointsBatch
}
-func keyFromTags(rollupTags []string, sourceId string, tags map[string]string) string {
- filteredTags := []string{sourceId}
+func keyFromTags(rollupTags []string, sourceID string, tags map[string]string) string {
+ filteredTags := []string{sourceID}
for _, tag := range rollupTags {
filteredTags = append(filteredTags, tags[tag])
@@ -25,14 +26,14 @@ func keyFromTags(rollupTags []string, sourceId string, tags map[string]string) s
return strings.Join(filteredTags, "%%")
}
-func labelsFromKey(key, nodeIndex string, rollupTags []string) (map[string]string, error) {
+func labelsFromKey(key, nodeIndex string, rollupTags []string) map[string]string {
keyParts := strings.Split(key, "%%")
if len(keyParts) != len(rollupTags)+1 {
log.WithField("reason", "skipping rollup metric").WithField("count", len(keyParts)).WithField("key", key).Info(
"skipping rollup metric",
)
- return nil, nil
+ return nil
}
labels := make(map[string]string)
@@ -45,5 +46,5 @@ func labelsFromKey(key, nodeIndex string, rollupTags []string) (map[string]strin
labels["source_id"] = keyParts[0]
labels["node_index"] = nodeIndex
- return labels, nil
+ return labels
}
diff --git a/nozzle/rollup/summary.go b/nozzle/rollup/summary.go
index 68c5181a..86a0f07a 100644
--- a/nozzle/rollup/summary.go
+++ b/nozzle/rollup/summary.go
@@ -1,17 +1,18 @@
package rollup
import (
+ "sync"
+ "time"
+
"github.com/bosh-prometheus/firehose_exporter/metricmaker"
"github.com/bosh-prometheus/firehose_exporter/metrics"
"github.com/bosh-prometheus/firehose_exporter/transform"
"github.com/gogo/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
- "sync"
- "time"
)
-type summaryRollup struct {
+type SummaryRollup struct {
nodeIndex string
rollupTags []string
summariesInInterval *sync.Map
@@ -22,17 +23,17 @@ type summaryRollup struct {
cleanPeriodicDuration time.Duration
}
-type SummaryOpt func(r *summaryRollup)
+type SummaryOpt func(r *SummaryRollup)
func SetSummaryCleaning(metricExpireIn time.Duration, cleanPeriodicDuration time.Duration) SummaryOpt {
- return func(r *summaryRollup) {
+ return func(r *SummaryRollup) {
r.metricExpireIn = metricExpireIn
r.cleanPeriodicDuration = cleanPeriodicDuration
}
}
-func NewSummaryRollup(nodeIndex string, rollupTags []string, opts ...SummaryOpt) *summaryRollup {
- sr := &summaryRollup{
+func NewSummaryRollup(nodeIndex string, rollupTags []string, opts ...SummaryOpt) *SummaryRollup {
+ sr := &SummaryRollup{
nodeIndex: nodeIndex,
rollupTags: rollupTags,
summaries: &sync.Map{},
@@ -50,13 +51,13 @@ func NewSummaryRollup(nodeIndex string, rollupTags []string, opts ...SummaryOpt)
return sr
}
-func (r *summaryRollup) Record(sourceId string, tags map[string]string, value int64) {
- key := keyFromTags(r.rollupTags, sourceId, tags)
+func (r *SummaryRollup) Record(sourceID string, tags map[string]string, value int64) {
+ key := keyFromTags(r.rollupTags, sourceID, tags)
summary, found := r.summaries.Load(key)
if !found {
summary = prometheus.NewSummary(prometheus.SummaryOpts{
- Name: metrics.GorouterHttpSummaryMetricName,
+ Name: metrics.GorouterHTTPSummaryMetricName,
Objectives: map[float64]float64{0.2: 0.05, 0.5: 0.05, 0.75: 0.02, 0.95: 0.01},
})
r.summaries.Store(key, summary)
@@ -67,7 +68,7 @@ func (r *summaryRollup) Record(sourceId string, tags map[string]string, value in
r.keyCleaningTime.Store(key, time.Now())
}
-func (r *summaryRollup) CleanPeriodic() {
+func (r *SummaryRollup) CleanPeriodic() {
for {
time.Sleep(r.cleanPeriodicDuration)
now := time.Now()
@@ -87,23 +88,20 @@ func (r *summaryRollup) CleanPeriodic() {
}
}
-func (r *summaryRollup) Rollup(timestamp int64) []*PointsBatch {
+func (r *SummaryRollup) Rollup(timestamp int64) []*PointsBatch {
var batches []*PointsBatch
r.summariesInInterval.Range(func(k, _ interface{}) bool {
- labels, err := labelsFromKey(k.(string), r.nodeIndex, r.rollupTags)
- if err != nil {
- return true
- }
+ labels := labelsFromKey(k.(string), r.nodeIndex, r.rollupTags)
if _, ok := labels["app_id"]; ok {
- labels["origin"] = "cf_app"
+ labels["origin"] = OriginCfApp
}
m := &dto.Metric{}
summary, _ := r.summaries.Load(k)
_ = summary.(prometheus.Summary).Write(m)
m.Label = transform.LabelsMapToLabelPairs(labels)
- metric := metricmaker.NewRawMetricFromMetric(metrics.GorouterHttpSummaryMetricName, m)
+ metric := metricmaker.NewRawMetricFromMetric(metrics.GorouterHTTPSummaryMetricName, m)
metric.Metric().TimestampMs = proto.Int64(transform.NanosecondsToMilliseconds(timestamp))
batches = append(batches, &PointsBatch{
Points: []*metrics.RawMetric{metric},
diff --git a/nozzle/rollup/summary_test.go b/nozzle/rollup/summary_test.go
index 0a07a658..49be9bb8 100644
--- a/nozzle/rollup/summary_test.go
+++ b/nozzle/rollup/summary_test.go
@@ -13,13 +13,17 @@ import (
. "github.com/onsi/gomega"
)
+const (
+ HTTPResponseName = "http_response_size_bytes"
+)
+
type summary struct {
points []*metrics.RawMetric
}
func (h *summary) Count() int {
for _, p := range h.points {
- if p.MetricName() == "http_response_size_bytes" {
+ if p.MetricName() == HTTPResponseName {
return int(*p.Metric().Summary.SampleCount)
}
}
@@ -29,7 +33,7 @@ func (h *summary) Count() int {
func (h *summary) Sum() int {
for _, p := range h.points {
- if p.MetricName() == "http_response_size_bytes" {
+ if p.MetricName() == HTTPResponseName {
return int(*p.Metric().Summary.SampleSum)
}
}
@@ -43,7 +47,7 @@ func (h *summary) Points() []*metrics.RawMetric {
func (h *summary) Bucket(le string) *dto.Summary {
for _, p := range h.points {
- if p.MetricName() != "http_response_size_bytes" {
+ if p.MetricName() != HTTPResponseName {
continue
}
for _, label := range p.Metric().Label {
@@ -62,9 +66,7 @@ var _ = Describe("summary Rollup", func() {
for _, b := range batches {
h := &summary{}
- for _, p := range b.Points {
- h.points = append(h.points, p)
- }
+ h.points = append(h.points, b.Points...)
summaries = append(summaries, h)
}
diff --git a/testing/matcher.go b/testing/matcher.go
index 882aaa2a..fbe3cdcb 100644
--- a/testing/matcher.go
+++ b/testing/matcher.go
@@ -27,6 +27,7 @@ type containPointsMatcher struct {
expected interface{}
}
+//nolint:gocognit
func (matcher *containPointsMatcher) Match(actual interface{}) (success bool, err error) {
expectedPoints := matcher.expected.([]*metrics.RawMetric)
points := actual.([]*metrics.RawMetric)
@@ -34,7 +35,6 @@ func (matcher *containPointsMatcher) Match(actual interface{}) (success bool, er
for _, point := range points {
for n, expectedPoint := range expectedPoints {
-
matchValue := false
if point.Metric().Counter != nil {
diff --git a/transform/label_pair.go b/transform/label_pair.go
index 33729215..4bcb4c17 100644
--- a/transform/label_pair.go
+++ b/transform/label_pair.go
@@ -48,7 +48,6 @@ func PlaceConstLabelInLabelPair(labels []*dto.LabelPair, constKey string, requir
return labels
}
}
-
}
if required {
labels = append(labels, &dto.LabelPair{
diff --git a/transform/normalize_name.go b/transform/normalize_name.go
index b4bca947..702456dc 100644
--- a/transform/normalize_name.go
+++ b/transform/normalize_name.go
@@ -1,9 +1,10 @@
package transform
import (
- "github.com/iancoleman/strcase"
"regexp"
"strings"
+
+ "github.com/iancoleman/strcase"
)
var (
@@ -11,18 +12,17 @@ var (
)
func NormalizeName(name string) string {
-
return strcase.ToSnake(safeNameRE.ReplaceAllLiteralString(name, "_"))
}
func NormalizeNameDesc(desc string) string {
if strings.HasPrefix(desc, "/p.") {
- return "/p-" + desc[3:len(desc)]
+ return "/p-" + desc[3:]
}
return desc
}
func NormalizeOriginDesc(desc string) string {
- return strings.Replace(desc, ".", "-", -1)
+ return strings.ReplaceAll(desc, ".", "-")
}
diff --git a/utils/utils.go b/utils/utils.go
index 16e5001a..e2eebf17 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -18,8 +18,8 @@ func MetricNameIsContainerMetric(metricName string) bool {
strings.HasSuffix(metricName, "_disk_quota")
}
-func MetricIsHttpMetric(metric *metrics.RawMetric) bool {
- return strings.Contains(metric.MetricName(), metrics.GorouterHttpSummaryMetricName) ||
- strings.Contains(metric.MetricName(), metrics.GorouterHttpHistogramMetricName) ||
- strings.Contains(metric.MetricName(), metrics.GorouterHttpCounterMetricName)
+func MetricIsHTTPMetric(metric *metrics.RawMetric) bool {
+ return strings.Contains(metric.MetricName(), metrics.GorouterHTTPSummaryMetricName) ||
+ strings.Contains(metric.MetricName(), metrics.GorouterHTTPHistogramMetricName) ||
+ strings.Contains(metric.MetricName(), metrics.GorouterHTTPCounterMetricName)
}